Updated app.py per BOLT's instructions

This commit is contained in:
PK13274 2025-04-01 10:04:46 -05:00
parent 03a38cd29e
commit b663cc8aee
1 changed file with 29 additions and 40 deletions

69
app.py
View File

@@ -6,69 +6,58 @@ app = Flask(__name__)
# Load Markdown files
def loadMarkdownFiles(directory):
    """Collect every Markdown (.md) file in *directory*.

    Returns a list of dicts, one per file, each with 'name' (the bare
    filename) and 'content' (the file's text, decoded as UTF-8), in
    os.listdir order.
    """
    def _read(path):
        # Read a file's full text; the context manager guarantees closure.
        with open(path, 'r', encoding='utf-8') as handle:
            return handle.read()

    return [
        {'name': entry, 'content': _read(os.path.join(directory, entry))}
        for entry in os.listdir(directory)
        if entry.endswith('.md')
    ]
# Initialize RAG model and tokenizer
async def initRagModel():
    """Load the RAG tokenizer and model ('facebook/rag-token-nq').

    Returns a dict with keys 'tokenizer' and 'model'.

    Fix: ``from_pretrained`` is a synchronous classmethod in transformers;
    awaiting its return value (a habit carried over from the JavaScript
    original) raises a TypeError. Call it directly instead.
    """
    # Imported lazily so the (heavy) transformers dependency is only paid
    # for when the model is actually initialized.
    from transformers import AutoTokenizer, RagTokenForGeneration

    tokenizer = AutoTokenizer.from_pretrained('facebook/rag-token-nq')
    model = RagTokenForGeneration.from_pretrained('facebook/rag-token-nq')
    return {'tokenizer': tokenizer, 'model': model}
# Retrieve relevant information from Markdown files using Ollama API
async def retrieveInformation(query):
    """POST *query* to the configured Ollama chat endpoint; return the reply.

    Reads ``host`` and ``port`` from the ``[Ollama]`` section of
    ``ollama.ini`` (relative to the working directory), posts
    ``{'query': query}`` as JSON to ``http://host:port/chat``, and returns
    the ``'response'`` field of the JSON reply.

    Raises: any configparser, network, or decoding error, after printing it.

    Fix: ``axios`` is a JavaScript library and does not exist in Python;
    the previous body could never run. Use stdlib urllib instead.
    """
    import configparser
    import json
    import urllib.request

    try:
        config = configparser.ConfigParser()
        config.read('ollama.ini')
        host = config.get('Ollama', 'host')
        port = config.get('Ollama', 'port')
        payload = json.dumps({'query': query}).encode('utf-8')
        req = urllib.request.Request(
            f'http://{host}:{port}/chat',
            data=payload,
            headers={'Content-Type': 'application/json'},
        )
        with urllib.request.urlopen(req) as resp:
            return json.loads(resp.read().decode('utf-8'))['response']
    except Exception as error:
        # Preserve original behavior: log, then propagate to the caller.
        print(f'Error: {error}')
        raise
# Chatbot logic
async def chatbot():
    """Console REPL: load notes and the RAG model, then answer questions.

    Reads one question per line from stdin; typing 'exit' (any case)
    stops the loop. Retrieval errors are printed and the loop continues.
    """
    notes_dir = './notes'  # Directory containing Markdown files
    notes = loadMarkdownFiles(notes_dir)
    rag = await initRagModel()
    print('Chatbot is ready! Ask your questions.')
    while True:
        question = input().strip()
        if question.lower() == 'exit':
            break
        try:
            answer = await retrieveInformation(question)
        except Exception as error:
            print(f'Error: {error}')
        else:
            print(f'Chatbot: {answer}')
# Flask route to handle chat requests
@app.route('/chat', methods=['POST'])
async def chat():
    """POST /chat — expects JSON like {'query': ...}.

    Returns {'response': ...} on success, a 400 with {'error': ...} when no
    query is supplied, and a 500 with {'error': ...} if retrieval fails.
    NOTE(review): async views require Flask's async extra (flask[async]) —
    confirm it is installed.
    """
    data = request.json
    query = data.get('query')
    if not query:
        return jsonify({'error': 'No query provided'}), 400
    try:
        response = await retrieveInformation(query)
        return jsonify({'response': response})
    except Exception as error:
        return jsonify({'error': str(error)}), 500
# Script entry point: run the console chatbot event loop.
# (The Flask app is only served when run through a WSGI/ASGI server.)
if __name__ == '__main__':
    asyncio.run(chatbot())