"""Markdown-backed RAG chatbot prototype (CLI REPL + Flask app shell).

NOTE(review): this file looks like a line-by-line translation from
JavaScript (camelCase names, an ``axios`` import).  The import list below
fixes what cannot work in Python: ``axios`` is a JavaScript HTTP client
and does not exist as a Python package — HTTP calls use the stdlib
``urllib.request`` instead.  ``os`` and ``configparser`` were used later
in the file but never imported.
"""

import asyncio
import configparser
import os

from flask import Flask, request, jsonify

app = Flask(__name__)
# Load Markdown files


def loadMarkdownFiles(directory):
    """Read every ``*.md`` file directly inside *directory* (not recursive).

    Args:
        directory: Path of the folder to scan.

    Returns:
        list[dict]: one ``{'name': filename, 'content': file text}`` dict
        per Markdown file, in ``os.listdir`` order.

    Raises:
        FileNotFoundError: if *directory* does not exist.
        UnicodeDecodeError: if a file is not valid UTF-8.
    """
    # Local import keeps this block self-contained: ``os`` was never
    # imported at the top of the original file.
    import os

    markdown_files = []
    for file_name in os.listdir(directory):
        if not file_name.endswith('.md'):
            continue
        file_path = os.path.join(directory, file_name)
        # Markdown is text; read as UTF-8 like the original did.
        with open(file_path, 'r', encoding='utf-8') as handle:
            markdown_files.append({'name': file_name, 'content': handle.read()})
    return markdown_files
# Initialize RAG model and tokenizer


async def initRagModel():
    """Load the pretrained RAG tokenizer and model.

    Returns:
        dict: ``{'tokenizer': ..., 'model': ...}`` ready for generation.

    Note:
        ``from_pretrained`` is a regular (blocking) classmethod.  The
        original code ``await``-ed it, which raises ``TypeError`` because
        its return value is not awaitable.  The function stays ``async``
        so existing ``await initRagModel()`` call sites keep working.
    """
    # Imported lazily: transformers is heavy and only needed here.
    from transformers import AutoTokenizer, RagTokenForGeneration

    # Blocking downloads/loads — these calls are synchronous.
    tokenizer = AutoTokenizer.from_pretrained('facebook/rag-token-nq')
    model = RagTokenForGeneration.from_pretrained('facebook/rag-token-nq')
    return {'tokenizer': tokenizer, 'model': model}
# Retrieve relevant information from Markdown files using Ollama API


async def retrieveInformation(query):
    """POST *query* to the Ollama chat endpoint and return its reply text.

    Host and port are read from ``ollama.ini`` (section ``[Ollama]``,
    keys ``host`` and ``port``) on every call.

    Args:
        query: The user's question.

    Returns:
        The ``'response'`` field of the service's JSON reply.

    Raises:
        configparser.Error: if ``ollama.ini`` lacks the section or keys.
        OSError: if the HTTP request fails.
    """
    # Local imports keep this block self-contained; the original called
    # ``axios.post`` — axios is a JavaScript library that does not exist
    # in Python, so the stdlib is used instead.
    import configparser
    import json
    import urllib.request

    try:
        config = configparser.ConfigParser()
        config.read('ollama.ini')
        host = config.get('Ollama', 'host')
        port = config.get('Ollama', 'port')

        def _post():
            # Blocking JSON POST; run in a worker thread (below) so the
            # event loop stays responsive.
            http_request = urllib.request.Request(
                f'http://{host}:{port}/chat',
                data=json.dumps({'query': query}).encode('utf-8'),
                headers={'Content-Type': 'application/json'},
                method='POST',
            )
            with urllib.request.urlopen(http_request) as http_response:
                return json.loads(http_response.read().decode('utf-8'))

        loop = asyncio.get_running_loop()
        payload = await loop.run_in_executor(None, _post)
        return payload['response']
    except Exception as error:
        # Preserve the original best-effort logging, then let the caller
        # decide how to handle the failure.
        print(f'Error: {error}')
        raise
# Chatbot logic


async def chatbot():
    """Interactive REPL: read questions from stdin, answer via Ollama.

    Loads the Markdown notes and the RAG model up front, then loops on
    ``input()`` until the user types ``exit`` (or stdin is closed).

    NOTE(review): ``markdownFiles`` and ``ragModel`` are loaded but never
    consulted in the loop below — answers come solely from the Ollama
    service.  Presumably retrieval/generation was meant to use them;
    confirm against the project's design.
    """
    directory = './notes'  # Directory containing Markdown files
    markdownFiles = loadMarkdownFiles(directory)
    ragModel = await initRagModel()

    print('Chatbot is ready! Ask your questions.')

    while True:
        try:
            query = input().strip()
        except EOFError:
            # stdin closed (e.g. piped input exhausted): exit cleanly
            # instead of crashing with a traceback.
            break
        if query.lower() == 'exit':
            break
        try:
            response = await retrieveInformation(query)
            print(f'Chatbot: {response}')
        except Exception as error:
            # Best-effort: report the failure and keep the REPL alive.
            print(f'Error: {error}')
if __name__ == '__main__':
    # NOTE(review): ``app.run`` is only reached after ``chatbot()``
    # returns (user types 'exit' or closes stdin), and no routes were
    # registered on ``app`` — the Flask server would serve nothing but
    # 404s.  Confirm whether a web front-end was meant to be wired up.
    try:
        asyncio.run(chatbot())
    except KeyboardInterrupt:
        # Let Ctrl+C stop the REPL without dumping a traceback.
        pass
    app.run(debug=True)