The rest of the files
This commit is contained in:
parent
7cb7324868
commit
03a38cd29e
|
|
@ -0,0 +1,75 @@
|
|||
from flask import Flask, request, jsonify
import asyncio

# BUG FIX: removed `import axios` — axios is a JavaScript HTTP library, not a
# Python package; importing it raised ImportError and made this module fail to
# load. HTTP calls below should use a Python client (e.g. urllib.request).

app = Flask(__name__)
|
||||
|
||||
# Load Markdown files
def loadMarkdownFiles(directory):
    """Return a list of {'name': filename, 'content': text} dicts for every
    file in *directory* whose extension is .md (case-insensitive).

    BUG FIX: the original body was untranslated JavaScript (fs.readdirSync,
    const, ...) and was a Python SyntaxError; this is the equivalent Python.
    """
    import os

    markdown_files = []
    for file_name in os.listdir(directory):
        if os.path.splitext(file_name)[1].lower() == '.md':
            file_path = os.path.join(directory, file_name)
            with open(file_path, 'r', encoding='utf-8') as fh:
                markdown_files.append({'name': file_name, 'content': fh.read()})
    return markdown_files
|
||||
|
||||
# Initialize RAG model and tokenizer
async def initRagModel():
    """Load the facebook/rag-token-nq tokenizer and model and return them as
    {'tokenizer': ..., 'model': ...}.

    BUG FIX: the original body was JavaScript (`const ... ;`) and a Python
    SyntaxError. Python's `from_pretrained` is synchronous (the JS original
    awaited it), so no `await` here.

    NOTE(review): AutoTokenizer and RagTokenForGeneration are never imported
    in this file — they come from the `transformers` package; add
    `from transformers import AutoTokenizer, RagTokenForGeneration` — TODO confirm.
    """
    tokenizer = AutoTokenizer.from_pretrained('facebook/rag-token-nq')
    model = RagTokenForGeneration.from_pretrained('facebook/rag-token-nq')
    return {'tokenizer': tokenizer, 'model': model}
|
||||
|
||||
# Retrieve relevant information from Markdown files using Ollama API
async def retrieveInformation(query):
    """POST {'query': query} as JSON to the local Ollama proxy /chat endpoint
    and return the 'response' field of the JSON reply.

    Logs and re-raises any failure (network error, bad JSON, missing key).

    BUG FIX: the original body used the JavaScript axios client and
    `axios.AxiosError`, neither of which exists in Python; rewritten with the
    standard-library urllib.
    """
    import json
    import urllib.request

    def _post():
        payload = json.dumps({'query': query}).encode('utf-8')
        req = urllib.request.Request(
            'http://localhost:8080/chat',
            data=payload,
            headers={'Content-Type': 'application/json'},
        )
        with urllib.request.urlopen(req) as resp:
            return json.loads(resp.read().decode('utf-8'))

    try:
        # urllib is blocking; run it in a worker thread to keep the event loop free.
        data = await asyncio.to_thread(_post)
        return data['response']
    except Exception as error:
        print(f'Error: {error}')
        raise
|
||||
|
||||
# Chatbot logic
async def chatbot():
    """Interactive console loop: load the notes, initialise the RAG model,
    then answer stdin queries until the user types 'exit' (or EOF).

    BUG FIX: the original body was untranslated JavaScript (process.stdin.on,
    const, ...) and a Python SyntaxError; also, Python exceptions have no
    `.message` attribute — use str(error) instead.
    """
    directory = './notes'  # Directory containing Markdown files
    markdown_files = loadMarkdownFiles(directory)
    rag_model = await initRagModel()
    # NOTE(review): markdown_files and rag_model are loaded but never used
    # below — presumably intended for retrieval; confirm with the author.

    print('Chatbot is ready! Ask your questions.')

    while True:
        try:
            # input() blocks, so run it in a worker thread to stay async.
            query = (await asyncio.to_thread(input)).strip()
        except EOFError:
            return
        if query.lower() == 'exit':
            return
        try:
            response = await retrieveInformation(query)
            print(f'Chatbot: {response}')
        except Exception as error:
            print(f'Error: {error}')
|
||||
|
||||
# Flask route to handle chat requests
@app.route('/chat', methods=['POST'])
async def chat():
    """POST /chat: expects JSON {"query": ...}; returns {"response": ...},
    400 when the query is missing, 500 on a retrieval failure."""
    data = request.json
    query = data.get('query')

    # Guard clause: reject requests with no usable query.
    if not query:
        return jsonify({'error': 'No query provided'}), 400

    try:
        response = await retrieveInformation(query)
    except Exception as error:
        return jsonify({'error': str(error)}), 500
    return jsonify({'response': response})
|
||||
|
||||
if __name__ == '__main__':
    # BUG FIX: the original ran `asyncio.run(chatbot())` before `app.run()`;
    # chatbot() never returns (interactive loop), so the Flask server was
    # unreachable code — yet chatbot() posts to this very server. Start the
    # server in a daemon thread first, then run the chatbot loop up front.
    import threading

    server = threading.Thread(
        # use_reloader must be off: Flask's reloader only works in the main thread.
        target=lambda: app.run(debug=True, use_reloader=False),
        daemon=True,
    )
    server.start()
    asyncio.run(chatbot())
|
||||
|
|
@ -0,0 +1,70 @@
|
|||
const { AutoTokenizer, RagTokenForGeneration } = require('@huggingface/transformers');
|
||||
const fs = require('fs');
|
||||
const path = require('path');
|
||||
const axios = require('axios');
|
||||
const configparser = require('configparser');
|
||||
|
||||
// Collect every Markdown (.md) file in `directory`, returning an array of
// { name, content } records in directory-listing order.
function loadMarkdownFiles(directory) {
  const entries = fs.readdirSync(directory);
  return entries
    .filter((entry) => path.extname(entry).toLowerCase() === '.md')
    .map((entry) => ({
      name: entry,
      content: fs.readFileSync(path.join(directory, entry), 'utf-8'),
    }));
}
|
||||
|
||||
// Initialize RAG model and tokenizer for the facebook/rag-token-nq checkpoint.
// Returns { tokenizer, model } once both have finished loading.
async function initRagModel() {
  const modelId = 'facebook/rag-token-nq';
  const tokenizer = await AutoTokenizer.from_pretrained(modelId);
  const model = await RagTokenForGeneration.from_pretrained(modelId);
  return { tokenizer, model };
}
|
||||
|
||||
// Query the Ollama chat endpoint configured in ollama.ini and return its reply.
// Logs and rethrows on any failure (config, network, or response shape).
async function retrieveInformation(query) {
  try {
    // NOTE(review): the npm `configparser` package default-exports the
    // ConfigParser class, so `configparser.ConfigParser` may be undefined —
    // verify against the installed package's API.
    const config = new configparser.ConfigParser();
    config.read('ollama.ini');

    const host = config.get('Ollama', 'host');
    const port = config.get('Ollama', 'port');
    const url = `http://${host}:${port}/chat`;

    const { data } = await axios.post(url, { query });
    return data.response;
  } catch (error) {
    console.error('Error:', error.message);
    throw error;
  }
}
|
||||
|
||||
// Interactive REPL: load the notes, initialise the model, then answer each
// stdin line as a query until the user types "exit".
async function chatbot() {
  const directory = './notes'; // Directory containing Markdown files
  const markdownFiles = loadMarkdownFiles(directory);
  const ragModel = await initRagModel();
  // NOTE(review): markdownFiles and ragModel are loaded but never used
  // below — presumably intended for retrieval; confirm with the author.

  console.log('Chatbot is ready! Ask your questions.');

  process.stdin.on('data', async (data) => {
    const query = data.toString().trim();
    if (query.toLowerCase() === 'exit') {
      process.exit(0);
    }

    try {
      console.log(`Chatbot: ${await retrieveInformation(query)}`);
    } catch (error) {
      console.error('Error:', error.message);
    }
  });
}
|
||||
|
||||
// Entry point. BUG FIX: the promise was floating — a startup failure (missing
// ./notes, model download error) became an unhandled rejection. Surface it
// and exit non-zero instead.
chatbot().catch((error) => {
  console.error('Fatal:', error.message);
  process.exit(1);
});
|
||||
|
|
@ -0,0 +1,3 @@
|
|||
[Ollama]
|
||||
host = localhost
|
||||
port = 8080
|
||||
File diff suppressed because it is too large
Load Diff
|
|
@ -0,0 +1,17 @@
|
|||
{
|
||||
"name": "chatbot",
|
||||
"version": "1.0.0",
|
||||
"description": "An AI chatbot using RAG for retrieval from Markdown files.",
|
||||
"main": "index.js",
|
||||
"scripts": {
|
||||
"start": "node index.js"
|
||||
},
|
||||
"dependencies": {
|
||||
"@huggingface/transformers": "^3.4.1",
|
||||
"axios": "^1.8.4",
|
||||
"configparser": "^0.3.10",
|
||||
"flask": "^0.2.10",
|
||||
"fs": "^0.0.1-security",
|
||||
"path": "^0.12.7"
|
||||
}
|
||||
}
|
||||
Loading…
Reference in New Issue