// Chatbot over local Markdown notes: loads ./notes, initializes a RAG
// tokenizer/model, then answers stdin queries by calling a local Ollama
// HTTP endpoint until the user types "exit".
const { AutoTokenizer, RagTokenForGeneration } = require('@huggingface/transformers');
const fs = require('fs');
const path = require('path');
const axios = require('axios');
const configparser = require('configparser');

/**
 * Load all Markdown (.md) files from a directory (non-recursive).
 * @param {string} directory - Directory to scan.
 * @returns {{name: string, content: string}[]} One entry per .md file.
 * @throws If the directory or a file cannot be read.
 */
function loadMarkdownFiles(directory) {
  return fs
    .readdirSync(directory)
    .filter((file) => path.extname(file).toLowerCase() === '.md')
    .map((file) => ({
      name: file,
      content: fs.readFileSync(path.join(directory, file), 'utf-8'),
    }));
}

/**
 * Download/initialize the RAG tokenizer and generation model.
 * NOTE(review): the returned pair is currently unused by the chat loop —
 * retrieval is delegated entirely to the Ollama endpoint. Confirm whether
 * this (potentially expensive) initialization is still required.
 * @returns {Promise<{tokenizer: object, model: object}>}
 */
async function initRagModel() {
  const tokenizer = await AutoTokenizer.from_pretrained('facebook/rag-token-nq');
  const model = await RagTokenForGeneration.from_pretrained('facebook/rag-token-nq');
  return { tokenizer, model };
}

// Cached Ollama endpoint settings. Previously ollama.ini was re-parsed on
// EVERY query; now it is read once, lazily (a first-call read/parse error
// still surfaces to the caller exactly as before).
let ollamaEndpoint = null;

/** Parse ollama.ini on first use and return `{ host, port }`. */
function getOllamaEndpoint() {
  if (ollamaEndpoint === null) {
    const config = new configparser.ConfigParser();
    config.read('ollama.ini');
    ollamaEndpoint = {
      host: config.get('Ollama', 'host'),
      port: config.get('Ollama', 'port'),
    };
  }
  return ollamaEndpoint;
}

/**
 * Send a query to the configured Ollama endpoint and return its reply text.
 * Errors (config or HTTP) propagate to the caller, which logs them once —
 * the previous version logged here AND in the caller, duplicating output.
 * @param {string} query - User question.
 * @returns {Promise<string>} The `response` field of the API reply body.
 */
async function retrieveInformation(query) {
  // NOTE(review): Ollama's documented chat endpoint is /api/chat with a
  // `messages` payload; confirm this `/chat` + `{ query }` shape matches
  // the server actually deployed.
  const { host, port } = getOllamaEndpoint();
  const response = await axios.post(`http://${host}:${port}/chat`, { query });
  return response.data.response;
}

/**
 * Main loop: load notes, initialize the model, then answer one stdin line
 * at a time. Typing "exit" (case-insensitive) terminates the process.
 */
async function chatbot() {
  const directory = './notes'; // Directory containing Markdown files
  // TODO(review): markdownFiles is loaded but never fed into retrieval —
  // wire it into the query pipeline or drop the load.
  const markdownFiles = loadMarkdownFiles(directory);
  const ragModel = await initRagModel();

  console.log('Chatbot is ready! Ask your questions.');

  process.stdin.on('data', async (data) => {
    const query = data.toString().trim();
    if (query.toLowerCase() === 'exit') {
      process.exit(0);
    }
    try {
      const response = await retrieveInformation(query);
      console.log(`Chatbot: ${response}`);
    } catch (error) {
      console.error('Error:', error.message);
    }
  });
}

// Terminate the top-level promise: previously a failure in startup
// (missing ./notes, model download error) was an unhandled rejection.
chatbot().catch((error) => {
  console.error('Fatal:', error.message);
  process.exit(1);
});