<!DOCTYPE html>
<html lang="en"> | |
<head> | |
<meta charset="UTF-8"> | |
<meta name="viewport" content="width=device-width, initial-scale=1.0"> | |
<title>🤖 AI-Powered Document Search & RAG Chat</title> | |
<script type="module"> | |
// Import Transformers.js 3.0.0 from the jsDelivr CDN (published under the official @huggingface npm scope as of v3)
import { pipeline, env } from 'https://cdn.jsdelivr.net/npm/@huggingface/transformers@3.0.0'; | |
// Make available globally | |
window.transformers = { pipeline, env }; | |
window.transformersLoaded = true; | |
console.log('✅ Transformers.js 3.0.0 loaded via ES modules (Hugging Face)'); | |
</script> | |
<script src="https://cdn.jsdelivr.net/npm/@huggingface/transformers@3.0.0/dist/transformers.min.js"></script> | |
<style> | |
* { margin: 0; padding: 0; box-sizing: border-box; } | |
body { | |
font-family: -apple-system, BlinkMacSystemFont, 'Segoe UI', Roboto, sans-serif; | |
background: linear-gradient(135deg, #667eea 0%, #764ba2 100%); | |
min-height: 100vh; | |
padding: 20px; | |
} | |
.container { | |
max-width: 1200px; | |
margin: 0 auto; | |
background: white; | |
border-radius: 20px; | |
box-shadow: 0 20px 60px rgba(0,0,0,0.1); | |
overflow: hidden; | |
} | |
.header { | |
background: linear-gradient(135deg, #667eea 0%, #764ba2 100%); | |
color: white; | |
padding: 30px; | |
text-align: center; | |
} | |
.header h1 { font-size: 2.5em; margin-bottom: 10px; } | |
.header p { font-size: 1.2em; opacity: 0.9; } | |
.status { | |
background: #f8f9fa; | |
padding: 15px 30px; | |
border-bottom: 1px solid #e9ecef; | |
font-weight: 600; | |
color: #495057; | |
} | |
.tabs { | |
display: flex; | |
background: #f8f9fa; | |
border-bottom: 1px solid #e9ecef; | |
} | |
.tab { | |
flex: 1; | |
padding: 15px 20px; | |
background: none; | |
border: none; | |
cursor: pointer; | |
font-weight: 600; | |
font-size: 14px; | |
transition: all 0.3s; | |
border-bottom: 3px solid transparent; | |
} | |
.tab:hover { background: #e9ecef; } | |
.tab.active { background: white; border-bottom-color: #667eea; color: #667eea; } | |
.tab-content { | |
display: none; | |
padding: 30px; | |
} | |
.tab-content.active { display: block; } | |
.form-group { | |
margin-bottom: 20px; | |
} | |
label { | |
display: block; | |
margin-bottom: 5px; | |
font-weight: 600; | |
color: #495057; | |
} | |
input, textarea, select { | |
width: 100%; | |
padding: 12px; | |
border: 2px solid #e9ecef; | |
border-radius: 8px; | |
font-size: 16px; | |
transition: border-color 0.3s; | |
} | |
input:focus, textarea:focus, select:focus { | |
outline: none; | |
border-color: #667eea; | |
} | |
button { | |
background: linear-gradient(135deg, #667eea 0%, #764ba2 100%); | |
color: white; | |
border: none; | |
padding: 12px 24px; | |
border-radius: 8px; | |
font-size: 16px; | |
font-weight: 600; | |
cursor: pointer; | |
transition: transform 0.2s; | |
} | |
button:hover { transform: translateY(-2px); } | |
button:disabled { opacity: 0.6; cursor: not-allowed; transform: none; } | |
.btn-secondary { | |
background: linear-gradient(135deg, #6c757d 0%, #495057 100%); | |
} | |
.result { | |
background: #f8f9fa; | |
border: 1px solid #e9ecef; | |
border-radius: 8px; | |
padding: 20px; | |
margin-top: 15px; | |
white-space: pre-wrap; | |
max-height: 400px; | |
overflow-y: auto; | |
} | |
.upload-section { | |
margin-bottom: 30px; | |
} | |
.upload-area { | |
border: 2px dashed #007bff; | |
border-radius: 12px; | |
padding: 40px; | |
text-align: center; | |
background: #f8f9ff; | |
cursor: pointer; | |
transition: all 0.3s ease; | |
margin: 20px 0; | |
} | |
.upload-area:hover { | |
border-color: #0056b3; | |
background: #e3f2fd; | |
} | |
.upload-area.dragover { | |
border-color: #28a745; | |
background: #e8f5e8; | |
} | |
.upload-content { | |
pointer-events: none; | |
} | |
.upload-icon { | |
font-size: 48px; | |
margin-bottom: 15px; | |
} | |
.upload-text { | |
color: #666; | |
font-size: 16px; | |
} | |
.divider { | |
text-align: center; | |
margin: 30px 0; | |
position: relative; | |
color: #666; | |
font-weight: bold; | |
background: white; | |
padding: 0 20px; | |
display: inline-block; | |
width: 100%; | |
} | |
.divider::before { | |
content: ''; | |
position: absolute; | |
top: 50%; | |
left: 0; | |
right: 0; | |
height: 1px; | |
background: #ddd; | |
z-index: 1; | |
} | |
.manual-entry { | |
margin-top: 20px; | |
} | |
.progress-container { | |
background: #f0f0f0; | |
border-radius: 6px; | |
margin: 15px 0; | |
overflow: hidden; | |
position: relative; | |
} | |
.progress-container .progress-bar {
background: linear-gradient(45deg, #007bff, #0056b3); | |
height: 20px; | |
border-radius: 6px; | |
transition: width 0.3s ease; | |
width: 0%; | |
} | |
.progress-text { | |
position: absolute; | |
top: 50%; | |
left: 50%; | |
transform: translate(-50%, -50%); | |
font-size: 12px; | |
font-weight: bold; | |
color: #333; | |
white-space: nowrap; | |
} | |
.grid { | |
display: grid; | |
grid-template-columns: 1fr 1fr; | |
gap: 20px; | |
} | |
.alert { | |
padding: 15px; | |
border-radius: 8px; | |
margin-bottom: 20px; | |
} | |
.alert-info { | |
background: #d1ecf1; | |
border: 1px solid #b8daff; | |
color: #0c5460; | |
} | |
.alert-success { | |
background: #d4edda; | |
border: 1px solid #c3e6cb; | |
color: #155724; | |
} | |
.alert-warning { | |
background: #fff3cd; | |
border: 1px solid #ffeeba; | |
color: #856404; | |
} | |
.slider-container { | |
display: flex; | |
align-items: center; | |
gap: 15px; | |
} | |
.slider { | |
flex: 1; | |
} | |
.slider-value { | |
min-width: 40px; | |
text-align: center; | |
font-weight: 600; | |
color: #667eea; | |
} | |
.loading { | |
display: inline-block; | |
width: 20px; | |
height: 20px; | |
border: 2px solid #f3f3f3; | |
border-top: 2px solid #667eea; | |
border-radius: 50%; | |
animation: spin 1s linear infinite; | |
} | |
.progress { | |
width: 100%; | |
height: 8px; | |
background: #e9ecef; | |
border-radius: 4px; | |
overflow: hidden; | |
margin: 10px 0; | |
} | |
.progress .progress-bar {
height: 100%; | |
background: linear-gradient(135deg, #667eea 0%, #764ba2 100%); | |
transition: width 0.3s ease; | |
} | |
@keyframes spin { | |
0% { transform: rotate(0deg); } | |
100% { transform: rotate(360deg); } | |
} | |
.model-info { | |
background: #e8f4f8; | |
border: 1px solid #bee5eb; | |
border-radius: 8px; | |
padding: 15px; | |
margin: 15px 0; | |
} | |
.model-info h4 { | |
color: #0c5460; | |
margin-bottom: 8px; | |
} | |
.model-info p { | |
color: #0c5460; | |
font-size: 14px; | |
margin: 5px 0; | |
} | |
</style> | |
</head> | |
<body> | |
<div class="container"> | |
<div class="header"> | |
<h1>🤖 AI-Powered Document Search & RAG Chat</h1> | |
<p>Real transformer models running in your browser with Transformers.js</p> | |
</div> | |
<div class="status" id="status"> | |
📊 Documents: 3 | 🤖 AI Models: Not loaded | 🧠 Embedding Model: Not loaded | |
</div> | |
<div class="tabs"> | |
<button class="tab active" onclick="showTab('init')">🚀 Initialize AI</button> | |
<button class="tab" onclick="showTab('chat')">🤖 AI Chat (RAG)</button> | |
<button class="tab" onclick="showTab('llm')">🚀 LLM Chat</button> | |
<button class="tab" onclick="showTab('search')">🔍 Semantic Search</button> | |
<button class="tab" onclick="showTab('add')">📝 Add Documents</button> | |
<button class="tab" onclick="showTab('test')">🧪 System Test</button> | |
</div> | |
<!-- Initialize AI Tab --> | |
<div id="init" class="tab-content active"> | |
<div class="alert alert-info"> | |
<strong>🚀 Real AI Models!</strong> This system uses actual transformer models via Transformers.js. | |
</div> | |
<div class="model-info"> | |
<h4>🧠 Models Being Loaded:</h4> | |
<p><strong>Embedding Model:</strong> Xenova/all-MiniLM-L6-v2 (384-dimensional sentence embeddings)</p> | |
<p><strong>Q&A Model:</strong> Xenova/distilbert-base-cased-distilled-squad (Question Answering)</p> | |
<p><strong>LLM Model:</strong> Auto-selected GPT-2 or DistilGPT-2 (Transformers.js 3.0.0)</p> | |
<p><strong>Size:</strong> ~100MB total (cached after first load)</p> | |
<p><strong>Performance:</strong> CPU inference, ~2-8 seconds per operation</p> | |
<p><strong>Status:</strong> <span id="transformersStatus">⏳ Loading library...</span></p> | |
</div> | |
<div class="alert alert-warning"> | |
<strong>⚠️ First Load:</strong> Model downloading may take 1-2 minutes depending on your internet connection. Models are cached for subsequent uses. | |
</div> | |
<button onclick="initializeModels()" id="initBtn" style="font-size: 18px; padding: 15px 30px;"> | |
🚀 Initialize Real AI Models | |
</button> | |
<div id="initProgress" style="display: none;"> | |
<div class="progress"> | |
<div class="progress-bar" id="progressBar" style="width: 0%"></div> | |
</div> | |
<p id="progressText">Preparing to load models...</p> | |
</div> | |
<div id="initStatus" class="result" style="display: none;"></div> | |
</div> | |
<!-- AI Chat Tab --> | |
<div id="chat" class="tab-content"> | |
<div class="alert alert-info"> | |
<strong>🤖 Real AI Chat!</strong> Ask questions and get answers from actual transformer models. | |
</div> | |
<div class="alert alert-success"> | |
<strong>💡 Try asking:</strong><br> | |
• "What is artificial intelligence?"<br> | |
• "How does space exploration work?"<br> | |
• "What are renewable energy sources?"<br> | |
• "Explain machine learning in simple terms" | |
</div> | |
<div class="grid"> | |
<div> | |
<label for="chatQuestion">Your Question</label> | |
<textarea id="chatQuestion" rows="3" placeholder="Ask anything about the documents..."></textarea> | |
</div> | |
<div> | |
<label for="maxContext">Context Documents</label> | |
<div class="slider-container"> | |
<input type="range" id="maxContext" class="slider" min="1" max="5" value="3" oninput="updateSliderValue('maxContext')"> | |
<span id="maxContextValue" class="slider-value">3</span> | |
</div> | |
</div> | |
</div> | |
<button onclick="chatWithRAG()" id="chatBtn">🤖 Ask AI</button> | |
<div id="chatResponse" class="result" style="display: none;"></div> | |
</div> | |
<!-- LLM Chat Tab --> | |
<div id="llm" class="tab-content"> | |
<div class="alert alert-info"> | |
<strong>🚀 Pure LLM Chat!</strong> Chat with a language model (GPT-2 or DistilGPT-2) running entirely in your browser.
</div> | |
<div class="alert alert-success"> | |
<strong>💡 Try these prompts:</strong><br> | |
• "Tell me a story about space exploration"<br> | |
• "Explain machine learning in simple terms"<br> | |
• "Write a poem about artificial intelligence"<br> | |
• "What are the benefits of renewable energy?" | |
</div> | |
<div class="grid"> | |
<div> | |
<label for="llmPrompt">Your Prompt</label> | |
<textarea id="llmPrompt" rows="3" placeholder="Enter your prompt for the language model..."></textarea> | |
</div> | |
<div> | |
<label for="maxTokens">Max Tokens</label> | |
<div class="slider-container"> | |
<input type="range" id="maxTokens" class="slider" min="20" max="200" value="100" oninput="updateSliderValue('maxTokens')"> | |
<span id="maxTokensValue" class="slider-value">100</span> | |
</div> | |
<label for="temperature">Temperature</label> | |
<div class="slider-container"> | |
<input type="range" id="temperature" class="slider" min="0.1" max="1.5" step="0.1" value="0.7" oninput="updateSliderValue('temperature')"> | |
<span id="temperatureValue" class="slider-value">0.7</span> | |
</div> | |
</div> | |
</div> | |
<div style="display: flex; gap: 10px;"> | |
<button onclick="chatWithLLM()" id="llmBtn">🚀 Generate Text</button> | |
<button class="btn-secondary" onclick="chatWithLLMRAG()" id="llmRagBtn">🤖 LLM + RAG</button> | |
</div> | |
<div id="llmResponse" class="result" style="display: none;"></div> | |
</div> | |
<!-- Semantic Search Tab --> | |
<div id="search" class="tab-content"> | |
<div class="alert alert-info"> | |
<strong>🔮 Real semantic search!</strong> Using transformer embeddings to find documents by meaning. | |
</div> | |
<div class="grid"> | |
<div> | |
<label for="searchQuery">Search Query</label> | |
<input type="text" id="searchQuery" placeholder="Try: 'machine learning', 'Mars missions', 'solar power'"> | |
</div> | |
<div> | |
<label for="maxResults">Max Results</label> | |
<div class="slider-container"> | |
<input type="range" id="maxResults" class="slider" min="1" max="10" value="5" oninput="updateSliderValue('maxResults')"> | |
<span id="maxResultsValue" class="slider-value">5</span> | |
</div> | |
</div> | |
</div> | |
<div style="display: flex; gap: 10px;"> | |
<button onclick="searchDocumentsSemantic()" id="searchBtn">🔮 Semantic Search</button> | |
<button class="btn-secondary" onclick="searchDocumentsKeyword()">🔤 Keyword Search</button> | |
</div> | |
<div id="searchResults" class="result" style="display: none;"></div> | |
</div> | |
<!-- Add Documents Tab --> | |
<div id="add" class="tab-content"> | |
<div class="alert alert-info"> | |
<strong>📚 Expand your knowledge base!</strong> Upload files or paste text with real AI embeddings. | |
</div> | |
<!-- File Upload Section --> | |
<div class="upload-section"> | |
<h4>📁 Upload Files</h4> | |
<div class="upload-area" id="uploadArea"> | |
<div class="upload-content"> | |
<div class="upload-icon">📄</div> | |
<div class="upload-text"> | |
<strong>Drop files here or click to select</strong> | |
<br>Supports: .md, .txt, .json, .csv, .html, .js, .py, .xml | |
</div> | |
</div> | |
<input type="file" id="fileInput" accept=".md,.txt,.json,.csv,.html,.js,.py,.xml,.rst,.yaml,.yml" multiple style="display: none;"> | |
</div> | |
<div id="uploadProgress" class="progress-container" style="display: none;"> | |
<div class="progress-bar" id="uploadProgressBar"></div> | |
<div class="progress-text" id="uploadProgressText">Processing files...</div> | |
</div> | |
<div id="uploadStatus" class="result" style="display: none;"></div> | |
</div> | |
<div class="divider">OR</div> | |
<!-- Manual Entry Section --> | |
<div class="manual-entry"> | |
<h4>✏️ Manual Entry</h4> | |
<div class="form-group"> | |
<label for="docTitle">Document Title (optional)</label> | |
<input type="text" id="docTitle" placeholder="Enter document title..."> | |
</div> | |
<div class="form-group"> | |
<label for="docContent">Document Content</label> | |
<textarea id="docContent" rows="8" placeholder="Paste your document text here..."></textarea> | |
</div> | |
<button onclick="addDocumentManual()" id="addBtn">📝 Add Document</button> | |
<div class="grid"> | |
<div id="addStatus" class="result" style="display: none;"></div> | |
<div id="docPreview" class="result" style="display: none;"></div> | |
</div> | |
</div> | |
</div> | |
<!-- System Test Tab --> | |
<div id="test" class="tab-content"> | |
<div class="alert alert-info"> | |
<strong>🧪 Test the system</strong> to verify AI models are working correctly. | |
</div> | |
<button onclick="testSystem()" id="testBtn">🧪 Run System Test</button> | |
<div id="testOutput" class="result" style="display: none;"></div> | |
</div> | |
</div> | |
<script> | |
// Global variables for transformers.js | |
let pipeline = null; | |
let env = null; | |
let transformersReady = false; | |
// Initialize transformers.js when the script loads | |
async function initTransformers() { | |
try { | |
console.log('🔄 Initializing Transformers.js...'); | |
// Try ES modules first (preferred method) | |
if (window.transformers && window.transformersLoaded) { | |
console.log('✅ Using ES modules version (Transformers.js 3.0.0)'); | |
({ pipeline, env } = window.transformers); | |
} | |
// Fallback to UMD version | |
else if (window.Transformers) { | |
console.log('✅ Using UMD version (Transformers.js 3.0.0)'); | |
({ pipeline, env } = window.Transformers); | |
} | |
// Wait for library to load | |
else { | |
console.log('⏳ Waiting for library to load...'); | |
let attempts = 0; | |
while (!window.Transformers && !window.transformersLoaded && attempts < 50) { | |
await new Promise(resolve => setTimeout(resolve, 200)); | |
attempts++; | |
} | |
if (window.transformers && window.transformersLoaded) { | |
({ pipeline, env } = window.transformers); | |
} else if (window.Transformers) { | |
({ pipeline, env } = window.Transformers); | |
} else { | |
throw new Error('Failed to load Transformers.js library'); | |
} | |
} | |
// Configure transformers.js with minimal settings | |
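// allowLocalModels = false skips probing for locally hosted model files (the default /models/ path),
// while allowRemoteModels = true lets pipelines download weights directly from the Hugging Face Hub.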
if (env) { | |
env.allowLocalModels = false; | |
env.allowRemoteModels = true; | |
// Let Transformers.js use default WASM paths for better compatibility | |
} | |
transformersReady = true; | |
console.log('✅ Transformers.js initialized successfully'); | |
// Update UI to show ready state | |
updateStatus(); | |
// Update status indicator | |
const statusSpan = document.getElementById('transformersStatus'); | |
if (statusSpan) { | |
statusSpan.textContent = '✅ Ready!'; | |
statusSpan.style.color = 'green'; | |
} | |
} catch (error) { | |
console.error('❌ Error initializing Transformers.js:', error); | |
// Show error in UI | |
const statusDiv = document.getElementById('status'); | |
if (statusDiv) { | |
statusDiv.textContent = `❌ Failed to load Transformers.js: ${error.message}`; | |
statusDiv.style.color = 'red'; | |
} | |
// Update status indicator | |
const statusSpan = document.getElementById('transformersStatus'); | |
if (statusSpan) { | |
statusSpan.textContent = `❌ Failed: ${error.message}`; | |
statusSpan.style.color = 'red'; | |
} | |
} | |
} | |
// Initialize when page loads | |
document.addEventListener('DOMContentLoaded', function() { | |
initTransformers(); | |
initFileUpload(); | |
}); | |
// Document storage and AI state | |
let documents = [ | |
{ | |
id: 0, | |
title: "Artificial Intelligence Overview", | |
content: "Artificial Intelligence (AI) is a branch of computer science that aims to create intelligent machines that work and react like humans. Some activities computers with AI are designed for include speech recognition, learning, planning, and problem-solving. AI is used in healthcare, finance, transportation, and entertainment. Machine learning enables computers to learn from experience without explicit programming. Deep learning uses neural networks to understand complex patterns in data.", | |
embedding: null | |
}, | |
{ | |
id: 1, | |
title: "Space Exploration", | |
content: "Space exploration is the ongoing discovery and exploration of celestial structures in outer space through evolving space technology. Physical exploration is conducted by unmanned robotic probes and human spaceflight. Space exploration has been used for geopolitical rivalries like the Cold War. The early era was driven by a Space Race between the Soviet Union and United States. Modern exploration includes Mars missions, the International Space Station, and satellite programs.", | |
embedding: null | |
}, | |
{ | |
id: 2, | |
title: "Renewable Energy", | |
content: "Renewable energy comes from naturally replenished resources on a human timescale. It includes sunlight, wind, rain, tides, waves, and geothermal heat. Renewable energy contrasts with fossil fuels that are used faster than replenished. Most renewable sources are sustainable. Solar energy is abundant and promising. Wind energy and hydroelectric power are major contributors to renewable generation worldwide.", | |
embedding: null | |
} | |
]; | |
let embeddingModel = null; | |
let qaModel = null; | |
let llmModel = null; | |
let loadedModelName = ''; | |
let modelsInitialized = false; | |
// Calculate cosine similarity between two vectors | |
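// cos(a, b) = (a · b) / (‖a‖ ‖b‖), a score in [-1, 1] where 1 means the vectors point the same way.
// The embeddings below are generated with normalize: true, so this effectively reduces to a dot product.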
function cosineSimilarity(a, b) { | |
const dotProduct = a.reduce((sum, val, i) => sum + val * b[i], 0); | |
const magnitudeA = Math.sqrt(a.reduce((sum, val) => sum + val * val, 0)); | |
const magnitudeB = Math.sqrt(b.reduce((sum, val) => sum + val * val, 0)); | |
if (magnitudeA === 0 || magnitudeB === 0) return 0; | |
return dotProduct / (magnitudeA * magnitudeB); | |
} | |
// UI Functions | |
function showTab(tabName, tabButton) {
// Hide all tab panels and deactivate all tab buttons
document.querySelectorAll('.tab-content').forEach(tab => {
tab.classList.remove('active');
});
document.querySelectorAll('.tab').forEach(button => {
button.classList.remove('active');
});
// Show the selected panel and highlight the clicked tab button
document.getElementById(tabName).classList.add('active');
(tabButton || event.target).classList.add('active');
}
function updateSliderValue(sliderId) { | |
const slider = document.getElementById(sliderId); | |
const valueSpan = document.getElementById(sliderId + 'Value'); | |
valueSpan.textContent = slider.value; | |
} | |
function updateStatus() { | |
const status = document.getElementById('status'); | |
const transformersStatus = transformersReady ? 'Ready' : 'Not ready'; | |
const embeddingStatus = embeddingModel ? 'Loaded' : 'Not loaded'; | |
const qaStatus = qaModel ? 'Loaded' : 'Not loaded'; | |
const llmStatus = llmModel ? 'Loaded' : 'Not loaded'; | |
status.textContent = `📊 Documents: ${documents.length} | 🔧 Transformers.js: ${transformersStatus} | 🤖 QA: ${qaStatus} | 🧠 Embedding: ${embeddingStatus} | 🚀 LLM: ${llmStatus}`; | |
} | |
function updateProgress(percent, text) { | |
const progressBar = document.getElementById('progressBar'); | |
const progressText = document.getElementById('progressText'); | |
progressBar.style.width = percent + '%'; | |
progressText.textContent = text; | |
} | |
// AI Functions | |
async function initializeModels() { | |
const statusDiv = document.getElementById('initStatus'); | |
const progressDiv = document.getElementById('initProgress'); | |
const initBtn = document.getElementById('initBtn'); | |
statusDiv.style.display = 'block'; | |
progressDiv.style.display = 'block'; | |
initBtn.disabled = true; | |
try { | |
// Check if transformers.js is ready | |
if (!transformersReady || !pipeline) { | |
updateProgress(5, "Waiting for Transformers.js to initialize..."); | |
statusDiv.innerHTML = '🔄 Initializing Transformers.js library...'; | |
// Wait for transformers.js to be ready | |
let attempts = 0; | |
while (!transformersReady && attempts < 30) { | |
await new Promise(resolve => setTimeout(resolve, 1000)); | |
attempts++; | |
} | |
if (!transformersReady) { | |
throw new Error('Transformers.js failed to initialize. Please refresh the page.'); | |
} | |
} | |
updateProgress(10, "Loading embedding model..."); | |
statusDiv.innerHTML = '🔄 Loading embedding model (Xenova/all-MiniLM-L6-v2)...'; | |
// Load embedding model with progress tracking | |
embeddingModel = await pipeline('feature-extraction', 'Xenova/all-MiniLM-L6-v2', { | |
progress_callback: (progress) => { | |
if (progress.status === 'downloading') { | |
const percent = progress.loaded && progress.total ? | |
Math.round((progress.loaded / progress.total) * 100) : 0; | |
statusDiv.innerHTML = `🔄 Downloading embedding model: ${percent}%`; | |
} | |
} | |
}); | |
updateProgress(40, "Loading question-answering model..."); | |
statusDiv.innerHTML = '🔄 Loading QA model (Xenova/distilbert-base-cased-distilled-squad)...'; | |
// Load QA model with progress tracking | |
qaModel = await pipeline('question-answering', 'Xenova/distilbert-base-cased-distilled-squad', { | |
progress_callback: (progress) => { | |
if (progress.status === 'downloading') { | |
const percent = progress.loaded && progress.total ? | |
Math.round((progress.loaded / progress.total) * 100) : 0; | |
statusDiv.innerHTML = `🔄 Downloading QA model: ${percent}%`; | |
} | |
} | |
}); | |
updateProgress(70, "Loading language model..."); | |
statusDiv.innerHTML = '🔄 Loading LLM (trying GPT-2 family models)...';
// Load LLM model - try GPT-2 first, then fall back to DistilGPT-2 (Transformers.js 3.0.0)
const modelsToTry = [ | |
{ | |
name: 'Xenova/gpt2', | |
options: {} | |
}, | |
{ | |
name: 'Xenova/distilgpt2', | |
options: {} | |
} | |
]; | |
let modelLoaded = false; | |
for (const model of modelsToTry) { | |
try { | |
console.log(`Trying to load ${model.name}...`); | |
statusDiv.innerHTML = `🔄 Loading LLM (${model.name})...`; | |
// Load LLM with progress tracking | |
llmModel = await pipeline('text-generation', model.name, { | |
progress_callback: (progress) => { | |
if (progress.status === 'downloading') { | |
const percent = progress.loaded && progress.total ? | |
Math.round((progress.loaded / progress.total) * 100) : 0; | |
statusDiv.innerHTML = `🔄 Downloading ${model.name}: ${percent}%`; | |
} | |
} | |
}); | |
console.log(`✅ Successfully loaded ${model.name}`); | |
loadedModelName = model.name; | |
modelLoaded = true; | |
break; | |
} catch (error) { | |
console.warn(`${model.name} failed:`, error); | |
} | |
} | |
if (!modelLoaded) { | |
throw new Error('Failed to load any LLM model'); | |
} | |
updateProgress(85, "Generating embeddings for documents..."); | |
statusDiv.innerHTML = '🔄 Generating embeddings for existing documents...'; | |
// Generate embeddings for all existing documents | |
for (let i = 0; i < documents.length; i++) { | |
const doc = documents[i]; | |
updateProgress(85 + (i / documents.length) * 10, `Processing document ${i + 1}/${documents.length}...`); | |
doc.embedding = await generateEmbedding(doc.content); | |
} | |
updateProgress(100, "Initialization complete!"); | |
modelsInitialized = true; | |
statusDiv.innerHTML = `✅ AI Models initialized successfully! | |
🧠 Embedding Model: Xenova/all-MiniLM-L6-v2 (384 dimensions) | |
🤖 QA Model: Xenova/distilbert-base-cased-distilled-squad | |
🚀 LLM Model: ${loadedModelName} (Language model for text generation) | |
📚 Documents processed: ${documents.length} | |
🔮 Ready for semantic search, Q&A, and LLM chat! | |
📊 Model Info: | |
• Embedding model size: ~23MB | |
• QA model size: ~28MB | |
• LLM model size: ~15-50MB (depending on model loaded) | |
• Total memory usage: ~70-100MB | |
• Inference speed: ~2-8 seconds per operation`; | |
updateStatus(); | |
} catch (error) { | |
console.error('Error initializing models:', error); | |
statusDiv.innerHTML = `❌ Error initializing models: ${error.message} | |
Please check your internet connection and try again.`; | |
updateProgress(0, "Initialization failed"); | |
} finally { | |
initBtn.disabled = false; | |
setTimeout(() => { | |
progressDiv.style.display = 'none'; | |
}, 2000); | |
} | |
} | |
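// Embeds a text with the feature-extraction pipeline: token embeddings are mean-pooled and
// L2-normalized, yielding a 384-dimensional unit vector for all-MiniLM-L6-v2.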
async function generateEmbedding(text) { | |
if (!transformersReady || !pipeline) { | |
throw new Error('Transformers.js not initialized'); | |
} | |
if (!embeddingModel) { | |
throw new Error('Embedding model not loaded'); | |
} | |
try { | |
const output = await embeddingModel(text, { pooling: 'mean', normalize: true }); | |
return Array.from(output.data); | |
} catch (error) { | |
console.error('Error generating embedding:', error); | |
throw error; | |
} | |
} | |
async function searchDocumentsSemantic() { | |
const query = document.getElementById('searchQuery').value; | |
const maxResults = parseInt(document.getElementById('maxResults').value); | |
const resultsDiv = document.getElementById('searchResults'); | |
const searchBtn = document.getElementById('searchBtn'); | |
if (!query.trim()) { | |
resultsDiv.style.display = 'block'; | |
resultsDiv.textContent = '❌ Please enter a search query'; | |
return; | |
} | |
if (!transformersReady || !modelsInitialized || !embeddingModel) { | |
resultsDiv.style.display = 'block'; | |
resultsDiv.textContent = '❌ Please initialize AI models first!'; | |
return; | |
} | |
resultsDiv.style.display = 'block'; | |
resultsDiv.innerHTML = '<div class="loading"></div> Generating query embedding and searching...'; | |
searchBtn.disabled = true; | |
try { | |
// Generate embedding for query | |
const queryEmbedding = await generateEmbedding(query); | |
// Calculate similarities | |
const results = []; | |
documents.forEach(doc => { | |
if (doc.embedding) { | |
const similarity = cosineSimilarity(queryEmbedding, doc.embedding); | |
results.push({ doc, similarity }); | |
} | |
}); | |
// Sort by similarity | |
results.sort((a, b) => b.similarity - a.similarity); | |
if (results.length === 0) { | |
resultsDiv.textContent = `❌ No documents with embeddings found for '${query}'`; | |
return; | |
} | |
let output = `🔍 Semantic search results for '${query}':\n\n`; | |
results.slice(0, maxResults).forEach((result, i) => { | |
const doc = result.doc; | |
const similarity = result.similarity; | |
const excerpt = doc.content.length > 200 ? doc.content.substring(0, 200) + '...' : doc.content; | |
output += `**Result ${i + 1}** (similarity: ${similarity.toFixed(3)})\n📄 Title: ${doc.title}\n📝 Content: ${excerpt}\n\n`; | |
}); | |
resultsDiv.textContent = output; | |
} catch (error) { | |
console.error('Search error:', error); | |
resultsDiv.textContent = `❌ Error during search: ${error.message}`; | |
} finally { | |
searchBtn.disabled = false; | |
} | |
} | |
function searchDocumentsKeyword() { | |
const query = document.getElementById('searchQuery').value; | |
const maxResults = parseInt(document.getElementById('maxResults').value); | |
const resultsDiv = document.getElementById('searchResults'); | |
if (!query.trim()) { | |
resultsDiv.style.display = 'block'; | |
resultsDiv.textContent = '❌ Please enter a search query'; | |
return; | |
} | |
resultsDiv.style.display = 'block'; | |
resultsDiv.innerHTML = '<div class="loading"></div> Searching keywords...'; | |
setTimeout(() => { | |
const results = []; | |
const queryWords = query.toLowerCase().split(/\s+/); | |
documents.forEach(doc => { | |
const contentLower = doc.content.toLowerCase(); | |
const titleLower = doc.title.toLowerCase(); | |
let matches = 0; | |
queryWords.forEach(word => {
// Escape regex metacharacters so queries like "c++" or "node.js" don't throw
const escaped = word.replace(/[.*+?^${}()|[\]\\]/g, '\\$&');
matches += (contentLower.match(new RegExp(escaped, 'g')) || []).length;
matches += (titleLower.match(new RegExp(escaped, 'g')) || []).length * 2; // title matches weighted double
});
if (matches > 0) { | |
results.push({ doc, score: matches }); | |
} | |
}); | |
results.sort((a, b) => b.score - a.score); | |
if (results.length === 0) { | |
resultsDiv.textContent = `❌ No documents found containing '${query}'`; | |
return; | |
} | |
let output = `🔍 Keyword search results for '${query}':\n\n`; | |
results.slice(0, maxResults).forEach((result, i) => { | |
const doc = result.doc; | |
const excerpt = doc.content.length > 200 ? doc.content.substring(0, 200) + '...' : doc.content; | |
output += `**Result ${i + 1}**\n📄 Title: ${doc.title}\n📝 Content: ${excerpt}\n\n`; | |
}); | |
resultsDiv.textContent = output; | |
}, 500); | |
} | |
async function chatWithRAG() { | |
const question = document.getElementById('chatQuestion').value; | |
const maxContext = parseInt(document.getElementById('maxContext').value); | |
const responseDiv = document.getElementById('chatResponse'); | |
const chatBtn = document.getElementById('chatBtn'); | |
if (!question.trim()) { | |
responseDiv.style.display = 'block'; | |
responseDiv.textContent = '❌ Please enter a question'; | |
return; | |
} | |
if (!transformersReady || !modelsInitialized || !embeddingModel || !qaModel) { | |
responseDiv.style.display = 'block'; | |
responseDiv.textContent = '❌ AI models not loaded yet. Please initialize them first!'; | |
return; | |
} | |
responseDiv.style.display = 'block'; | |
responseDiv.innerHTML = '<div class="loading"></div> Generating answer with real AI...'; | |
chatBtn.disabled = true; | |
try { | |
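// RAG flow: 1) embed the question, 2) rank documents by cosine similarity (keep those above 0.1,
// then the top `maxContext`), 3) concatenate their text into a context capped at 2000 characters,
// 4) run extractive QA over that context to pull out an answer span with a confidence score.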
// Generate embedding for the question | |
const questionEmbedding = await generateEmbedding(question); | |
// Find relevant documents using semantic similarity | |
const relevantDocs = []; | |
documents.forEach(doc => { | |
if (doc.embedding) { | |
const similarity = cosineSimilarity(questionEmbedding, doc.embedding); | |
if (similarity > 0.1) { | |
relevantDocs.push({ doc, similarity }); | |
} | |
} | |
}); | |
relevantDocs.sort((a, b) => b.similarity - a.similarity); | |
relevantDocs.splice(maxContext); | |
if (relevantDocs.length === 0) { | |
responseDiv.textContent = '❌ No relevant context found in the documents for your question.'; | |
return; | |
} | |
// Combine context from top documents | |
const context = relevantDocs.map(item => item.doc.content).join(' ').substring(0, 2000); | |
// Use the QA model to generate an answer | |
const qaResult = await qaModel(question, context); | |
let response = `🤖 AI Answer:\n${qaResult.answer}\n\n`; | |
response += `📊 Confidence: ${(qaResult.score * 100).toFixed(1)}%\n\n`; | |
response += `📚 Sources: ${relevantDocs.length} documents\n`; | |
response += `🔍 Best match: "${relevantDocs[0].doc.title}" (similarity: ${relevantDocs[0].similarity.toFixed(3)})\n\n`; | |
response += `📝 Context used:\n${context.substring(0, 300)}...`; | |
responseDiv.textContent = response; | |
} catch (error) { | |
console.error('Chat error:', error); | |
responseDiv.textContent = `❌ Error generating response: ${error.message}`; | |
} finally { | |
chatBtn.disabled = false; | |
} | |
} | |
async function chatWithLLM() { | |
const prompt = document.getElementById('llmPrompt').value; | |
const maxTokens = parseInt(document.getElementById('maxTokens').value); | |
const temperature = parseFloat(document.getElementById('temperature').value); | |
const responseDiv = document.getElementById('llmResponse'); | |
const llmBtn = document.getElementById('llmBtn'); | |
if (!prompt.trim()) { | |
responseDiv.style.display = 'block'; | |
responseDiv.textContent = '❌ Please enter a prompt'; | |
return; | |
} | |
if (!transformersReady || !modelsInitialized || !llmModel) { | |
responseDiv.style.display = 'block'; | |
responseDiv.textContent = '❌ LLM model not loaded yet. Please initialize models first!'; | |
return; | |
} | |
responseDiv.style.display = 'block'; | |
responseDiv.innerHTML = '<div class="loading"></div> Generating text with LLM...'; | |
llmBtn.disabled = true; | |
try { | |
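// Generation settings: max_new_tokens caps the completion length, temperature scales the logits
// (higher = more random), do_sample switches from greedy decoding to sampling, and
// return_full_text: false returns only the newly generated text rather than prompt + completion.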
// Generate text with the LLM | |
const result = await llmModel(prompt, { | |
max_new_tokens: maxTokens, | |
temperature: temperature, | |
do_sample: true, | |
return_full_text: false | |
}); | |
let generatedText = result[0].generated_text; | |
let response = `🚀 LLM Generated Text:\n\n"${generatedText}"\n\n`; | |
response += `📊 Settings: ${maxTokens} tokens, temperature ${temperature}\n`; | |
response += `🤖 Model: ${loadedModelName ? loadedModelName.split('/')[1] : 'Language Model'}\n`; | |
response += `⏱️ Generated in real-time by your browser!`; | |
responseDiv.textContent = response; | |
} catch (error) { | |
console.error('LLM error:', error); | |
responseDiv.textContent = `❌ Error generating text: ${error.message}`; | |
} finally { | |
llmBtn.disabled = false; | |
} | |
} | |
async function chatWithLLMRAG() { | |
const prompt = document.getElementById('llmPrompt').value; | |
const maxTokens = parseInt(document.getElementById('maxTokens').value); | |
const temperature = parseFloat(document.getElementById('temperature').value); | |
const responseDiv = document.getElementById('llmResponse'); | |
const llmRagBtn = document.getElementById('llmRagBtn'); | |
if (!prompt.trim()) { | |
responseDiv.style.display = 'block'; | |
responseDiv.textContent = '❌ Please enter a prompt'; | |
return; | |
} | |
if (!transformersReady || !modelsInitialized || !llmModel || !embeddingModel) { | |
responseDiv.style.display = 'block'; | |
responseDiv.textContent = '❌ Models not loaded yet. Please initialize all models first!'; | |
return; | |
} | |
responseDiv.style.display = 'block'; | |
responseDiv.innerHTML = '<div class="loading"></div> Finding relevant context and generating with LLM...'; | |
llmRagBtn.disabled = true; | |
try { | |
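// LLM + RAG flow: embed the prompt, keep the top 3 most similar documents, prepend them in a
// "Context: ... Question: ... Answer:" template, then let the language model complete freely.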
// Find relevant documents using semantic search | |
const queryEmbedding = await generateEmbedding(prompt); | |
const relevantDocs = []; | |
documents.forEach(doc => { | |
if (doc.embedding) { | |
const similarity = cosineSimilarity(queryEmbedding, doc.embedding); | |
if (similarity > 0.1) { | |
relevantDocs.push({ doc, similarity }); | |
} | |
} | |
}); | |
relevantDocs.sort((a, b) => b.similarity - a.similarity); | |
relevantDocs.splice(3); // Limit to top 3 documents | |
// Create enhanced prompt with context | |
let enhancedPrompt = prompt; | |
if (relevantDocs.length > 0) { | |
const context = relevantDocs.map(item => item.doc.content.substring(0, 300)).join(' '); | |
enhancedPrompt = `Context: ${context}\n\nQuestion: ${prompt}\n\nAnswer:`; | |
} | |
// Generate text with the LLM using enhanced prompt | |
const result = await llmModel(enhancedPrompt, { | |
max_new_tokens: maxTokens, | |
temperature: temperature, | |
do_sample: true, | |
return_full_text: false | |
}); | |
let generatedText = result[0].generated_text; | |
let response = `🤖 LLM + RAG Generated Response:\n\n"${generatedText}"\n\n`; | |
response += `📚 Context: ${relevantDocs.length} relevant documents used\n`; | |
if (relevantDocs.length > 0) { | |
response += `🔍 Best match: "${relevantDocs[0].doc.title}" (similarity: ${relevantDocs[0].similarity.toFixed(3)})\n`; | |
} | |
response += `📊 Settings: ${maxTokens} tokens, temperature ${temperature}\n`; | |
response += `🚀 Model: ${loadedModelName ? loadedModelName.split('/')[1] : 'LLM'} enhanced with document retrieval`; | |
responseDiv.textContent = response; | |
} catch (error) { | |
console.error('LLM+RAG error:', error); | |
responseDiv.textContent = `❌ Error generating response: ${error.message}`; | |
} finally { | |
llmRagBtn.disabled = false; | |
} | |
} | |
async function addDocument() { | |
const title = document.getElementById('docTitle').value || `User Document ${documents.length - 2}`; | |
const content = document.getElementById('docContent').value; | |
const statusDiv = document.getElementById('addStatus'); | |
const previewDiv = document.getElementById('docPreview'); | |
const addBtn = document.getElementById('addBtn'); | |
if (!content.trim()) { | |
statusDiv.style.display = 'block'; | |
statusDiv.textContent = '❌ Please enter document content'; | |
previewDiv.style.display = 'none'; | |
return; | |
} | |
statusDiv.style.display = 'block'; | |
statusDiv.innerHTML = '<div class="loading"></div> Adding document...'; | |
addBtn.disabled = true; | |
try { | |
const docId = documents.length; | |
const newDocument = { | |
id: docId, | |
title: title, | |
content: content.trim(), | |
embedding: null | |
}; | |
// Generate embedding if models are initialized | |
if (transformersReady && modelsInitialized && embeddingModel) { | |
statusDiv.innerHTML = '<div class="loading"></div> Generating AI embedding...'; | |
newDocument.embedding = await generateEmbedding(content); | |
} | |
documents.push(newDocument); | |
const preview = content.length > 300 ? content.substring(0, 300) + '...' : content; | |
const status = `✅ Document added successfully! | |
📄 Title: ${title} | |
📊 Size: ${content.length.toLocaleString()} characters | |
📚 Total documents: ${documents.length}${(transformersReady && modelsInitialized) ? '\n🧠 AI embedding generated automatically' : '\n⚠️ AI embedding will be generated when models are loaded'}`; | |
statusDiv.textContent = status; | |
previewDiv.style.display = 'block'; | |
previewDiv.textContent = `📖 Preview:\n${preview}`; | |
// Clear form | |
document.getElementById('docTitle').value = ''; | |
document.getElementById('docContent').value = ''; | |
updateStatus(); | |
} catch (error) { | |
console.error('Error adding document:', error); | |
statusDiv.textContent = `❌ Error adding document: ${error.message}`; | |
} finally { | |
addBtn.disabled = false; | |
} | |
} | |
// File upload functionality | |
function initFileUpload() { | |
const uploadArea = document.getElementById('uploadArea'); | |
const fileInput = document.getElementById('fileInput'); | |
if (!uploadArea || !fileInput) return; | |
// Click to select files | |
uploadArea.addEventListener('click', () => { | |
fileInput.click(); | |
}); | |
// Drag and drop functionality | |
uploadArea.addEventListener('dragover', (e) => { | |
e.preventDefault(); | |
uploadArea.classList.add('dragover'); | |
}); | |
uploadArea.addEventListener('dragleave', (e) => { | |
e.preventDefault(); | |
uploadArea.classList.remove('dragover'); | |
}); | |
uploadArea.addEventListener('drop', (e) => { | |
e.preventDefault(); | |
uploadArea.classList.remove('dragover'); | |
const files = e.dataTransfer.files; | |
handleFiles(files); | |
}); | |
// File input change | |
fileInput.addEventListener('change', (e) => { | |
handleFiles(e.target.files); | |
}); | |
} | |
async function handleFiles(files) { | |
const uploadStatus = document.getElementById('uploadStatus'); | |
const uploadProgress = document.getElementById('uploadProgress'); | |
const uploadProgressBar = document.getElementById('uploadProgressBar'); | |
const uploadProgressText = document.getElementById('uploadProgressText'); | |
if (files.length === 0) return; | |
uploadStatus.style.display = 'block'; | |
uploadProgress.style.display = 'block'; | |
uploadStatus.textContent = ''; | |
let successCount = 0; | |
let errorCount = 0; | |
for (let i = 0; i < files.length; i++) { | |
const file = files[i]; | |
const progress = ((i + 1) / files.length) * 100; | |
uploadProgressBar.style.width = progress + '%'; | |
if (file.size > 2000) { // same threshold as the chunking check in processFile
uploadProgressText.textContent = `Processing large file: ${file.name} (${i + 1}/${files.length}) - chunking for better search...`; | |
} else { | |
uploadProgressText.textContent = `Processing ${file.name} (${i + 1}/${files.length})...`; | |
} | |
try { | |
await processFile(file); | |
successCount++; | |
} catch (error) { | |
console.error(`Error processing ${file.name}:`, error); | |
errorCount++; | |
} | |
} | |
uploadProgress.style.display = 'none'; | |
let statusText = `✅ Upload complete!\n📁 ${successCount} files processed successfully`; | |
if (errorCount > 0) { | |
statusText += `\n❌ ${errorCount} files failed to process`; | |
} | |
statusText += `\n📊 Total documents: ${documents.length}`; | |
statusText += `\n🧩 Large files automatically chunked for better search`; | |
uploadStatus.textContent = statusText; | |
updateStatus(); | |
// Clear file input | |
document.getElementById('fileInput').value = ''; | |
} | |
// Document chunking function for large files | |
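// Splits text on sentence boundaries (., !, ?) and greedily packs sentences into chunks of up to
// maxChunkSize characters with no overlap; each chunk becomes its own document and is embedded
// separately once the models are loaded.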
function chunkDocument(content, maxChunkSize = 1000) { | |
const sentences = content.split(/[.!?]+/).filter(s => s.trim().length > 0); | |
const chunks = []; | |
let currentChunk = ''; | |
for (let sentence of sentences) { | |
sentence = sentence.trim(); | |
if (currentChunk.length + sentence.length > maxChunkSize && currentChunk.length > 0) { | |
chunks.push(currentChunk.trim()); | |
currentChunk = sentence; | |
} else { | |
currentChunk += (currentChunk ? '. ' : '') + sentence; | |
} | |
} | |
if (currentChunk.trim()) { | |
chunks.push(currentChunk.trim()); | |
} | |
return chunks.length > 0 ? chunks : [content]; | |
} | |
async function processFile(file) { | |
return new Promise((resolve, reject) => { | |
const reader = new FileReader(); | |
reader.onload = async function(e) { | |
try { | |
const content = e.target.result.trim(); | |
const baseTitle = file.name.replace(/\.[^/.]+$/, ""); // Remove file extension | |
// Check if document is large and needs chunking | |
if (content.length > 2000) { | |
// Chunk large documents | |
const chunks = chunkDocument(content, 1500); | |
console.log(`📄 Chunking large file: ${chunks.length} chunks created from ${content.length} characters`); | |
for (let i = 0; i < chunks.length; i++) { | |
const chunkTitle = chunks.length > 1 ? `${baseTitle} (Part ${i + 1}/${chunks.length})` : baseTitle; | |
const newDocument = { | |
id: documents.length, | |
title: chunkTitle, | |
content: chunks[i], | |
embedding: null | |
}; | |
// Generate embedding if models are loaded | |
if (transformersReady && modelsInitialized && embeddingModel) { | |
newDocument.embedding = await generateEmbedding(chunks[i]); | |
} | |
documents.push(newDocument); | |
} | |
} else { | |
// Small document - process as single document | |
const newDocument = { | |
id: documents.length, | |
title: baseTitle, | |
content: content, | |
embedding: null | |
}; | |
// Generate embedding if models are loaded | |
if (transformersReady && modelsInitialized && embeddingModel) { | |
newDocument.embedding = await generateEmbedding(content); | |
} | |
documents.push(newDocument); | |
} | |
resolve(); | |
} catch (error) { | |
reject(error); | |
} | |
}; | |
reader.onerror = function() { | |
reject(new Error(`Failed to read file: ${file.name}`)); | |
}; | |
// Read file as text | |
reader.readAsText(file); | |
}); | |
} | |
async function testSystem() { | |
const outputDiv = document.getElementById('testOutput'); | |
const testBtn = document.getElementById('testBtn'); | |
outputDiv.style.display = 'block'; | |
outputDiv.innerHTML = '<div class="loading"></div> Running system tests...'; | |
testBtn.disabled = true; | |
try { | |
let output = `🧪 System Test Results:\n\n`; | |
output += `📊 Documents: ${documents.length} loaded\n`; | |
output += `🔧 Transformers.js: ${transformersReady ? '✅ Ready' : '❌ Not ready'}\n`; | |
output += `🧠 Embedding Model: ${embeddingModel ? '✅ Loaded' : '❌ Not loaded'}\n`; | |
output += `🤖 QA Model: ${qaModel ? '✅ Loaded' : '❌ Not loaded'}\n`; | |
output += `🚀 LLM Model: ${llmModel ? '✅ Loaded' : '❌ Not loaded'}\n\n`; | |
if (transformersReady && modelsInitialized && embeddingModel) { | |
output += `🔍 Testing embedding generation...\n`; | |
const testEmbedding = await generateEmbedding("test sentence"); | |
output += `✅ Embedding test: Generated ${testEmbedding.length}D vector\n\n`; | |
output += `🔍 Testing semantic search...\n`; | |
const testQuery = "artificial intelligence"; | |
const queryEmbedding = await generateEmbedding(testQuery); | |
let testResults = []; | |
documents.forEach(doc => { | |
if (doc.embedding) { | |
const similarity = cosineSimilarity(queryEmbedding, doc.embedding); | |
testResults.push({ doc, similarity }); | |
} | |
}); | |
testResults.sort((a, b) => b.similarity - a.similarity); | |
if (testResults.length > 0) { | |
output += `✅ Search test: Found ${testResults.length} results\n`; | |
output += `📄 Top result: "${testResults[0].doc.title}" (similarity: ${testResults[0].similarity.toFixed(3)})\n\n`; | |
} | |
if (qaModel) { | |
output += `🤖 Testing QA model...\n`; | |
const context = documents[0].content.substring(0, 500); | |
const testQuestion = "What is artificial intelligence?"; | |
const qaResult = await qaModel(testQuestion, context); | |
output += `✅ QA test: Generated answer with ${(qaResult.score * 100).toFixed(1)}% confidence\n`; | |
output += `💬 Answer: ${qaResult.answer.substring(0, 100)}...\n\n`; | |
} | |
if (llmModel) { | |
output += `🚀 Testing LLM model...\n`; | |
const testPrompt = "Explain artificial intelligence:"; | |
const llmResult = await llmModel(testPrompt, { max_new_tokens: 30, temperature: 0.7, do_sample: true, return_full_text: false }); | |
output += `✅ LLM test: Generated text completion\n`; | |
output += `💬 Generated: "${llmResult[0].generated_text.substring(0, 100)}..."\n\n`; | |
} | |
output += `🎉 All tests passed! System is fully operational.`; | |
} else { | |
output += `⚠️ Models not initialized. Click "Initialize AI Models" first.`; | |
} | |
outputDiv.textContent = output; | |
} catch (error) { | |
console.error('Test error:', error); | |
outputDiv.textContent = `❌ Test failed: ${error.message}`; | |
} finally { | |
testBtn.disabled = false; | |
} | |
} | |
// Initialize UI | |
updateStatus(); | |
// Show version info in console | |
console.log('🤖 AI-Powered RAG System with Transformers.js'); | |
console.log('Models: Xenova/all-MiniLM-L6-v2, Xenova/distilbert-base-cased-distilled-squad'); | |
</script> | |
</body> | |
</html> |