# agentic-browser/src/streamlit_app.py
import streamlit as st
import time
import os
import sys
from pathlib import Path
# Add current directory to path if not already there
current_dir = os.path.dirname(os.path.abspath(__file__))
parent_dir = os.path.dirname(current_dir)
if parent_dir not in sys.path:
    sys.path.insert(0, parent_dir)
# Import model manager with proper path handling
try:
    from models.model_manager import model_manager
except ImportError:
    try:
        from src.models.model_manager import model_manager
    except ImportError:
        st.error("❌ Failed to import model manager. Please check the installation.")
        st.stop()
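# NOTE: model_manager is assumed (based on how it is used below) to be a
# module-level singleton exposing two methods:
#   model_manager.load_model(model_name)                      # download/initialise a model
#   model_manager.generate_text(model_name=..., prompt=...,   # return generated text
#                               temperature=..., max_length=...)
# If the real interface differs, load_model() and generate_response() below
# are the places to adjust.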
# Note: Page config is set in app.py to avoid conflicts
# Custom CSS for better styling
st.markdown("""
<style>
    .stTextInput > div > div > input {
        padding: 12px;
        border-radius: 8px;
        border: 1px solid #e0e0e0;
    }
    .stButton > button {
        width: 100%;
        border-radius: 8px;
        padding: 8px 16px;
        font-weight: 500;
    }
    .stMarkdown h1 {
        color: #1f2937;
        margin-bottom: 0.5em;
    }
    .stMarkdown h3 {
        color: #374151;
        margin-top: 1.5em;
    }
</style>
""", unsafe_allow_html=True)
# Initialize session state
if 'messages' not in st.session_state:
    st.session_state.messages = []
    st.session_state.model_loaded = False
    st.session_state.current_model = None
def load_model(model_name):
    """Load the selected model."""
    try:
        with st.spinner(f"🔄 Loading {model_name} model... This might take a few minutes on first load."):
            # Show some progress info
            progress_text = st.empty()
            progress_text.text("Initializing model manager...")
            model_manager.load_model(model_name)
            progress_text.text("Model loaded successfully!")
        st.session_state.model_loaded = True
        st.session_state.current_model = model_name
        st.success(f"✅ Successfully loaded {model_name} model!")
        return True
    except Exception as e:
        error_msg = str(e)
        st.error(f"❌ Error loading model: {error_msg}")
        # Provide helpful suggestions
        if "OutOfMemoryError" in error_msg or "CUDA out of memory" in error_msg:
            st.info("💡 Try using the 'tiny-llama' model which requires less memory.")
        elif "Connection" in error_msg or "timeout" in error_msg:
            st.info("💡 Check your internet connection. Model downloads require a stable connection.")
        else:
            st.info("💡 Try refreshing the page or selecting a different model.")
        return False
def generate_response(prompt, model_name, temperature=0.7):
    """Generate a response using the selected model."""
    try:
        # Check if model is loaded
        if not st.session_state.model_loaded or st.session_state.current_model != model_name:
            st.info(f"🔄 Model not loaded. Loading {model_name}...")
            if not load_model(model_name):
                return "❌ Error: Failed to load model. Please try again or select a different model."

        # Show generating status
        with st.spinner("🤔 Generating response..."):
            # Generate response
            response = model_manager.generate_text(
                model_name=model_name,
                prompt=prompt,
                temperature=temperature,
                max_length=1024
            )

        if not response or response.strip() == "":
            return "⚠️ The model generated an empty response. Please try rephrasing your question."
        return response
    except Exception as e:
        error_msg = str(e)
        st.error(f"❌ Error generating response: {error_msg}")
        # Provide helpful suggestions based on error type
        if "OutOfMemoryError" in error_msg:
            return "💔 Out of memory error. Try using a shorter prompt or the 'tiny-llama' model."
        elif "timeout" in error_msg.lower():
            return "⏰ Request timed out. Please try again with a shorter prompt."
        else:
            return f"❌ An error occurred while generating the response. Please try again.\n\nTechnical details: {error_msg}"
# Sidebar for settings
with st.sidebar:
    st.title("⚙️ Settings")

    # Model selection
    selected_model = st.selectbox(
        "Select Model",
        ["tiny-llama", "mistral-7b"],
        index=0,
        help="Select the model to use for text generation"
    )

    # Temperature slider
    temperature = st.slider(
        "Temperature",
        min_value=0.1,
        max_value=1.0,
        value=0.7,
        step=0.1,
        help="Controls randomness in the response generation. Lower = more deterministic, higher = more creative."
    )

    # Load model button
    if st.button("🔄 Load Model", type="primary"):
        load_model(selected_model)

    # Clear chat button
    if st.button("🗑️ Clear Chat"):
        st.session_state.messages = []
        st.rerun()

    # Model status
    if st.session_state.model_loaded:
        st.success(f"✅ Current: {st.session_state.current_model}")
    else:
        st.warning("⚠️ No model loaded")

    st.markdown("---")
    st.markdown("### About")
    st.markdown("""
**Agentic Browser** is an AI-powered chat assistant using open-source language models.

🚀 **Features:**
- Multiple model options
- Real-time chat interface
- Adjustable creativity settings
- No data collection
""")
    st.markdown("---")
    st.markdown("### Models")
    st.markdown("""
- **TinyLlama**: Fast and lightweight (1.1B parameters)
- **Mistral-7B**: Larger 7B-parameter model; more capable, but needs more memory

💡 **Tip:** Start with TinyLlama for faster responses!
""")
# Main chat interface
st.title("🤖 Agentic Browser")
st.caption("Powered by open-source AI models")
# Show status
if st.session_state.model_loaded:
    st.success(f"✅ Model loaded: {st.session_state.current_model}")
else:
    st.info("ℹ️ Please select and load a model from the sidebar to start chatting.")
# Display chat messages
if st.session_state.messages:
    for message in st.session_state.messages:
        with st.chat_message(message["role"]):
            st.markdown(message["content"])
else:
    # Show welcome message
    st.markdown("""
### 👋 Welcome to Agentic Browser!

This is an AI-powered chat interface using open-source language models.

**Getting Started:**
1. 👈 Select a model from the sidebar
2. Click "🔄 Load Model" to initialize it
3. Start typing your message below

**Available Models:**
- **TinyLlama**: Fast and efficient, great for quick responses
- **Mistral-7B**: Larger and more capable, but slower to load

Try asking questions like:
- "Explain quantum computing in simple terms"
- "Write a short story about a robot"
- "Help me plan a vacation"
""")
# Chat input
if prompt := st.chat_input("Type your message here...", disabled=not st.session_state.model_loaded):
    if not st.session_state.model_loaded:
        st.warning("⚠️ Please load a model first before chatting.")
        st.stop()

    # Add user message to chat history
    st.session_state.messages.append({"role": "user", "content": prompt})

    # Display user message
    with st.chat_message("user"):
        st.markdown(prompt)

    # Generate and display assistant response
    with st.chat_message("assistant"):
        message_placeholder = st.empty()

        # Generate response
        response = generate_response(prompt, selected_model, temperature)

        # Check if response is valid (error strings start with ❌ or 💔)
        if response and not response.startswith("❌") and not response.startswith("💔"):
            # Display response with a word-by-word streaming effect
            full_response = ""
            words = response.split()
            for i, word in enumerate(words):
                full_response += word + " "
                time.sleep(0.05)  # Adjust typing speed as needed
                # Show a cursor while "typing"
                cursor = "▌" if i < len(words) - 1 else ""
                message_placeholder.markdown(full_response + cursor)
            # Final message without cursor
            message_placeholder.markdown(full_response.strip())
            # Add assistant response to chat history
            st.session_state.messages.append({"role": "assistant", "content": full_response.strip()})
        else:
            # Display error message directly
            message_placeholder.markdown(response)
            st.session_state.messages.append({"role": "assistant", "content": response})

    # Rerun so the full chat history is redrawn from session state
    st.rerun()
def main():
    """Main function to run the Streamlit app."""
    # The Streamlit app is already defined above, so this function just serves as an entry point
    pass


if __name__ == "__main__":
    # The Streamlit app runs automatically when the script is executed
    pass
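# To experiment with this module directly (assuming the repository layout in the
# header, i.e. this file lives at src/streamlit_app.py), the standard Streamlit
# invocation is:
#
#   streamlit run src/streamlit_app.py
#
# Note that st.set_page_config() is expected to be called by app.py (see the
# note near the top of this file), so running this file standalone falls back
# to Streamlit's default page configuration.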