hanzla commited on
Commit
71eba9d
·
verified ·
1 Parent(s): 0911831

Implement Gemini 2.5 Flash API integration for medical chatbot

Browse files

- Added Google Gemini 2.5 Flash API integration
- Implemented secure API key management via environment variables
- Added sidebar option for API key configuration
- Replaced placeholder response logic with live API calls
- Configured temperature and max tokens parameters for API requests
- Added comprehensive error handling with fallback responses
- Updated model selection to include Gemini options
- Improved user interface with loading spinner and enhanced descriptions

Files changed (1) hide show
  1. main.py +111 -32
main.py CHANGED
@@ -1,5 +1,6 @@
1
  import streamlit as st
2
  import os
 
3
  from typing import Optional
4
 
5
  # Set page configuration
@@ -18,17 +19,40 @@ def main():
18
  st.markdown(
19
  """
20
  Welcome to the Medical Q/A Chatbot! This application provides informational responses
21
- to medical questions. Please note that this is for educational purposes only and should
22
- not replace professional medical advice.
23
  """
24
  )
25
 
26
  # Sidebar configuration
27
  with st.sidebar:
28
  st.header("Configuration")
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
29
  model_choice = st.selectbox(
30
  "Select Model",
31
- ["GPT-3.5", "GPT-4", "Claude", "Local Model"],
32
  index=0
33
  )
34
 
@@ -37,7 +61,8 @@ def main():
37
  min_value=0.0,
38
  max_value=1.0,
39
  value=0.7,
40
- step=0.1
 
41
  )
42
 
43
  max_tokens = st.number_input(
@@ -45,7 +70,8 @@ def main():
45
  min_value=100,
46
  max_value=4000,
47
  value=500,
48
- step=100
 
49
  )
50
 
51
  # Chat interface
@@ -71,8 +97,9 @@ def main():
71
 
72
  # Generate and display assistant response
73
  with st.chat_message("assistant"):
74
- response = generate_medical_response(prompt, model_choice, temperature, max_tokens)
75
- st.markdown(response)
 
76
 
77
  # Add assistant response to chat history
78
  st.session_state.messages.append({"role": "assistant", "content": response})
@@ -84,34 +111,86 @@ def main():
84
 
85
  def generate_medical_response(question: str, model: str, temperature: float, max_tokens: int) -> str:
86
  """
87
- Generate a medical response based on the user's question.
88
- This is a placeholder function that would integrate with actual AI models.
89
  """
90
 
91
  # Disclaimer message
92
- disclaimer = """\n\n**Disclaimer**: This response is for informational purposes only and should not replace professional medical advice, diagnosis, or treatment. Always consult with a qualified healthcare provider for medical concerns."""
93
-
94
- # Placeholder response - in a real implementation, this would call an AI model
95
- response = f"""
96
- Thank you for your medical question: "{question}"
97
-
98
- I understand you're seeking medical information. While I'd like to help, I'm currently a template
99
- application that needs to be configured with proper medical AI models and knowledge bases.
100
-
101
- To get this chatbot fully functional, you would need to:
102
 
103
- 1. **Integrate AI Models**: Connect to medical AI models (like BioBERT, ClinicalBERT, or specialized medical LLMs)
104
- 2. **Add Medical Knowledge Base**: Include verified medical databases and references
105
- 3. **Implement Safety Filters**: Add content moderation for medical accuracy
106
- 4. **Add Authentication**: Consider user verification for sensitive medical queries
107
-
108
- **Current Configuration:**
109
- - Model: {model}
110
- - Temperature: {temperature}
111
- - Max Tokens: {max_tokens}
112
- """
113
-
114
- return response + disclaimer
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
115
 
116
  if __name__ == "__main__":
117
- main()
 
1
  import streamlit as st
2
  import os
3
+ import google.generativeai as genai
4
  from typing import Optional
5
 
6
  # Set page configuration
 
19
  st.markdown(
20
  """
21
  Welcome to the Medical Q/A Chatbot! This application provides informational responses
22
+ to medical questions using Google's Gemini 2.5 Flash API. Please note that this is for
23
+ educational purposes only and should not replace professional medical advice.
24
  """
25
  )
26
 
27
  # Sidebar configuration
28
  with st.sidebar:
29
  st.header("Configuration")
30
+
31
+ # API Key configuration
32
+ st.subheader("API Settings")
33
+ api_key = st.text_input(
34
+ "Gemini API Key",
35
+ type="password",
36
+ help="Enter your Google Gemini API key",
37
+ placeholder="AIzaSy..."
38
+ )
39
+
40
+ # If API key is provided, set it as environment variable
41
+ if api_key:
42
+ os.environ['GEMINI_API_KEY'] = api_key
43
+ st.success("API Key configured successfully!")
44
+ else:
45
+ # Use default API key from environment if available
46
+ default_key = os.getenv('GEMINI_API_KEY', 'AIzaSyBEyc7iQCLXfry6V7pA0TDR1k0eriX_nDo')
47
+ if default_key:
48
+ os.environ['GEMINI_API_KEY'] = default_key
49
+ st.info("Using default API key from environment")
50
+
51
+ st.divider()
52
+
53
  model_choice = st.selectbox(
54
  "Select Model",
55
+ ["Gemini 2.5 Flash", "Gemini Pro"],
56
  index=0
57
  )
58
 
 
61
  min_value=0.0,
62
  max_value=1.0,
63
  value=0.7,
64
+ step=0.1,
65
+ help="Controls randomness in responses. Lower values are more focused and deterministic."
66
  )
67
 
68
  max_tokens = st.number_input(
 
70
  min_value=100,
71
  max_value=4000,
72
  value=500,
73
+ step=100,
74
+ help="Maximum number of tokens in the response."
75
  )
76
 
77
  # Chat interface
 
97
 
98
  # Generate and display assistant response
99
  with st.chat_message("assistant"):
100
+ with st.spinner("Thinking..."):
101
+ response = generate_medical_response(prompt, model_choice, temperature, max_tokens)
102
+ st.markdown(response)
103
 
104
  # Add assistant response to chat history
105
  st.session_state.messages.append({"role": "assistant", "content": response})
 
111
 
112
  def generate_medical_response(question: str, model: str, temperature: float, max_tokens: int) -> str:
113
  """
114
+ Generate a medical response using Google's Gemini API.
 
115
  """
116
 
117
  # Disclaimer message
118
+ disclaimer = "\n\n**Disclaimer**: This response is for informational purposes only and should not replace professional medical advice, diagnosis, or treatment. Always consult with a qualified healthcare provider for medical concerns."
 
 
 
 
 
 
 
 
 
119
 
120
+ try:
121
+ # Get API key from environment
122
+ api_key = os.getenv('GEMINI_API_KEY')
123
+ if not api_key:
124
+ return "Error: No API key configured. Please set your Gemini API key in the sidebar." + disclaimer
125
+
126
+ # Configure the API
127
+ genai.configure(api_key=api_key)
128
+
129
+ # Select model based on choice
130
+ model_name = "gemini-2.5-flash" if "Flash" in model else "gemini-pro"
131
+
132
+ # Create the model with configuration
133
+ generation_config = {
134
+ "temperature": temperature,
135
+ "top_p": 1,
136
+ "top_k": 40,
137
+ "max_output_tokens": max_tokens,
138
+ }
139
+
140
+ model_instance = genai.GenerativeModel(
141
+ model_name=model_name,
142
+ generation_config=generation_config
143
+ )
144
+
145
+ # Create a medical-focused prompt
146
+ medical_prompt = f"""
147
+ You are a knowledgeable medical AI assistant. Please provide an informative and helpful response to the following medical question.
148
+ Your response should be:
149
+ - Medically accurate and evidence-based
150
+ - Clear and easy to understand
151
+ - Comprehensive but concise
152
+ - Include relevant medical terminology with explanations
153
+ - Always emphasize when professional medical consultation is needed
154
+
155
+ Question: {question}
156
+
157
+ Please provide a detailed medical response while always reminding the user that this is for educational purposes only and cannot replace professional medical advice.
158
+ """
159
+
160
+ # Generate response
161
+ response = model_instance.generate_content(medical_prompt)
162
+
163
+ if response and response.text:
164
+ return response.text + disclaimer
165
+ else:
166
+ return "I apologize, but I couldn't generate a response at this time. Please try again or rephrase your question." + disclaimer
167
+
168
+ except Exception as e:
169
+ error_msg = f"Error connecting to Gemini API: {str(e)}"
170
+
171
+ # Provide fallback response with helpful information
172
+ fallback_response = f"""
173
+ {error_msg}
174
+
175
+ I understand you're seeking medical information about: "{question}"
176
+
177
+ While I'm currently unable to provide a detailed response due to technical issues, I recommend:
178
+
179
+ 1. **For urgent medical concerns**: Contact your healthcare provider immediately or call emergency services
180
+ 2. **For general health questions**: Consult with your primary care physician
181
+ 3. **For medication questions**: Speak with a pharmacist or your prescribing doctor
182
+ 4. **For reliable medical information**: Visit reputable sources like:
183
+ - Mayo Clinic (mayoclinic.org)
184
+ - WebMD (webmd.com)
185
+ - MedlinePlus (medlineplus.gov)
186
+
187
+ **Current Configuration:**
188
+ - Model: {model}
189
+ - Temperature: {temperature}
190
+ - Max Tokens: {max_tokens}
191
+ """
192
+
193
+ return fallback_response + disclaimer
194
 
195
  if __name__ == "__main__":
196
+ main()