Spaces: Build error

Update src/streamlit_app.py

src/streamlit_app.py  CHANGED  (+110 -34)
@@ -1,40 +1,116 @@
-import altair as alt
-import numpy as np
-import pandas as pd
 import streamlit as st
[old lines 5-40, the rest of the previous version of the file, are not legible in this view]
+import pandas as pd
+from io import BytesIO
+import json
+from gpt4all import GPT4All
+from transformers import pipeline
 
+# Page setup
+st.set_page_config(page_title="Top Task Prioritizer with AI", layout="wide")
+st.title("Top 3 Task Prioritizer + AI JSON Analysis")
+
+# Priority mapping
+priority_map = {"High": 3, "Medium": 2, "Low": 1, "N/A": 0}
+
+# Load GPT4All model
+@st.cache_resource
+def load_gpt4all_model():
+    model_path = "C:\\Users\\puttpav\\OneDrive - acuitykp\\Joshua Cyril's files - AI Team\\models"
+    return GPT4All(
+        model_name="Nous-Hermes-2-Mistral-7B-DPO.Q4_0.gguf",
+        allow_download=False,
+        device="cpu",
+        model_path=model_path,
+    )
+
+# Load Qwen model
+@st.cache_resource
+def load_qwen_model():
+    return pipeline("text-generation", model="Qwen/Qwen3-4B-Instruct-2507")
+
+# Load models
+gpt4all_model = load_gpt4all_model()
+qwen_pipe = load_qwen_model()
+
+# Suggest action based on priority and effort
+def suggest_action(row):
+    if row["Priority Score"] == 3 and row["Time Required (hrs)"] <= 20:
+        return "Do First"
+    elif row["Priority Score"] >= 2 and row["Time Required (hrs)"] <= 30:
+        return "Schedule"
+    elif row["Priority Score"] >= 1:
+        return "Delegate"
+    else:
+        return "Defer"
+
+# File upload
+uploaded_file = st.file_uploader("Upload your Excel file", type=["xlsx"])
+
+if uploaded_file:
+    df = pd.read_excel(uploaded_file)
 
+    required_columns = ["Date", "Task", "Priority", "Time Required (hrs)"]
+    if all(col in df.columns for col in required_columns):
+        df["Priority Score"] = df["Priority"].map(priority_map)
+        df["Final Score"] = df["Priority Score"] * 10 - df["Time Required (hrs)"]
+        df["Suggested Action"] = df.apply(suggest_action, axis=1)
+        df["Date"] = df["Date"].astype(str)
 
+        task_json = df[["Date", "Task", "Priority", "Time Required (hrs)", "Suggested Action"]].to_dict(orient="records")
+
+        st.success("Tasks processed and ready for AI analysis!")
+        st.dataframe(df, use_container_width=True)
+
+        # Prompt for AI
+        prompt = f"""
+You are a task prioritization assistant. You will be given a list of tasks in JSON format. Your job is to select the top 3 most important tasks ONLY from the provided list based on their priority and time required.
+
+Do NOT invent or add any new tasks. Only choose from the tasks listed below and then show the recommendation for each task.
+
+Respond ONLY with a valid JSON array of EXACTLY 3 tasks. Each object should include:
+- Task Name
+- Priority Level
+- Time Required
+- Suggested Action
+- Recommendation (e.g., break into subtasks, delegate, schedule early)
+
+Tasks:
+{json.dumps(task_json, indent=2)}
 """
 
+        # Model selection
+        model_choice = st.selectbox("Choose AI Model", ["GPT4All", "Qwen"])
+
+        if st.button("Analyze with Selected AI"):
+            with st.spinner(f"Analyzing with {model_choice}..."):
+                if model_choice == "GPT4All":
+                    response_text = ""
+                    with gpt4all_model.chat_session():
+                        for token in gpt4all_model.generate(prompt, max_tokens=1024):
+                            response_text += token
+                else:
+                    response_text = qwen_pipe(prompt, max_new_tokens=1024)[0]["generated_text"]
+
+            try:
+                json_start = response_text.find("[")
+                json_end = response_text.rfind("]") + 1
+                json_str = response_text[json_start:json_end]
+                parsed_json = json.loads(json_str)
+
+                result_df = pd.DataFrame(parsed_json)
+                st.success(f"Top 3 tasks selected by {model_choice}")
+                st.dataframe(result_df, use_container_width=True)
+
+                output = BytesIO()
+                result_df.to_excel(output, index=False)
+                st.download_button(
+                    label="Download Top 3 Tasks (AI Selected)",
+                    data=output.getvalue(),
+                    file_name=f"top_prioritized_tasks_by_{model_choice.lower()}.xlsx",
+                    mime="application/vnd.openxmlformats-officedocument.spreadsheetml.sheet"
+                )
+            except Exception as e:
+                st.error(f"Failed to parse JSON from {model_choice} response.")
+                st.text_area("Raw AI Output", value=response_text, height=700)
+    else:
+        st.error(f"Missing required columns. Please include: {', '.join(required_columns)}")