NaimaAqeel committed on
Commit
bc7e185
·
verified ·
1 Parent(s): f1d9fb1

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +51 -26
app.py CHANGED
@@ -1,32 +1,57 @@
import streamlit as st
import requests

# Backend endpoint that performs the actual analysis.
API_URL = "http://127.0.0.1:8000/analyze/"

st.set_page_config(page_title="Sentiment + Emotion + Toxicity Analyzer", layout="centered")

st.title("🧠 Text Analyzer")
st.write("Analyze text for **Sentiment**, **Emotions**, and **Toxicity** with refinement.")

user_input = st.text_area("Enter your text here:", height=150)

if st.button("Analyze"):
    if not user_input.strip():
        # Nothing (or only whitespace) was entered.
        st.warning("Please enter some text to analyze.")
    else:
        # Delegate the analysis to the FastAPI backend.
        api_response = requests.post(API_URL, json={"text": user_input})
        if api_response.status_code != 200:
            st.error("Error: Could not analyze text.")
        else:
            payload = api_response.json()

            st.subheader("πŸ“Š Results")
            st.write(f"**Sentiment:** {payload['sentiment']} (confidence: {payload['sentiment_confidence']:.2f})")

            # Render each score dictionary as a bulleted section.
            for heading, scores in (("Emotions", payload["emotions"]), ("Toxicity", payload["toxicity"])):
                st.write(f"### {heading}")
                for label, score in scores.items():
                    st.write(f"- {label}: {score:.2f}")
 
import streamlit as st
from transformers import pipeline

st.set_page_config(page_title="Sentiment & Emotion Analyzer", layout="centered")
st.title("🧠 Text Analyzer (Streamlit)")


# Load models once and reuse them across Streamlit reruns.
# Without caching, every widget interaction (button click, text edit)
# re-runs this script top to bottom and rebuilds all three Hugging Face
# pipelines, which is extremely slow and memory-hungry.
@st.cache_resource
def _load_pipelines():
    """Build the three HF pipelines; cached for the app process lifetime."""
    return (
        pipeline("sentiment-analysis", model="distilbert-base-uncased-finetuned-sst-2-english"),
        pipeline("text-classification", model="bhadresh-savani/distilbert-base-uncased-emotion"),
        pipeline("text-classification", model="martin-ha/toxic-comment-model"),
    )


# Keep the original module-level names so the rest of the script is unchanged.
sentiment_pipeline, emotion_pipeline, toxicity_pipeline = _load_pipelines()
# Refinement function
def refine_predictions(sentiment, sentiment_conf, emotions, toxicity):
    """Adjust a raw sentiment label using the emotion scores.

    Parameters
    ----------
    sentiment : str
        Raw sentiment label, lowercased (e.g. "positive", "negative").
    sentiment_conf : float
        Confidence of the raw sentiment. Currently unused; kept for
        interface compatibility with existing callers.
    emotions : dict[str, float]
        Mapping of lowercased emotion label -> score in [0, 1].
    toxicity : dict[str, float]
        Mapping of lowercased toxicity label -> score. Currently unused;
        kept for interface compatibility with existing callers.

    Returns
    -------
    str
        The refined sentiment label.
    """
    # Guard: with no emotion scores there is nothing to refine, and the
    # max() call below would raise ValueError on an empty dict.
    if not emotions:
        return sentiment

    # A strong negative emotion overrides the raw sentiment.
    if (emotions.get("fear", 0) > 0.8
            or emotions.get("anger", 0) > 0.8
            or emotions.get("sadness", 0) > 0.8):
        sentiment = "negative"
    # Strong joy is checked second, so it wins over the rule above.
    if emotions.get("joy", 0) > 0.7:
        sentiment = "positive"
    # Resolve a "neutral" label by the dominant emotion, if it is strong.
    max_emotion = max(emotions, key=emotions.get)
    if sentiment == "neutral" and emotions[max_emotion] > 0.7:
        sentiment = "positive" if max_emotion == "joy" else "negative"
    return sentiment
# UI
user_input = st.text_area("Enter text here:")

if st.button("Analyze"):
    if user_input.strip() != "":
        # Sentiment: pipeline returns a list with one result per input;
        # [0] is the {"label": ..., "score": ...} dict for our single text.
        sent_res = sentiment_pipeline(user_input)[0]
        sentiment = sent_res["label"].lower()
        sentiment_conf = sent_res["score"]

        # Emotions
        # NOTE(review): iterating emo_res as a list of {"label","score"}
        # dicts assumes the pipeline returns ALL class scores for the input
        # (top_k=None / return_all_scores=True). A default text-classification
        # pipeline yields a single dict at [0], which would make this
        # comprehension fail — confirm the pipeline configuration.
        emo_res = emotion_pipeline(user_input)[0]
        emotions = {e["label"].lower(): e["score"] for e in emo_res}

        # Toxicity
        # NOTE(review): same all-scores assumption as for emotions above.
        tox_res = toxicity_pipeline(user_input)[0]
        toxicity = {t["label"].lower(): t["score"] for t in tox_res}

        # Refine sentiment based on emotion
        refined_sentiment = refine_predictions(sentiment, sentiment_conf, emotions, toxicity)

        # Display results
        st.subheader("πŸ“Š Results")
        st.write(f"**Sentiment:** {refined_sentiment} (confidence: {sentiment_conf:.2f})")
        st.write("### Emotions")
        for emo, score in emotions.items():
            st.write(f"- {emo}: {score:.2f}")
        st.write("### Toxicity")
        for tox, score in toxicity.items():
            st.write(f"- {tox}: {score:.2f}")
    else:
        # Empty or whitespace-only input: prompt instead of analyzing.
        st.warning("Please enter some text to analyze.")