File size: 3,826 Bytes
71506d5 8b4c3b4 71506d5 8b4c3b4 71506d5 8b4c3b4 836012a 8b4c3b4 71506d5 fceb356 71506d5 fceb356 71506d5 fceb356 71506d5 8b4c3b4 71506d5 8b4c3b4 71506d5 8b4c3b4 71506d5 8b4c3b4 71506d5 8b4c3b4 71506d5 8b4c3b4 496c48a fceb356 71506d5 fceb356 8b4c3b4 71506d5 8b4c3b4 71506d5 8b4c3b4 |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 |
import gradio as gr
from transformers import pipeline
from diffusers import StableDiffusionPipeline
import torch
import matplotlib.pyplot as plt
import networkx as nx
import tempfile
import os
# -------- Load Models --------
# NOTE(review): all four models load at import time, so app startup blocks
# until each checkpoint is present in the local Hugging Face cache (first
# run downloads several GB).
# Sentence Classification model (emotion labels; you can replace with your fine-tuned model)
classifier = pipeline("text-classification", model="bhadresh-savani/bert-base-uncased-emotion")
# Fill-mask model: predicts candidate tokens for a [MASK] placeholder
fill_mask = pipeline("fill-mask", model="bert-base-uncased")
# NER model; aggregation_strategy="simple" merges word-piece sub-tokens into whole entities
ner = pipeline("ner", model="dslim/bert-base-NER", aggregation_strategy="simple")
# Image generation model (using stable diffusion, make sure you have GPU)
device = "cuda" if torch.cuda.is_available() else "cpu"
# float16 halves VRAM usage on GPU; CPU inference generally requires float32
image_pipe = StableDiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-2", torch_dtype=torch.float16 if device=="cuda" else torch.float32)
image_pipe = image_pipe.to(device)
# -------- Functions --------
# 1. Sentence Classification
def classify_environment_sentence(sentence):
    """Classify a sentence with the emotion classifier and format the top hit.

    Args:
        sentence: Free-form input text.

    Returns:
        Human-readable string with the predicted label and its confidence,
        e.g. "Label: joy (Score: 0.98)".
    """
    top = classifier(sentence)[0]
    return f"Label: {top['label']} (Score: {top['score']:.2f})"
# 2. Image Generation
def generate_image(prompt):
    """Generate an image from a text prompt with Stable Diffusion.

    Args:
        prompt: Text description of the desired image.

    Returns:
        Filesystem path to a temporary PNG file holding the generated image
        (Gradio's Image component accepts a file path).
    """
    image = image_pipe(prompt).images[0]
    # mkstemp hands back a raw fd; close it immediately so image.save can
    # reopen the path on every platform (an open NamedTemporaryFile cannot
    # be reopened on Windows) and so we don't leak one fd per request.
    fd, path = tempfile.mkstemp(suffix=".png")
    os.close(fd)
    image.save(path)
    return path
# 3. Named Entity Recognition + Graph
def ner_graph(text):
    """Run NER over text and render the entities as a star graph image.

    Every detected entity becomes a node linked to a central "Environment"
    hub node.

    Args:
        text: Input text to analyze.

    Returns:
        Filesystem path to a temporary PNG file with the rendered graph.
    """
    entities = ner(text)
    G = nx.Graph()
    # Seed the hub node explicitly so input with no detected entities still
    # renders a minimal graph instead of a blank figure.
    G.add_node("Environment")
    for ent in entities:
        G.add_node(ent['word'], label=ent['entity_group'])
        G.add_edge("Environment", ent['word'])
    # Plot
    plt.figure(figsize=(8, 6))
    nx.draw(G, with_labels=True, node_color='lightgreen', font_weight='bold')
    plt.title("NER Entity Graph")
    # Close the fd right away: savefig reopens the path itself, and keeping
    # the old NamedTemporaryFile handle open leaked one fd per call.
    fd, path = tempfile.mkstemp(suffix=".png")
    os.close(fd)
    plt.savefig(path)
    plt.close()
    return path
# 4. Masked Language Modeling
def fill_the_blank(masked_sentence):
    """Predict completions for a sentence containing a [MASK] token.

    Args:
        masked_sentence: Sentence with a [MASK] placeholder.

    Returns:
        Newline-separated candidate sentences, each with its model score.
    """
    lines = []
    for res in fill_mask(masked_sentence):
        lines.append(f"{res['sequence']} (Score: {res['score']:.2f})")
    return "\n".join(lines)
# -------- Gradio UI --------
# One tab per capability; each tab wires a Button click to the matching
# handler above.
with gr.Blocks(title="Environmental AI Assistant") as demo:
    gr.Markdown("## 🌍 Environmental AI Assistant - Powered by Hugging Face 🤗 + Gradio")

    with gr.Tab("1️⃣ Sentence Classification"):
        sentence_input = gr.Textbox(
            label="Enter a sentence related to environment",
            placeholder="e.g. Government launched a campaign to clean rivers.",
        )
        sentence_output = gr.Textbox(label="Prediction")
        sentence_button = gr.Button("Classify")
        sentence_button.click(classify_environment_sentence, sentence_input, sentence_output)

    with gr.Tab("2️⃣ Image Generation"):
        prompt_input = gr.Textbox(
            label="Enter environmental image prompt",
            placeholder="e.g. A polluted beach with plastic waste",
        )
        image_output = gr.Image(label="Generated Image")
        image_button = gr.Button("Generate Image")
        image_button.click(generate_image, prompt_input, image_output)

    with gr.Tab("3️⃣ NER with Graph Map"):
        ner_input = gr.Textbox(
            label="Enter text with environmental context",
            lines=4,
            placeholder="e.g. UN launched a new program for reducing CO2 emissions in Africa.",
        )
        ner_output = gr.Image(label="Entity Graph")
        ner_button = gr.Button("Show NER Graph")
        ner_button.click(ner_graph, ner_input, ner_output)

    with gr.Tab("4️⃣ Fill the Blank (Masked Language Modeling)"):
        mask_input = gr.Textbox(
            label="Sentence with [MASK]",
            placeholder="e.g. Trees help reduce [MASK] in the atmosphere.",
        )
        mask_output = gr.Textbox(label="Predictions")
        mask_button = gr.Button("Fill the Blank")
        mask_button.click(fill_the_blank, mask_input, mask_output)

demo.launch()