Files changed (1)
  1. app.py +265 -268
app.py CHANGED
@@ -1,289 +1,286 @@
 
 
  import os
  import json
- import logging
- from datetime import datetime
  import gradio as gr
- from datasets import list_datasets, load_dataset
- from huggingface_hub import HfApi, HfFolder
  from transformers import (
-     AutoModelForCausalLM,
-     AutoTokenizer,
-     Trainer,
-     TrainingArguments,
-     DataCollatorForLanguageModeling
- )
-
- # ===============================
- # Setup directories & logging
- # ===============================
- BASE_DIR = "storage"
- MEMORY_DIR = os.path.join(BASE_DIR, "memory")
- LOG_FILE = os.path.join(BASE_DIR, "logs.txt")
-
- os.makedirs(MEMORY_DIR, exist_ok=True)
- os.makedirs(BASE_DIR, exist_ok=True)
-
- logging.basicConfig(
-     filename=LOG_FILE,
-     level=logging.INFO,
-     format="%(asctime)s - %(levelname)s - %(message)s"
  )
-
- def log_event(event: str):
-     """Log both to file and console"""
-     logging.info(event)
-     print(event)
-
- # ===============================
- # Hugging Face Auto-fetch
- # ===============================
- def fetch_top_models(limit=10):
-     """Fetch top models from Hugging Face Hub"""
-     api = HfApi()
-     models = api.list_models(sort="downloads", limit=limit)
-     return [m.modelId for m in models]
-
- def fetch_top_datasets(limit=10):
-     """Fetch top datasets from Hugging Face Hub"""
-     api = HfApi()
-     datasets = api.list_datasets(sort="downloads", limit=limit)
-     return [d.id for d in datasets]
-
- TOP_MODELS = fetch_top_models()
- TOP_DATASETS = fetch_top_datasets()
-
- # ===============================
- # Memory Management
- # ===============================
  def get_memory_file(model_name):
-     return os.path.join(MEMORY_DIR, f"{model_name.replace('/', '_')}_memory.json")
 
  def load_memory(model_name):
-     f = get_memory_file(model_name)
-     if os.path.exists(f):
-         with open(f, "r") as file:
-             return json.load(file)
      return []
 
- def save_memory(model_name, messages):
-     f = get_memory_file(model_name)
-     with open(f, "w") as file:
-         json.dump(messages, file, indent=2)
-
- # ===============================
- # Chat Functionality
- # ===============================
- def chat_with_model(user_input, model_choice):
-     if not model_choice:
-         return "❌ Please select a model.", ""
-
-     log_event(f"User chatting with {model_choice}: {user_input}")
-     tokenizer = AutoTokenizer.from_pretrained(model_choice)
-     model = AutoModelForCausalLM.from_pretrained(model_choice)
-
-     inputs = tokenizer(user_input, return_tensors="pt")
-     outputs = model.generate(**inputs, max_length=200)
-
-     response = tokenizer.decode(outputs[0], skip_special_tokens=True)
-
-     memory = load_memory(model_choice)
-     memory.append({"user": user_input, "bot": response})
-     save_memory(model_choice, memory)
-
-     return response, json.dumps(memory, indent=2)
-
- # ===============================
- # Training
- # ===============================
- def train_model(model_name, dataset_name, epochs, output_dir):
-     try:
-         log_event(f"Starting training: model={model_name}, dataset={dataset_name}, epochs={epochs}")
-         dataset = load_dataset(dataset_name, split="train")
-
-         tokenizer = AutoTokenizer.from_pretrained(model_name)
-         model = AutoModelForCausalLM.from_pretrained(model_name)
-
-         def tokenize_function(examples):
-             return tokenizer(examples["text"], truncation=True, padding="max_length", max_length=128)
-
-         tokenized_dataset = dataset.map(tokenize_function, batched=True)
-
-         data_collator = DataCollatorForLanguageModeling(
-             tokenizer=tokenizer,
-             mlm=False
-         )
-
-         training_args = TrainingArguments(
-             output_dir=output_dir,
-             overwrite_output_dir=True,
-             num_train_epochs=int(epochs),
-             per_device_train_batch_size=2,
-             save_steps=500,
-             save_total_limit=2,
-             logging_dir="./logs",
-             logging_steps=50
-         )
-
-         trainer = Trainer(
-             model=model,
-             args=training_args,
-             train_dataset=tokenized_dataset,
-             data_collator=data_collator
-         )
-
-         trainer.train()
-         model.save_pretrained(output_dir)
-         tokenizer.save_pretrained(output_dir)
-
-         log_event(f"✅ Training completed. Model saved to {output_dir}")
-         return f"✅ Training completed. Model saved to {output_dir}"
-     except Exception as e:
-         log_event(f"❌ Training failed: {e}")
-         return f"❌ Error during training: {str(e)}"
-
- # ===============================
- # Gradio UI – Training Tab
- # ===============================
- with gr.Blocks() as training_tab:
-     gr.Markdown("## 📚 Train a Custom Model")
-     with gr.Row():
-         model_dropdown = gr.Dropdown(choices=TOP_MODELS, label="Choose Model", interactive=True)
-         dataset_dropdown = gr.Dropdown(choices=TOP_DATASETS, label="Choose Dataset", interactive=True)
-     with gr.Row():
-         model_text = gr.Textbox(label="Or enter custom model ID", placeholder="e.g. gpt2")
-         dataset_text = gr.Textbox(label="Or enter custom dataset ID", placeholder="e.g. wikitext")
-     epochs = gr.Number(value=1, label="Epochs")
-     output_dir = gr.Textbox(value="./trained_model", label="Output Directory")
-     train_btn = gr.Button("🚀 Start Training")
-     train_output = gr.Textbox(label="Training Status")
-
-     def handle_train(model_d, model_t, dataset_d, dataset_t, epochs, output_dir):
-         model = model_t if model_t else model_d
-         dataset = dataset_t if dataset_t else dataset_d
-         return train_model(model, dataset, epochs, output_dir)
-
-     train_btn.click(
-         fn=handle_train,
-         inputs=[model_dropdown, model_text, dataset_dropdown, dataset_text, epochs, output_dir],
-         outputs=train_output
      )
 
- # ===============================
- # Gradio UI – Chat Tab
- # ===============================
- with gr.Blocks() as chat_tab:
-     gr.Markdown("## 💬 Chat with Model")
-     with gr.Row():
-         chat_model_dropdown = gr.Dropdown(choices=TOP_MODELS, label="Choose Model", interactive=True)
-         chat_model_text = gr.Textbox(label="Or enter custom model ID", placeholder="e.g. gpt2")
-     with gr.Row():
-         chat_input = gr.Textbox(label="Your Message")
-     send_btn = gr.Button("Send")
-     chat_output = gr.Textbox(label="Model Response")
-     memory_display = gr.Textbox(label="Conversation Memory", interactive=False)
-
-     def handle_chat(user_input, model_d, model_t):
-         model = model_t if model_t else model_d
-         return chat_with_model(user_input, model)
-
-     send_btn.click(
-         fn=handle_chat,
-         inputs=[chat_input, chat_model_dropdown, chat_model_text],
-         outputs=[chat_output, memory_display]
      )
 
- # ===============================
- # Gradio UI – Memory Tab
- # ===============================
- with gr.Blocks() as memory_tab:
-     gr.Markdown("## 🧠 Manage Memory")
-
-     with gr.Row():
-         memory_model_dropdown = gr.Dropdown(choices=TOP_MODELS, label="Select Model")
-         memory_model_text = gr.Textbox(label="Or enter custom model ID")
-
-     memory_output = gr.Textbox(label="Stored Memory", interactive=False)
-     load_btn = gr.Button("📂 Load Memory")
-     clear_btn = gr.Button("🗑️ Clear Memory")
-
-     def handle_load(model_d, model_t):
-         model = model_t if model_t else model_d
-         memory = load_memory(model)
-         return json.dumps(memory, indent=2)
-
-     def handle_clear(model_d, model_t):
-         model = model_t if model_t else model_d
-         f = get_memory_file(model)
-         if os.path.exists(f):
-             os.remove(f)
-             log_event(f"Cleared memory for {model}")
-             return "✅ Memory cleared."
-         return "⚠️ No memory found."
-
-     load_btn.click(
-         fn=handle_load,
-         inputs=[memory_model_dropdown, memory_model_text],
-         outputs=memory_output
-     )
-
-     clear_btn.click(
-         fn=handle_clear,
-         inputs=[memory_model_dropdown, memory_model_text],
-         outputs=memory_output
-     )
 
- # ===============================
- # Gradio UI – Logs Tab
- # ===============================
- with gr.Blocks() as logs_tab:
-     gr.Markdown("## 📜 Application Logs")
-     log_display = gr.Textbox(value=open(LOG_FILE).read() if os.path.exists(LOG_FILE) else "No logs yet.", lines=20)
 
-     refresh_btn = gr.Button("🔄 Refresh Logs")
 
-     def refresh_logs():
-         return open(LOG_FILE).read() if os.path.exists(LOG_FILE) else "No logs yet."
 
-     refresh_btn.click(
-         fn=refresh_logs,
-         outputs=log_display
-     )
 
- # ===============================
- # Gradio UI – Help Tab
- # ===============================
- with gr.Blocks() as help_tab:
-     gr.Markdown("## 📖 Help & User Manual")
-
-     gr.Markdown("""
-     ### 🔹 Beginner Guide
-     1. Go to **Train a Model** tab → pick a model & dataset or enter custom IDs.
-     2. Choose number of epochs & output directory → click **Start Training**.
-     3. Once training completes, the model is saved and can be used later.
-     4. Go to **Chat with Model** tab → type your message or test the trained model.
-     5. Conversation is auto-saved per model (see **Memory** tab).
-     6. Use **Logs** tab for detailed runtime events.
-
-     ### 🔹 Technical Details
-     - Models/Datasets pulled live from Hugging Face Hub (top 10 auto-fetched).
-     - Memory stored in `/storage/memory/` as JSON files (per model).
-     - Logs stored in `/storage/logs.txt`.
-     - Training uses 🤗 Transformers `Trainer` API with causal LM objective.
-     - Safe checks auto-create missing directories & files.
-
-     ### 🔹 Tips
-     - For large datasets, train on GPU (CPU will be very slow).
-     - Memory files can be manually edited in `/storage/memory/`.
-     - You can load any public Hugging Face dataset/model by entering its ID.
-     """)
-
- # ===============================
- # Final Tabbed Interface
- # ===============================
- demo = gr.TabbedInterface(
-     [training_tab, chat_tab, memory_tab, logs_tab, help_tab],
-     ["Train a Model", "Chat", "Memory", "Logs", "Help & Manual"]
- )
 
  if __name__ == "__main__":
-     demo.launch(server_name="0.0.0.0", server_port=7860)
 
+ # app.py (Part 1 of 2)
+
  import os
  import json
+ import datetime
  import gradio as gr
  from transformers import (
+     AutoTokenizer, AutoModelForSequenceClassification,
+     Trainer, TrainingArguments, TrainerCallback
  )
+ from datasets import load_dataset
+ import torch
+
+ # =========================
+ # Ensure directories exist
+ # =========================
+ os.makedirs("trained_models", exist_ok=True)
+ os.makedirs("logs", exist_ok=True)
+ os.makedirs("memory", exist_ok=True)
+
+ # =========================
+ # Utility: Memory System
+ # =========================
  def get_memory_file(model_name):
+     safe_name = model_name.replace("/", "_")
+     return os.path.join("memory", f"memory_{safe_name}.json")
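+     # Example of the mapping above: get_memory_file("facebook/bart-base")
+     # -> "memory/memory_facebook_bart-base.json"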
 
  def load_memory(model_name):
+     file = get_memory_file(model_name)
+     if os.path.exists(file):
+         with open(file, "r") as f:
+             return json.load(f)
      return []
 
+ def save_memory(model_name, conversation):
+     file = get_memory_file(model_name)
+     memory = load_memory(model_name)
+     memory.append(conversation)
+     with open(file, "w") as f:
+         json.dump(memory, f, indent=2)
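+ # Illustrative memory file after two test chats (the entries are the
+ # {"input": ..., "prediction": ...} records written by chat_with_model below):
+ # [
+ #   {"input": "great movie!", "prediction": 1},
+ #   {"input": "terrible plot", "prediction": 0}
+ # ]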
+
+ # =========================
+ # Utility: Logging
+ # =========================
+ def log_event(event):
+     log_file = os.path.join("logs", "events.log")
+     with open(log_file, "a") as f:
+         f.write(f"[{datetime.datetime.now()}] {event}\n")
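+     # Example resulting line (timestamp is illustrative):
+     # [2025-01-01 12:00:00.000000] Training started: model=gpt2, dataset=imdb, epochs=1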
+
+ # =========================
+ # Training Pipeline
+ # =========================
+ def train_model(model_name, dataset_name, epochs, output_dir="trained_models"):
+     log_event(f"Training started: model={model_name}, dataset={dataset_name}, epochs={epochs}")
+
+     # Load tokenizer + dataset
+     tokenizer = AutoTokenizer.from_pretrained(model_name)
+     dataset = load_dataset(dataset_name, split="train[:200]")  # smaller subset for CPU
+
+     def tokenize_fn(batch):
+         return tokenizer(batch["text"], truncation=True, padding="max_length", max_length=128)
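+     # tokenize_fn yields fixed 128-token "input_ids"/"attention_mask" columns.
+     # Note: models whose tokenizer has no pad token (e.g. gpt2) would need
+     # tokenizer.pad_token = tokenizer.eos_token before padding works.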
+
+     dataset = dataset.map(tokenize_fn, batched=True)
+     dataset = dataset.rename_column("label", "labels")
+     dataset.set_format("torch", columns=["input_ids", "attention_mask", "labels"])
+     # evaluation_strategy="epoch" below requires an eval set (Trainer refuses
+     # to start without one), so hold out 10% of the subset for evaluation
+     split = dataset.train_test_split(test_size=0.1)
+
+     # Load model
+     model = AutoModelForSequenceClassification.from_pretrained(model_name, num_labels=2)
+
+     # Training arguments
+     training_args = TrainingArguments(
+         output_dir=os.path.join(output_dir, model_name.replace("/", "_")),
+         overwrite_output_dir=True,
+         evaluation_strategy="epoch",
+         save_strategy="epoch",
+         num_train_epochs=epochs,
+         per_device_train_batch_size=8,
+         logging_dir="./logs",
+         logging_steps=10,
+         report_to="none",  # prevent wandb errors
+         no_cuda=True  # force CPU
      )
 
+     # Progress tracking
+     progress = {"epoch": 0, "loss": []}
+
+     def compute_metrics(eval_pred):
+         logits, labels = eval_pred
+         preds = logits.argmax(-1)
+         acc = (preds == labels).astype(float).mean().item()
+         return {"accuracy": acc}
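+     # Illustrative call with numpy inputs:
+     # compute_metrics((np.array([[0.1, 0.9]]), np.array([1]))) == {"accuracy": 1.0}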
+
+     # Per-epoch logging hook. Trainer's `callbacks` argument only accepts
+     # TrainerCallback instances, so this is a small class rather than a
+     # bare function.
+     class LogCallback(TrainerCallback):
+         def on_log(self, args, state, control, logs=None, **kwargs):
+             if state.is_local_process_zero and logs and "loss" in logs:
+                 progress["epoch"] = state.epoch
+                 progress["loss"].append(logs["loss"])
+                 log_event(f"Epoch {state.epoch} - Loss: {logs['loss']}")
+
+     # Trainer
+     trainer = Trainer(
+         model=model,
+         args=training_args,
+         train_dataset=split["train"],
+         eval_dataset=split["test"],
+         tokenizer=tokenizer,
+         compute_metrics=compute_metrics,
+         callbacks=[LogCallback()]
      )
 
+     trainer.train()
 
+     # Save trained model
+     save_dir = os.path.join(output_dir, model_name.replace("/", "_"))
+     model.save_pretrained(save_dir)
+     tokenizer.save_pretrained(save_dir)
 
+     log_event(f"Training finished: model saved at {save_dir}")
+     return f"✅ Training complete. Model saved at {save_dir}", progress
 
+ # app.py (Part 2 of 2) — UI
 
+ import gradio as gr
 
+ # =========================
+ # Hugging Face Top 10 (demo defaults, can expand to auto-fetch later)
+ # =========================
+ TOP_MODELS = [
+     "distilbert-base-uncased", "bert-base-uncased", "roberta-base",
+     "google/electra-base-discriminator", "albert-base-v2",
+     "facebook/bart-base", "gpt2", "t5-small",
+     "microsoft/deberta-base", "xlnet-base-cased"
+ ]
+
+ TOP_DATASETS = [
+     "imdb", "ag_news", "yelp_polarity",
+     "dbpedia_14", "amazon_polarity",
+     "tweet_eval", "glue", "sst2",
+     "cnn_dailymail", "emotion"
+ ]
+
+ # =========================
+ # Inference (Test chat)
+ # =========================
+ def chat_with_model(model_name, user_input):
+     model_dir = os.path.join("trained_models", model_name.replace("/", "_"))
+     if not os.path.exists(model_dir):
+         return "❌ Model not trained yet. Train it first."
+
+     tokenizer = AutoTokenizer.from_pretrained(model_dir)
+     model = AutoModelForSequenceClassification.from_pretrained(model_dir)
+
+     inputs = tokenizer(user_input, return_tensors="pt", truncation=True, padding=True)
+     outputs = model(**inputs)
+     prediction = torch.argmax(outputs.logits, dim=-1).item()
+
+     # Save memory
+     conversation = {"input": user_input, "prediction": prediction}
+     save_memory(model_name, conversation)
+
+     return f"🔮 Prediction: {prediction}"
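+
+ # The prediction is a raw class index from the 2-label head trained above,
+ # e.g. "🔮 Prediction: 1". Which class an index denotes depends on the
+ # dataset's label encoding (for imdb, 1 = positive).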
+
+ # =========================
+ # View Memory
+ # =========================
+ def view_memory(model_name):
+     memory = load_memory(model_name)
+     if not memory:
+         return "📭 No memory yet for this model."
+     return json.dumps(memory, indent=2)
+
+ # =========================
+ # View Logs
+ # =========================
+ def view_logs():
+     log_file = os.path.join("logs", "events.log")
+     if not os.path.exists(log_file):
+         return "📭 No logs yet."
+     with open(log_file, "r") as f:
+         return f.read()
+
+ # =========================
+ # User Guide / Manual
+ # =========================
+ USER_GUIDE = """
+ # 📘 AI Model Builder Guide
+
+ Welcome to your **all-in-one AI Model Builder**.
+ This app allows you to **train, fine-tune, test, and manage AI models** directly in a Hugging Face Space.
+
+ ---
+
+ ## 🔹 Step 1: Training a Model
+ 1. Go to the **Training Tab**.
+ 2. Select a **model** from the Top-10 list or type your own Hugging Face model ID.
+ 3. Select a **dataset** from the Top-10 list or type your own Hugging Face dataset ID.
+ 4. Choose the number of **epochs** (training cycles).
+ 5. Click **Start Training**.
+ 6. Training progress will appear, and the model will be saved under `trained_models/`.
+
+ ---
+
+ ## 🔹 Step 2: Testing Your Model
+ 1. Switch to the **Testing Tab**.
+ 2. Type any input in the chat box.
+ 3. The app will return a **prediction**.
+ 4. Every conversation is saved in **per-model memory**.
+
+ ---
+
+ ## 🔹 Step 3: Viewing Memory
+ - Go to the **Memory Tab**.
+ - See past chats and predictions for each model.
+
+ ---
+
+ ## 🔹 Step 4: Viewing Logs
+ - All activity is logged.
+ - Open the **Logs Tab** to view training sessions, progress, and errors.
+
+ ---
+
+ ## 🔹 Technical Notes
+ - Training runs on **CPU** (slower but free).
+ - Uses Hugging Face **Transformers + Datasets**.
+ - Stores:
+   - Models → `trained_models/`
+   - Logs → `logs/events.log`
+   - Memory → `memory/memory_{model}.json`
+ """
+
+ # =========================
+ # Build Gradio UI
+ # =========================
+ with gr.Blocks() as demo:
+     gr.Markdown("# 🧠 AI Model Builder\nTrain, Fine-tune, Test, and Manage Your Own AI Models")
+
+     with gr.Tab("🛠️ Training"):
+         with gr.Row():
+             model_dropdown = gr.Dropdown(choices=TOP_MODELS, label="Select Model", interactive=True)
+             model_textbox = gr.Textbox(label="Or enter custom model ID")
+         with gr.Row():
+             dataset_dropdown = gr.Dropdown(choices=TOP_DATASETS, label="Select Dataset", interactive=True)
+             dataset_textbox = gr.Textbox(label="Or enter custom dataset ID")
+         epochs = gr.Slider(1, 5, value=1, step=1, label="Epochs (Training Cycles)")
+         train_button = gr.Button("🚀 Start Training")
+         train_output = gr.Textbox(label="Training Status")
+         progress_output = gr.JSON(label="Progress Details")
+
+         def run_training(model_dropdown, model_textbox, dataset_dropdown, dataset_textbox, epochs):
+             model_name = model_textbox if model_textbox else model_dropdown
+             dataset_name = dataset_textbox if dataset_textbox else dataset_dropdown
+             return train_model(model_name, dataset_name, epochs)
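+         # Precedence: a non-empty custom-ID textbox always wins over the
+         # dropdown, e.g. typing "my-org/my-bert" overrides any dropdown choice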
+
+         train_button.click(
+             run_training,
+             inputs=[model_dropdown, model_textbox, dataset_dropdown, dataset_textbox, epochs],
+             outputs=[train_output, progress_output]
+         )
 
+     with gr.Tab("💬 Testing"):
+         test_model_name = gr.Textbox(label="Enter Model ID (must be trained first)")
+         test_input = gr.Textbox(label="Your Message")
+         test_button = gr.Button("💡 Predict")
+         test_output = gr.Textbox(label="Model Response")
+         test_button.click(chat_with_model, inputs=[test_model_name, test_input], outputs=test_output)
+
+     with gr.Tab("🧾 Memory"):
+         mem_model_name = gr.Textbox(label="Enter Model ID to View Memory")
+         mem_button = gr.Button("📂 Load Memory")
+         mem_output = gr.Textbox(label="Conversation Memory", lines=15)
+         mem_button.click(view_memory, inputs=mem_model_name, outputs=mem_output)
+
+     with gr.Tab("📜 Logs"):
+         log_button = gr.Button("📖 Show Logs")
+         log_output = gr.Textbox(label="Logs", lines=20)
+         log_button.click(view_logs, outputs=log_output)
+
+     with gr.Tab("📘 Guide"):
+         gr.Markdown(USER_GUIDE)
+
+ # =========================
+ # Launch
+ # =========================
  if __name__ == "__main__":
+     demo.launch()