mereith commited on
Commit
fae0e6c
·
1 Parent(s): 2e3c698
Files changed (5) hide show
  1. .gitignore +1 -0
  2. README.md +114 -16
  3. api.py +182 -0
  4. app.py +254 -65
  5. utils.py +48 -0
.gitignore ADDED
@@ -0,0 +1 @@
 
 
1
+ venv
README.md CHANGED
@@ -1,16 +1,114 @@
1
- ---
2
- title: Odr Demo
3
- emoji: 💬
4
- colorFrom: yellow
5
- colorTo: purple
6
- sdk: gradio
7
- sdk_version: 5.42.0
8
- app_file: app.py
9
- pinned: false
10
- hf_oauth: true
11
- hf_oauth_scopes:
12
- - inference-api
13
- license: mit
14
- ---
15
-
16
- An example chatbot using [Gradio](https://gradio.app), [`huggingface_hub`](https://huggingface.co/docs/huggingface_hub/v0.22.2/en/index), and the [Hugging Face Inference API](https://huggingface.co/docs/api-inference/index).
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # ODR Demo
2
+
3
+ A demonstration project containing async SSE (Server-Sent Events) interface request functionality.
4
+
5
+ ## Features
6
+
7
+ - Async SSE streaming requests
8
+ - Support for custom parameters (deep thinking mode, debug mode, etc.)
9
+ - Returns async iterator for real-time data processing
10
+ - Supports both raw data and parsed JSON data modes
11
+ - Structured event parsing with `event` and `data` fields
12
+ - Proper SSE event type detection and handling
13
+
14
+ ## Install Dependencies
15
+
16
+ ```bash
17
+ pip install -r requirements.txt
18
+ ```
19
+
20
+ ## Usage
21
+
22
+ ### Basic Usage (Raw SSE Events)
23
+
24
+ ```python
25
+ import asyncio
26
+ from api import request_sse_stream
27
+
28
+ async def main():
29
+ query = "Hello, please introduce Python"
30
+
31
+ async for event_data in request_sse_stream(query):
32
+ event_type = event_data.get('event', 'unknown')
33
+ data_content = event_data.get('data', '')
34
+ print(f"Event: {event_type}")
35
+ print(f"Data: {data_content}")
36
+
37
+ asyncio.run(main())
38
+ ```
39
+
40
+ ### Using Parsed Data (JSON Parsed)
41
+
42
+ ```python
43
+ import asyncio
44
+ from api import request_sse_stream_parsed
45
+
46
+ async def main():
47
+ query = "What is machine learning?"
48
+
49
+ async for event_data in request_sse_stream_parsed(query):
50
+ event_type = event_data.get('event', 'unknown')
51
+ parsed_data = event_data.get('data', {})
52
+ print(f"Event: {event_type}")
53
+ print(f"Parsed Data: {parsed_data}")
54
+
55
+ asyncio.run(main())
56
+ ```
57
+
58
+ ### Using Class Methods (More Control)
59
+
60
+ ```python
61
+ import asyncio
62
+ from api import SSEClient
63
+
64
+ async def main():
65
+ client = SSEClient()
66
+
67
+ async for event_data in client.stream_chat(
68
+ query="Explain deep learning",
69
+ deep_thinking_mode=True, # Enable deep thinking
70
+ debug=True, # Enable debug mode
71
+ chat_id="my_custom_id" # Custom chat ID
72
+ ):
73
+ event_type = event_data.get('event', 'unknown')
74
+ data_content = event_data.get('data', '')
75
+ print(f"Event: {event_type}")
76
+ print(f"Data: {data_content}")
77
+
78
+ asyncio.run(main())
79
+ ```
80
+
81
+ ## Run Examples
82
+
83
+ ```bash
84
+ python example_usage.py
85
+ ```
86
+
87
+ ## API Parameters
88
+
89
+ - `query`: Required, user query content
90
+ - `deep_thinking_mode`: Optional, whether to enable deep thinking mode, default False
91
+ - `search_before_planning`: Optional, whether to search before planning, default False
92
+ - `debug`: Optional, whether to enable debug mode, default False
93
+ - `chat_id`: Optional, chat ID, will be auto-generated if not provided
94
+
95
+ ## Data Structure
96
+
97
+ All functions return an async iterator yielding dictionaries with the following structure:
98
+
99
+ ```python
100
+ {
101
+ "event": "message", # SSE event type (e.g., "message", "error", "data", etc.)
102
+ "data": "..." # Event data content
103
+ }
104
+ ```
105
+
106
+ - `request_sse_stream()`: Returns raw data where `data` field contains the original string
107
+ - `request_sse_stream_parsed()`: Returns parsed data where `data` field contains JSON objects (when possible)
108
+
109
+ ## File Description
110
+
111
+ - `api.py`: Main SSE client implementation
112
+ - `example_usage.py`: Usage examples
113
+ - `requirements.txt`: Project dependencies
114
+ - `app.py`: Gradio application (if exists)
api.py ADDED
@@ -0,0 +1,182 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import asyncio
+ import json
+ import os
+ import uuid
+ from typing import Any, AsyncIterator, Dict, Optional
+ import aiohttp
7
+
8
+
9
class SSEClient:
    """Async SSE (Server-Sent Events) client for the streaming chat API.

    The endpoint URL is read from the ``API_ENDPOINT`` environment variable
    when the client is constructed.
    """

    def __init__(self):
        # May be None when API_ENDPOINT is unset; validated in stream_chat().
        self.url = os.getenv("API_ENDPOINT")
        self.headers = {
            'Content-Type': 'application/json',
            'User-Agent': 'HuggingFace-Gradio-Demo'
        }

    async def stream_chat(self, query: str,
                          deep_thinking_mode: bool = False,
                          search_before_planning: bool = False,
                          debug: bool = False,
                          chat_id: Optional[str] = None) -> AsyncIterator[Dict[str, Any]]:
        """
        Async request to SSE interface and return streaming data with event parsing

        Args:
            query: User query content
            deep_thinking_mode: Whether to enable deep thinking mode, default False
            search_before_planning: Whether to search before planning, default False
            debug: Whether to enable debug mode, default False
            chat_id: Chat ID, will be auto-generated if not provided

        Yields:
            Dict[str, Any]: SSE event data with 'event' and 'data' fields

        Raises:
            ValueError: If the API_ENDPOINT environment variable is not set.
            Exception: If the HTTP request fails or the stream errors out.
        """
        if not self.url:
            # Fail fast with a clear message instead of aiohttp's opaque
            # "url should be str" error when API_ENDPOINT is missing.
            raise ValueError("API_ENDPOINT environment variable is not set")

        if chat_id is None:
            chat_id = self._generate_chat_id()

        # Build request data
        data = {
            "messages": [{
                "id": chat_id,
                "role": "user",
                "type": "text",
                "content": query
            }],
            "deep_thinking_mode": deep_thinking_mode,
            "search_before_planning": search_before_planning,
            "debug": debug,
            "chatId": chat_id
        }

        async with aiohttp.ClientSession(
            timeout=aiohttp.ClientTimeout(total=None)  # No timeout limit
        ) as session:
            try:
                async with session.post(
                    self.url,
                    headers=self.headers,
                    json=data
                ) as response:
                    if response.status != 200:
                        raise Exception(f"Request failed with status code: {response.status}")

                    # Read SSE stream and parse events
                    current_event = None

                    async for raw_line in response.content:
                        line = raw_line.decode('utf-8').strip()
                        if not line:
                            # Blank line marks the end of an SSE event.
                            # (The original checked `line == ''` *inside* an
                            # `if line:` branch, which was unreachable, so
                            # the reset never happened.)
                            current_event = None
                            continue
                        if line.startswith('event:'):
                            # Per the SSE spec, the field value may be
                            # preceded by at most one space after the colon.
                            current_event = self._field_value(line, 'event:')
                        elif line.startswith('data:'):
                            data_content = self._field_value(line, 'data:')
                            if data_content and data_content != '[DONE]':
                                # Yield structured event data
                                yield {
                                    'event': current_event or 'message',
                                    'data': data_content
                                }
                                # Reset event for next message
                                current_event = None
                        else:
                            # Handle other formats or raw data
                            yield {
                                'event': current_event or 'data',
                                'data': line
                            }
                            current_event = None

            except asyncio.CancelledError:
                # Propagate cancellation untouched so callers can stop the stream.
                raise
            except Exception as e:
                # Chain the original exception to preserve the traceback.
                raise Exception(f"SSE request error: {str(e)}") from e

    @staticmethod
    def _field_value(line: str, prefix: str) -> str:
        """Return the SSE field value, dropping at most one leading space."""
        value = line[len(prefix):]
        return value[1:] if value.startswith(' ') else value

    def _generate_chat_id(self) -> str:
        """Generate a 21-character chat ID (dashless UUID4 prefix)."""
        return str(uuid.uuid4()).replace('-', '')[:21]

    async def stream_chat_parsed(self, query: str, **kwargs) -> AsyncIterator[Dict[str, Any]]:
        """
        Async request to SSE interface and return parsed JSON data with event structure

        Args:
            query: User query content
            **kwargs: Other parameters passed to stream_chat

        Yields:
            Dict[str, Any]: Event data with 'event' and 'data' fields, where 'data' contains parsed JSON
        """
        async for event_data in self.stream_chat(query, **kwargs):
            try:
                # Try to parse the data field as JSON
                parsed_data = json.loads(event_data['data'])
                yield {
                    'event': event_data['event'],
                    'data': parsed_data
                }
            except json.JSONDecodeError:
                # If data is not valid JSON, keep original data
                yield event_data
            except (KeyError, TypeError):
                # If event_data doesn't have expected structure, skip
                continue
132
+
133
+
134
# Convenience functions
async def request_sse_stream(query: str, **kwargs) -> AsyncIterator[Dict[str, Any]]:
    """
    Convenience wrapper: stream raw SSE events for *query*.

    Args:
        query: User query content
        **kwargs: Extra keyword arguments forwarded to SSEClient.stream_chat

    Yields:
        Dict[str, Any]: Raw event dicts with 'event' and 'data' (string) fields
    """
    async for event in SSEClient().stream_chat(query, **kwargs):
        yield event
149
+
150
+
151
async def request_sse_stream_parsed(query: str, **kwargs) -> AsyncIterator[Dict[str, Any]]:
    """
    Convenience wrapper: stream SSE events with JSON-parsed payloads.

    Args:
        query: User query content
        **kwargs: Extra keyword arguments forwarded to SSEClient.stream_chat_parsed

    Yields:
        Dict[str, Any]: Event dicts with 'event' and 'data' fields
    """
    async for event in SSEClient().stream_chat_parsed(query, **kwargs):
        yield event
165
+
166
+
167
# Example usage
async def main():
    """Stream a sample query and print each parsed SSE event."""
    sample_query = "Hello"

    print("=== SSE Event Stream ===")
    async for event in request_sse_stream_parsed(sample_query):
        print(f"Event: {event.get('event', 'unknown')}")
        print(f"Data: {event.get('data', {})}")
        print("-" * 40)


if __name__ == "__main__":
    asyncio.run(main())
app.py CHANGED
@@ -1,70 +1,259 @@
 
 
 
 
1
  import gradio as gr
2
- from huggingface_hub import InferenceClient
3
-
4
-
5
- def respond(
6
- message,
7
- history: list[dict[str, str]],
8
- system_message,
9
- max_tokens,
10
- temperature,
11
- top_p,
12
- hf_token: gr.OAuthToken,
13
- ):
14
- """
15
- For more information on `huggingface_hub` Inference API support, please check the docs: https://huggingface.co/docs/huggingface_hub/v0.22.2/en/guides/inference
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
16
  """
17
- client = InferenceClient(token=hf_token.token, model="openai/gpt-oss-20b")
18
-
19
- messages = [{"role": "system", "content": system_message}]
20
-
21
- messages.extend(history)
22
-
23
- messages.append({"role": "user", "content": message})
24
-
25
- response = ""
26
-
27
- for message in client.chat_completion(
28
- messages,
29
- max_tokens=max_tokens,
30
- stream=True,
31
- temperature=temperature,
32
- top_p=top_p,
33
- ):
34
- choices = message.choices
35
- token = ""
36
- if len(choices) and choices[0].delta.content:
37
- token = choices[0].delta.content
38
-
39
- response += token
40
- yield response
41
-
42
-
43
- """
44
- For information on how to customize the ChatInterface, peruse the gradio docs: https://www.gradio.app/docs/chatinterface
45
- """
46
- chatbot = gr.ChatInterface(
47
- respond,
48
- type="messages",
49
- additional_inputs=[
50
- gr.Textbox(value="You are a friendly Chatbot.", label="System message"),
51
- gr.Slider(minimum=1, maximum=2048, value=512, step=1, label="Max new tokens"),
52
- gr.Slider(minimum=0.1, maximum=4.0, value=0.7, step=0.1, label="Temperature"),
53
- gr.Slider(
54
- minimum=0.1,
55
- maximum=1.0,
56
- value=0.95,
57
- step=0.05,
58
- label="Top-p (nucleus sampling)",
59
- ),
60
- ],
61
- )
62
-
63
- with gr.Blocks() as demo:
64
- with gr.Sidebar():
65
- gr.LoginButton()
66
- chatbot.render()
67
 
68
 
69
  if __name__ == "__main__":
70
- demo.launch()
 
 
1
import json
import logging
import uuid
# NOTE: the original file imported Optional twice; the duplicate is removed.
from typing import Optional

import gradio as gr

from api import request_sse_stream_parsed
from utils import contains_chinese, replace_chinese_punctuation

logger = logging.getLogger(__name__)
17
+ # ========================= Gradio Integration =========================
18
+
19
+ def _init_render_state():
20
+ return {
21
+ "agent_order": [],
22
+ "agents": {}, # agent_id -> {"agent_name": str, "tool_call_order": [], "tools": {tool_call_id: {...}}}
23
+ "current_agent_id": None,
24
+ "errors": [],
25
+ }
26
+
27
+ def _append_show_text(tool_entry: dict, delta: str):
28
+ existing = tool_entry.get("content", "")
29
+ tool_entry["content"] = existing + delta
30
+
31
+ def _is_empty_payload(value) -> bool:
32
+ if value is None:
33
+ return True
34
+ if isinstance(value, str):
35
+ stripped = value.strip()
36
+ return stripped == "" or stripped in ("{}", "[]")
37
+ if isinstance(value, (dict, list, tuple, set)):
38
+ return len(value) == 0
39
+ return False
40
+
41
def _render_markdown(state: dict) -> str:
    """Render the accumulated streaming state as a Markdown document.

    Layout: an error section first (if any errors were collected), then one
    "### <emoji> Agent: <name>" section per agent in arrival order.  Text
    tools ("show_text"/"message") are rendered inline; other tools become a
    bare emoji+name line (no payload) or a collapsible <details> block with
    pretty-printed JSON input/output.

    Args:
        state: Rendering state as produced by _init_render_state() and
            mutated by _update_state_with_event().

    Returns:
        str: Markdown text, or "Waiting..." when nothing has arrived yet.
    """
    lines = []
    # Purely cosmetic: each agent section cycles through these emojis.
    emoji_cycle = ["🧠", "🔎", "🛠️", "📚", "🤖", "🧪", "📝", "🧭", "⚙️", "🧮"]
    # Render errors first if any
    if state.get("errors"):
        lines.append("### ❌ Errors")
        for idx, err in enumerate(state["errors"], start=1):
            lines.append(f"- **Error {idx}**: {err}")
        lines.append("\n---\n")
    for idx, agent_id in enumerate(state.get("agent_order", [])):
        agent = state["agents"].get(agent_id, {})
        agent_name = agent.get("agent_name", "unknown")
        emoji = emoji_cycle[idx % len(emoji_cycle)]
        lines.append(f"### {emoji} Agent: {agent_name}")
        for call_id in agent.get("tool_call_order", []):
            call = agent["tools"].get(call_id, {})
            tool_name = call.get("tool_name", "unknown_tool")
            if tool_name in ("show_text", "message"):
                # Streaming text entries: emit accumulated content verbatim.
                content = call.get("content", "")
                if content:
                    lines.append(content)
            else:
                tool_input = call.get("input")
                tool_output = call.get("output")
                has_input = not _is_empty_payload(tool_input)
                has_output = not _is_empty_payload(tool_output)
                if not has_input and not has_output:
                    # No parameters, only show tool name with emoji on separate line
                    if tool_name == "Partial Summary":
                        lines.append("\n💡Partial Summary\n")
                    else:
                        lines.append(f"\n🔧{tool_name}\n")
                else:
                    # Show as collapsible details for any tool with input or output.
                    # call_id[:8] disambiguates repeated calls of the same tool;
                    # assumes call_id is a string (slicing would fail otherwise).
                    if tool_name == "Partial Summary":
                        summary = f"💡{tool_name} ({call_id[:8]})"
                    else:
                        summary = f"🔧{tool_name} ({call_id[:8]})"
                    lines.append(f"\n<details><summary>{summary}</summary>")
                    if has_input:
                        pretty = json.dumps(tool_input, ensure_ascii=False, indent=2)
                        lines.append("\n**Input**:\n")
                        lines.append(f"```json\n{pretty}\n```")
                    if has_output:
                        pretty = json.dumps(tool_output, ensure_ascii=False, indent=2)
                        lines.append("\n**Output**:\n")
                        lines.append(f"```json\n{pretty}\n```")
                    lines.append("</details>\n")
        # Horizontal rule after every agent section.
        lines.append("\n---\n")
    return "\n".join(lines) if lines else "Waiting..."
91
+
92
def _update_state_with_event(state: dict, message: dict):
    """Fold one parsed SSE event into the rendering state.

    Mutates *state* in place and returns it.  Recognized events:
    start_of_agent / end_of_agent, tool_call (incl. streaming show_text),
    message (incremental text deltas), and error.  Unknown events are
    ignored (e.g. heartbeats).

    Args:
        state: Rendering state as produced by _init_render_state().
        message: Parsed SSE event with 'event' and 'data' fields.

    Returns:
        dict: The (mutated) state.
    """
    event = message.get("event")
    data = message.get("data", {})
    if event == "start_of_agent":
        agent_id = data.get("agent_id")
        agent_name = data.get("agent_name", "unknown")
        # Register the agent only once; repeated start events are ignored.
        if agent_id and agent_id not in state["agents"]:
            state["agents"][agent_id] = {
                "agent_name": agent_name,
                "tool_call_order": [],
                "tools": {}
            }
            state["agent_order"].append(agent_id)
            state["current_agent_id"] = agent_id
    elif event == "end_of_agent":
        # End marker, no special handling needed, keep structure
        state["current_agent_id"] = None
    elif event == "tool_call":
        tool_call_id = data.get("tool_call_id")
        tool_name = data.get("tool_name", "unknown_tool")
        # Attribute the call to the current agent, falling back to the most
        # recently started one; drop the event if no agent exists yet.
        agent_id = state.get("current_agent_id") or (state["agent_order"][-1] if state["agent_order"] else None)
        if not agent_id:
            return state
        agent = state["agents"].setdefault(agent_id, {"agent_name": "unknown", "tool_call_order": [], "tools": {}})
        tools = agent["tools"]
        if tool_call_id not in tools:
            tools[tool_call_id] = {"tool_name": tool_name}
            agent["tool_call_order"].append(tool_call_id)
        entry = tools[tool_call_id]
        if tool_name == "show_text" and "delta_input" in data:
            # Streaming text: append the incremental delta.
            delta = data.get("delta_input", {}).get("text", "")
            _append_show_text(entry, delta)
        elif tool_name == "show_text" and "tool_input" in data:
            # Non-streaming text: payload may be a dict (text directly or
            # nested under 'result') or a bare string.
            ti = data.get("tool_input")
            text = ""
            if isinstance(ti, dict):
                text = ti.get("text", "") or ((ti.get('result') or {}).get("text") if isinstance(ti.get('result'), dict) else "")
            elif isinstance(ti, str):
                text = ti
            if text:
                _append_show_text(entry, text)
        else:
            # Distinguish between input and output:
            if "tool_input" in data:
                # Could be input (first time) or output with result (second time)
                ti = data["tool_input"]
                # If contains result, assign to output; otherwise assign to input
                # NOTE(review): assumes the backend marks outputs with a
                # 'result' key — confirm against the event producer.
                if isinstance(ti, dict) and "result" in ti:
                    entry["output"] = ti
                else:
                    # Only update input if we don't already have valid input data, or if the new data is not empty
                    if "input" not in entry or not _is_empty_payload(ti):
                        entry["input"] = ti
    elif event == "message":
        # Same incremental text display as show_text, aggregated by message_id
        message_id = data.get("message_id")
        agent_id = state.get("current_agent_id") or (state["agent_order"][-1] if state["agent_order"] else None)
        if not agent_id:
            return state
        agent = state["agents"].setdefault(agent_id, {"agent_name": "unknown", "tool_call_order": [], "tools": {}})
        tools = agent["tools"]
        if message_id not in tools:
            tools[message_id] = {"tool_name": "message"}
            agent["tool_call_order"].append(message_id)
        entry = tools[message_id]
        delta_content = (data.get("delta") or {}).get("content", "")
        if isinstance(delta_content, str) and delta_content:
            _append_show_text(entry, delta_content)
    elif event == "error":
        # Collect errors, display uniformly during rendering
        err_text = data.get("error") if isinstance(data, dict) else None
        if not err_text:
            try:
                err_text = json.dumps(data, ensure_ascii=False)
            except Exception:
                err_text = str(data)
        state.setdefault("errors", []).append(err_text)
    else:
        # Ignore heartbeat or other events
        pass
    return state
173
+
174
+
175
+ def _spinner_markup(running: bool) -> str:
176
+ if not running:
177
+ return ""
178
+ return (
179
+ "\n\n<div style=\"display:flex;align-items:center;gap:8px;color:#555;margin-top:8px;\">"
180
+ "<div style=\"width:16px;height:16px;border:2px solid #ddd;border-top-color:#3b82f6;border-radius:50%;animation:spin 0.8s linear infinite;\"></div>"
181
+ "<span>Generating...</span>"
182
+ "</div>\n<style>@keyframes spin{to{transform:rotate(360deg)}}</style>\n"
183
+ )
184
+
185
async def gradio_run(query: str, ui_state: Optional[dict]):
    """Stream agent output for *query* into the Gradio UI.

    Async generator; each yield is a 4-tuple of
    (markdown text, Run-button update, Stop-button update, ui_state).

    Args:
        query: The user's question (English only; Chinese input is rejected).
        ui_state: Per-session state dict carrying the current task_id.
    """
    query = replace_chinese_punctuation(query or "")
    if contains_chinese(query):
        yield (
            "we only support English input for the time being.",
            gr.update(interactive=True),
            gr.update(interactive=False),
            ui_state or {"task_id": None}
        )
        return
    # Tag this run with a fresh task id (used by the stop logic).
    task_id = str(uuid.uuid4())
    if not ui_state:
        ui_state = {"task_id": task_id}
    else:
        ui_state = {**ui_state, "task_id": task_id}
    state = _init_render_state()
    # Initial: disable Run, enable Stop, and show spinner at bottom of text
    yield (
        _render_markdown(state) + _spinner_markup(True),
        gr.update(interactive=False),
        gr.update(interactive=True),
        ui_state
    )
    try:
        async for message in request_sse_stream_parsed(query):
            state = _update_state_with_event(state, message)
            yield (
                _render_markdown(state) + _spinner_markup(True),
                gr.update(interactive=False),
                gr.update(interactive=True),
                ui_state
            )
    except Exception as exc:
        # Without this handler, a mid-stream failure would escape the
        # generator and leave the Run button disabled forever.  Surface the
        # error in the rendered output instead.
        state.setdefault("errors", []).append(str(exc))
    # End: enable Run, disable Stop, remove spinner
    yield (
        _render_markdown(state),
        gr.update(interactive=True),
        gr.update(interactive=False),
        ui_state
    )
224
+
225
def stop_current(ui_state: Optional[dict]):
    """Flip button availability back: enable Run, disable Stop.

    Note: this only toggles the two buttons immediately; it does not cancel
    the running stream itself.
    """
    run_update = gr.update(interactive=True)
    stop_update = gr.update(interactive=False)
    return (run_update, stop_update)
231
+
232
def build_demo():
    """Build and return the Gradio Blocks UI for the streaming demo."""
    # Styles the markdown log container (#log-view) used below.
    custom_css = """
    #log-view { border: 1px solid #ececec; padding: 12px; border-radius: 8px; scroll-behavior: smooth; }
    """
    with gr.Blocks(css=custom_css) as demo:
        gr.Markdown("""
        **MiroMind - Gradio Demo**
        Enter an English question and observe Agents and tool calls in real time (Markdown + collapsible sections).
        """)
        with gr.Row():
            inp = gr.Textbox(lines=3, label="Question (English only)")
        with gr.Row():
            run_btn = gr.Button("Run")
            stop_btn = gr.Button("Stop", variant="stop", interactive=False)
        out_md = gr.Markdown("", elem_id="log-view")
        # Per-session state; task_id identifies the currently running stream.
        ui_state = gr.State({"task_id": None})
        # run: outputs -> markdown, run_btn(update), stop_btn(update), ui_state
        run_btn.click(fn=gradio_run, inputs=[inp, ui_state], outputs=[out_md, run_btn, stop_btn, ui_state])
        # stop: outputs -> run_btn(update), stop_btn(update)
        stop_btn.click(fn=stop_current, inputs=[ui_state], outputs=[run_btn, stop_btn])
    return demo
253
+
254
+
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
255
 
256
 
257
if __name__ == "__main__":
    # Build the UI and start the Gradio server.
    build_demo().launch()
utils.py ADDED
@@ -0,0 +1,48 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import re
2
+
3
+ def contains_chinese(text):
4
+ """
5
+ Detect if a string contains Chinese characters or Chinese punctuation
6
+
7
+ Args:
8
+ text (str): The string to detect
9
+
10
+ Returns:
11
+ bool: True if contains Chinese characters or punctuation, False otherwise
12
+ """
13
+ # Chinese character Unicode ranges:
14
+ # \u4e00-\u9fff: CJK Unified Ideographs
15
+ # \u3400-\u4dbf: CJK Extension A
16
+ # \uf900-\ufaff: CJK Compatibility Ideographs
17
+ # \u3000-\u303f: CJK Symbols and Punctuation
18
+ # \uff00-\uffef: Fullwidth ASCII, Fullwidth punctuation
19
+ chinese_pattern = re.compile(r'[\u4e00-\u9fff\u3400-\u4dbf\uf900-\ufaff\u3000-\u303f\uff00-\uffef]')
20
+ return bool(chinese_pattern.search(text))
21
+
22
# Translation table built once at import time; str.translate then does a
# single C-level pass instead of rebuilding the map on every call.
_PUNCTUATION_MAP = str.maketrans({
    ',': ',',
    '。': '.',
    '!': '!',
    '?': '?',
    ';': ';',
    ':': ':',
    '“': '"',
    '”': '"',
    '‘': "'",
    '’': "'",
    '(': '(',
    ')': ')',
    '【': '[',
    '】': ']',
    '《': '<',
    '》': '>',
    '、': ',',
    '—': '-'
})


def replace_chinese_punctuation(text):
    """Replace common Chinese (fullwidth) punctuation with ASCII equivalents.

    Args:
        text (str): Input text, possibly containing Chinese punctuation.

    Returns:
        str: Text with Chinese punctuation mapped to ASCII counterparts.
    """
    # Multi-character ellipsis first, then any leftover lone '…' — the
    # original code only handled the two-character '……' form.
    text = text.replace('……', '...').replace('…', '...')
    # Single-character replacements in one pass
    return text.translate(_PUNCTUATION_MAP)
+