portmafia9719 committed on
Commit 5bb9924 · 1 Parent(s): 81917a3

Update agent code

Files changed (4)
  1. agent.py +129 -0
  2. app.py +199 -196
  3. requirements.txt +8 -2
  4. tools.py +170 -0
agent.py ADDED
@@ -0,0 +1,129 @@
+ """GAIA benchmark agent using *smolagents*.
+
+ This module exposes:
+
+ * ``gaia_agent()`` – factory returning a ready-to-use agent instance.
+ * ``GAIAAgent`` – subclass of ``smolagents.CodeAgent``.
+
+ The LLM backend is chosen at runtime via the ``MODEL_PROVIDER``
+ environment variable (``hf`` or ``openai``) exactly like *example.py*.
+ """
+
+ import os
+ from typing import Any, Sequence
+
+ from dotenv import load_dotenv
+
+ # SmolAgents tools
+ from smolagents import (
+     CodeAgent,
+     DuckDuckGoSearchTool,
+     Tool,
+ )
+
+ # Custom tools from tools.py
+ from tools import (
+     PythonRunTool,
+     ExcelLoaderTool,
+     YouTubeTranscriptTool,
+     AudioTranscriptionTool,
+     SimpleOCRTool,
+ )
+
+
+ # ---------------------------------------------------------------------------
+ # Load the additional system prompt from added_prompt.txt (located in the same directory)
+ # ---------------------------------------------------------------------------
+ ADDED_PROMPT_PATH = os.path.join(os.path.dirname(__file__), "added_prompt.txt")
+ with open(ADDED_PROMPT_PATH, "r", encoding="utf-8") as f:
+     ADDED_PROMPT = f.read().strip()
+
+
+ # ---------------------------------------------------------------------------
+ # Model selection helper
+ # ---------------------------------------------------------------------------
+
+ load_dotenv()  # Make sure we read credentials from .env when running locally
+
+ def _select_model():
+     """Return a smolagents *model* as configured by the ``MODEL_PROVIDER`` env var."""
+
+     provider = os.getenv("MODEL_PROVIDER", "hf").lower()
+
+     if provider == "hf":
+         from smolagents import InferenceClientModel
+         hf_model_id = os.getenv("HF_MODEL", "HuggingFaceH4/zephyr-7b-beta")
+         hf_token = os.getenv("HF_API_KEY")
+         return InferenceClientModel(
+             model_id=hf_model_id,
+             token=hf_token,
+         )
+
+     if provider == "openai":
+         from smolagents import OpenAIServerModel
+         openai_model_id = os.getenv("OPENAI_MODEL", "gpt-3.5-turbo")
+         openai_token = os.getenv("OPENAI_API_KEY")
+         return OpenAIServerModel(
+             model_id=openai_model_id,
+             api_key=openai_token,
+         )
+
+     raise ValueError(
+         f"Unsupported MODEL_PROVIDER: {provider!r}. "
+         "Use 'hf' (default) or 'openai'."
+     )
+
+ # ---------------------------------------------------------------------------
+ # Core Agent implementation
+ # ---------------------------------------------------------------------------
+
+ DEFAULT_TOOLS = [
+     DuckDuckGoSearchTool(),
+     PythonRunTool(),
+     ExcelLoaderTool(),
+     YouTubeTranscriptTool(),
+     AudioTranscriptionTool(),
+     SimpleOCRTool(),
+ ]
+
+ class GAIAAgent(CodeAgent):
+     def __init__(
+         self,
+         tools=None,
+     ):
+         super().__init__(
+             tools=tools or DEFAULT_TOOLS,
+             model=_select_model(),
+         )
+         # Append the additional prompt to the existing system prompt
+         self.prompt_templates["system_prompt"] += f"\n\n{ADDED_PROMPT}"
+
+     # Convenience so the object itself can be *called* directly
+     def __call__(self, question: str, **kwargs: Any) -> str:
+         steps = self.run(question, **kwargs)
+         # If run() already returned a primitive, just return it
+         if isinstance(steps, (int, float, str)):
+             return str(steps).strip()
+         last_step = None
+         for step in steps:
+             last_step = step
+         # Defensive: handle int/float/str directly
+         if isinstance(last_step, (int, float, str)):
+             return str(last_step).strip()
+         answer = getattr(last_step, "answer", None)
+         if answer is not None:
+             return str(answer).strip()
+         return str(last_step).strip()
+
+ # ---------------------------------------------------------------------------
+ # Factory helpers expected by app.py
+ # ---------------------------------------------------------------------------
+
+ def gaia_agent(*, extra_tools: Sequence[Tool] | None = None) -> GAIAAgent:
+     # Compose the toolset: always include all default tools, plus any extras
+     toolset = list(DEFAULT_TOOLS)
+     if extra_tools:
+         toolset.extend(extra_tools)
+     return GAIAAgent(tools=toolset)
+
+ __all__ = ["GAIAAgent", "gaia_agent"]
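
For local experimentation the factory can be exercised directly. Below is a minimal smoke-test sketch (not part of this commit); it assumes the credentials read by _select_model() are available, for example via a .env file with MODEL_PROVIDER and HF_API_KEY or OPENAI_API_KEY, and that added_prompt.txt sits next to agent.py. The test question is a hypothetical placeholder.

# Hypothetical local smoke test, not part of this commit.
from agent import gaia_agent

if __name__ == "__main__":
    agent = gaia_agent()                  # all DEFAULT_TOOLS, backend picked via MODEL_PROVIDER
    answer = agent("What is 17 * 23?")    # GAIAAgent.__call__ always returns a stripped string
    print(answer)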
app.py CHANGED
@@ -1,196 +1,199 @@
- import os
- import gradio as gr
- import requests
- import inspect
- import pandas as pd
-
- # (Keep Constants as is)
- # --- Constants ---
- DEFAULT_API_URL = "https://agents-course-unit4-scoring.hf.space"
-
- # --- Basic Agent Definition ---
- # ----- THIS IS WERE YOU CAN BUILD WHAT YOU WANT ------
- class BasicAgent:
-     def __init__(self):
-         print("BasicAgent initialized.")
-     def __call__(self, question: str) -> str:
-         print(f"Agent received question (first 50 chars): {question[:50]}...")
-         fixed_answer = "This is a default answer."
-         print(f"Agent returning fixed answer: {fixed_answer}")
-         return fixed_answer
-
- def run_and_submit_all( profile: gr.OAuthProfile | None):
-     """
-     Fetches all questions, runs the BasicAgent on them, submits all answers,
-     and displays the results.
-     """
-     # --- Determine HF Space Runtime URL and Repo URL ---
-     space_id = os.getenv("SPACE_ID") # Get the SPACE_ID for sending link to the code
-
-     if profile:
-         username= f"{profile.username}"
-         print(f"User logged in: {username}")
-     else:
-         print("User not logged in.")
-         return "Please Login to Hugging Face with the button.", None
-
-     api_url = DEFAULT_API_URL
-     questions_url = f"{api_url}/questions"
-     submit_url = f"{api_url}/submit"
-
-     # 1. Instantiate Agent ( modify this part to create your agent)
-     try:
-         agent = BasicAgent()
-     except Exception as e:
-         print(f"Error instantiating agent: {e}")
-         return f"Error initializing agent: {e}", None
-     # In the case of an app running as a hugging Face space, this link points toward your codebase ( usefull for others so please keep it public)
-     agent_code = f"https://huggingface.co/spaces/{space_id}/tree/main"
-     print(agent_code)
-
-     # 2. Fetch Questions
-     print(f"Fetching questions from: {questions_url}")
-     try:
-         response = requests.get(questions_url, timeout=15)
-         response.raise_for_status()
-         questions_data = response.json()
-         if not questions_data:
-             print("Fetched questions list is empty.")
-             return "Fetched questions list is empty or invalid format.", None
-         print(f"Fetched {len(questions_data)} questions.")
-     except requests.exceptions.RequestException as e:
-         print(f"Error fetching questions: {e}")
-         return f"Error fetching questions: {e}", None
-     except requests.exceptions.JSONDecodeError as e:
-         print(f"Error decoding JSON response from questions endpoint: {e}")
-         print(f"Response text: {response.text[:500]}")
-         return f"Error decoding server response for questions: {e}", None
-     except Exception as e:
-         print(f"An unexpected error occurred fetching questions: {e}")
-         return f"An unexpected error occurred fetching questions: {e}", None
-
-     # 3. Run your Agent
-     results_log = []
-     answers_payload = []
-     print(f"Running agent on {len(questions_data)} questions...")
-     for item in questions_data:
-         task_id = item.get("task_id")
-         question_text = item.get("question")
-         if not task_id or question_text is None:
-             print(f"Skipping item with missing task_id or question: {item}")
-             continue
-         try:
-             submitted_answer = agent(question_text)
-             answers_payload.append({"task_id": task_id, "submitted_answer": submitted_answer})
-             results_log.append({"Task ID": task_id, "Question": question_text, "Submitted Answer": submitted_answer})
-         except Exception as e:
-             print(f"Error running agent on task {task_id}: {e}")
-             results_log.append({"Task ID": task_id, "Question": question_text, "Submitted Answer": f"AGENT ERROR: {e}"})
-
-     if not answers_payload:
-         print("Agent did not produce any answers to submit.")
-         return "Agent did not produce any answers to submit.", pd.DataFrame(results_log)
-
-     # 4. Prepare Submission
-     submission_data = {"username": username.strip(), "agent_code": agent_code, "answers": answers_payload}
-     status_update = f"Agent finished. Submitting {len(answers_payload)} answers for user '{username}'..."
-     print(status_update)
-
-     # 5. Submit
-     print(f"Submitting {len(answers_payload)} answers to: {submit_url}")
-     try:
-         response = requests.post(submit_url, json=submission_data, timeout=60)
-         response.raise_for_status()
-         result_data = response.json()
-         final_status = (
-             f"Submission Successful!\n"
-             f"User: {result_data.get('username')}\n"
-             f"Overall Score: {result_data.get('score', 'N/A')}% "
-             f"({result_data.get('correct_count', '?')}/{result_data.get('total_attempted', '?')} correct)\n"
-             f"Message: {result_data.get('message', 'No message received.')}"
-         )
-         print("Submission successful.")
-         results_df = pd.DataFrame(results_log)
-         return final_status, results_df
-     except requests.exceptions.HTTPError as e:
-         error_detail = f"Server responded with status {e.response.status_code}."
-         try:
-             error_json = e.response.json()
-             error_detail += f" Detail: {error_json.get('detail', e.response.text)}"
-         except requests.exceptions.JSONDecodeError:
-             error_detail += f" Response: {e.response.text[:500]}"
-         status_message = f"Submission Failed: {error_detail}"
-         print(status_message)
-         results_df = pd.DataFrame(results_log)
-         return status_message, results_df
-     except requests.exceptions.Timeout:
-         status_message = "Submission Failed: The request timed out."
-         print(status_message)
-         results_df = pd.DataFrame(results_log)
-         return status_message, results_df
-     except requests.exceptions.RequestException as e:
-         status_message = f"Submission Failed: Network error - {e}"
-         print(status_message)
-         results_df = pd.DataFrame(results_log)
-         return status_message, results_df
-     except Exception as e:
-         status_message = f"An unexpected error occurred during submission: {e}"
-         print(status_message)
-         results_df = pd.DataFrame(results_log)
-         return status_message, results_df
-
-
- # --- Build Gradio Interface using Blocks ---
- with gr.Blocks() as demo:
-     gr.Markdown("# Basic Agent Evaluation Runner")
-     gr.Markdown(
-         """
-         **Instructions:**
-
-         1. Please clone this space, then modify the code to define your agent's logic, the tools, the necessary packages, etc ...
-         2. Log in to your Hugging Face account using the button below. This uses your HF username for submission.
-         3. Click 'Run Evaluation & Submit All Answers' to fetch questions, run your agent, submit answers, and see the score.
-
-         ---
-         **Disclaimers:**
-         Once clicking on the "submit button, it can take quite some time ( this is the time for the agent to go through all the questions).
-         This space provides a basic setup and is intentionally sub-optimal to encourage you to develop your own, more robust solution. For instance for the delay process of the submit button, a solution could be to cache the answers and submit in a seperate action or even to answer the questions in async.
-         """
-     )
-
-     gr.LoginButton()
-
-     run_button = gr.Button("Run Evaluation & Submit All Answers")
-
-     status_output = gr.Textbox(label="Run Status / Submission Result", lines=5, interactive=False)
-     # Removed max_rows=10 from DataFrame constructor
-     results_table = gr.DataFrame(label="Questions and Agent Answers", wrap=True)
-
-     run_button.click(
-         fn=run_and_submit_all,
-         outputs=[status_output, results_table]
-     )
-
- if __name__ == "__main__":
-     print("\n" + "-"*30 + " App Starting " + "-"*30)
-     # Check for SPACE_HOST and SPACE_ID at startup for information
-     space_host_startup = os.getenv("SPACE_HOST")
-     space_id_startup = os.getenv("SPACE_ID") # Get SPACE_ID at startup
-
-     if space_host_startup:
-         print(f" SPACE_HOST found: {space_host_startup}")
-         print(f"   Runtime URL should be: https://{space_host_startup}.hf.space")
-     else:
-         print("ℹ️ SPACE_HOST environment variable not found (running locally?).")
-
-     if space_id_startup: # Print repo URLs if SPACE_ID is found
-         print(f" SPACE_ID found: {space_id_startup}")
-         print(f"   Repo URL: https://huggingface.co/spaces/{space_id_startup}")
-         print(f"   Repo Tree URL: https://huggingface.co/spaces/{space_id_startup}/tree/main")
-     else:
-         print("ℹ️ SPACE_ID environment variable not found (running locally?). Repo URL cannot be determined.")
-
-     print("-"*(60 + len(" App Starting ")) + "\n")
-
-     print("Launching Gradio Interface for Basic Agent Evaluation...")
-     demo.launch(debug=True, share=False)
+ import os
+ import gradio as gr
+ import requests
+ import pandas as pd
+
+ # --- Our Agent ---
+ from agent import gaia_agent
+
+ # Debugging flag: True when the DEBUG env var is set to "1", otherwise False.
+ DEBUG = os.getenv("DEBUG", "0") == "1"
+
+ # (Keep Constants as is)
+ # --- Constants ---
+ DEFAULT_API_URL = "https://agents-course-unit4-scoring.hf.space"
+
+ def run_and_submit_all(profile: gr.OAuthProfile | None):
+     """
+     Fetches all questions, runs the GAIA agent on them, submits all answers,
+     and displays the results.
+     """
+     # --- Determine HF Space Runtime URL and Repo URL ---
+     space_id = os.getenv("SPACE_ID")  # Get the SPACE_ID for sending a link to the code
+
+     if profile:
+         username = f"{profile.username}"
+         print(f"User logged in: {username}")
+     else:
+         print("User not logged in.")
+         return "Please Login to Hugging Face with the button.", None
+
+     api_url = DEFAULT_API_URL
+     questions_url = f"{api_url}/questions"
+     submit_url = f"{api_url}/submit"
+
+     # 1. Instantiate Agent (now using smolagents)
+     try:
+         agent = gaia_agent()
+         print("SmolAgent instantiated successfully.")
+     except Exception as e:
+         print(f"Error instantiating agent: {e}")
+         return f"Error initializing agent: {e}", None
+     # For an app running as a Hugging Face Space, this link points to your codebase (useful for others, so please keep it public)
+     agent_code = f"https://huggingface.co/spaces/{space_id}/tree/main"
+     print(agent_code)
+
+     # 2. Fetch Questions
+     print(f"Fetching questions from: {questions_url}")
+     import json
+
+     try:
+         response = requests.get(questions_url, timeout=15)
+         response.raise_for_status()
+         questions_data = response.json()
+         if not questions_data:
+             print("Fetched questions list is empty.")
+             return "Fetched questions list is empty or invalid format.", None
+         print(f"Fetched {len(questions_data)} questions.")
+     except json.JSONDecodeError as e:
+         print(f"Error decoding JSON response from questions endpoint: {e}")
+         print(f"Response text: {response.text[:500]}")
+         return f"Error decoding server response for questions: {e}", None
+     except requests.exceptions.RequestException as e:
+         print(f"Error fetching questions: {e}")
+         return f"Error fetching questions: {e}", None
+     except Exception as e:
+         print(f"An unexpected error occurred fetching questions: {e}")
+         return f"An unexpected error occurred fetching questions: {e}", None
+
+     # 3. Run the Agent
+     results_log = []
+     answers_payload = []
+     print(f"Running agent on {len(questions_data)} questions...")
+     for item in questions_data:
+         task_id = item.get("task_id")
+         question_text = item.get("question")
+         if not task_id or question_text is None:
+             print(f"Skipping item with missing task_id or question: {item}")
+             continue
+         try:
+             submitted_answer = agent(question_text)
+             # --- DEBUG LOGGING ---
+             if DEBUG:
+                 print(f"[DEBUG] Task {task_id}: Answer type: {type(submitted_answer)}, Value: {repr(submitted_answer)}")
+             else:
+                 print(f"[{task_id}] {question_text[:50]}... {submitted_answer[:40]}")
+
+             # Force string type here just in case (defensive)
+             submitted_answer = str(submitted_answer).strip()
+             answers_payload.append({"task_id": task_id, "submitted_answer": submitted_answer})
+             results_log.append({"Task ID": task_id, "Question": question_text, "Submitted Answer": submitted_answer})
+         except Exception as e:
+             print(f"Error running agent on task {task_id}: {e}")
+             results_log.append({"Task ID": task_id, "Question": question_text, "Submitted Answer": f"AGENT ERROR: {e}"})
+
+     if not answers_payload:
+         print("Agent did not produce any answers to submit.")
+         return "Agent did not produce any answers to submit.", pd.DataFrame(results_log)
+
+     # 4. Prepare Submission
+     submission_data = {"username": username.strip(), "agent_code": agent_code, "answers": answers_payload}
+     status_update = f"Agent finished. Submitting {len(answers_payload)} answers for user '{username}'..."
+     print(status_update)
+
+     # 5. Submit
+     print(f"Submitting {len(answers_payload)} answers to: {submit_url}")
+     try:
+         response = requests.post(submit_url, json=submission_data, timeout=60)
+         response.raise_for_status()
+         result_data = response.json()
+         final_status = (
+             f"Submission Successful!\n"
+             f"User: {result_data.get('username')}\n"
+             f"Overall Score: {result_data.get('score', 'N/A')}% "
+             f"({result_data.get('correct_count', '?')}/{result_data.get('total_attempted', '?')} correct)\n"
+             f"Message: {result_data.get('message', 'No message received.')}"
+         )
+         print("Submission successful.")
+         results_df = pd.DataFrame(results_log)
+         return final_status, results_df
+     except requests.exceptions.HTTPError as e:
+         error_detail = f"Server responded with status {e.response.status_code}."
+         try:
+             error_json = e.response.json()
+             error_detail += f" Detail: {error_json.get('detail', e.response.text)}"
+         except requests.exceptions.JSONDecodeError:
+             error_detail += f" Response: {e.response.text[:500]}"
+         status_message = f"Submission Failed: {error_detail}"
+         print(status_message)
+         results_df = pd.DataFrame(results_log)
+         return status_message, results_df
+     except requests.exceptions.Timeout:
+         status_message = "Submission Failed: The request timed out."
+         print(status_message)
+         results_df = pd.DataFrame(results_log)
+         return status_message, results_df
+     except requests.exceptions.RequestException as e:
+         status_message = f"Submission Failed: Network error - {e}"
+         print(status_message)
+         results_df = pd.DataFrame(results_log)
+         return status_message, results_df
+     except Exception as e:
+         status_message = f"An unexpected error occurred during submission: {e}"
+         print(status_message)
+         results_df = pd.DataFrame(results_log)
+         return status_message, results_df
+
+
+ # --- Build Gradio Interface using Blocks ---
+ with gr.Blocks() as demo:
+     gr.Markdown("# Agent Evaluation Runner")
+     gr.Markdown(
+         """
+         **Instructions:**
+
+         1. Please clone this space, then modify the code to define your agent's logic, the tools, the necessary packages, etc.
+         2. Log in to your Hugging Face account using the button below. This uses your HF username for submission.
+         3. Click 'Run Evaluation & Submit All Answers' to fetch questions, run your agent, submit answers, and see the score.
+
+         ---
+         **Disclaimers:**
+         Once you click the submit button, it can take quite some time (this is the time the agent needs to go through all the questions).
+         This space provides a basic setup and is intentionally sub-optimal to encourage you to develop your own, more robust solution. For instance, to address the long-running submit button, a solution could be to cache the answers and submit them in a separate action, or even to answer the questions asynchronously.
+         """
+     )
+
+     gr.LoginButton()
+
+     run_button = gr.Button("Run Evaluation & Submit All Answers")
+
+     status_output = gr.Textbox(label="Run Status / Submission Result", lines=5, interactive=False)
+     results_table = gr.DataFrame(label="Questions and Agent Answers", wrap=True)
+
+     run_button.click(
+         fn=run_and_submit_all,
+         outputs=[status_output, results_table]
+     )
+
+ if __name__ == "__main__":
+     print("\n" + "-"*30 + " App Starting " + "-"*30)
+     space_host_startup = os.getenv("SPACE_HOST")
+     space_id_startup = os.getenv("SPACE_ID")  # Get SPACE_ID at startup
+
+     if space_host_startup:
+         print(f"SPACE_HOST found: {space_host_startup}")
+         print(f"   Runtime URL should be: https://{space_host_startup}.hf.space")
+     else:
+         print("ℹ️ SPACE_HOST environment variable not found (running locally?).")
+
+     if space_id_startup:  # Print repo URLs if SPACE_ID is found
+         print(f"✅ SPACE_ID found: {space_id_startup}")
+         print(f"   Repo URL: https://huggingface.co/spaces/{space_id_startup}")
+         print(f"   Repo Tree URL: https://huggingface.co/spaces/{space_id_startup}/tree/main")
+     else:
+         print("ℹ️ SPACE_ID environment variable not found (running locally?). Repo URL cannot be determined.")
+
+     print("-"*(60 + len(" App Starting ")) + "\n")
+
+     print("Launching Gradio Interface for Agent Evaluation…")
+     demo.launch(debug=True, share=False)
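
To check the new agent wiring without going through the Gradio submit flow, the fetch-and-answer loop can be dry-run on a single question. A rough sketch under the same assumptions as app.py (the scoring API at DEFAULT_API_URL is reachable and the model credentials are configured); nothing is submitted:

# Hypothetical dry run, not part of this commit: answer one fetched question locally.
import requests
from agent import gaia_agent

DEFAULT_API_URL = "https://agents-course-unit4-scoring.hf.space"

agent = gaia_agent()
questions = requests.get(f"{DEFAULT_API_URL}/questions", timeout=15).json()
item = questions[0]                              # each item carries "task_id" and "question"
answer = str(agent(item["question"])).strip()    # same defensive str() as in app.py
print(item["task_id"], "->", answer[:80])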
requirements.txt CHANGED
@@ -1,2 +1,8 @@
- gradio
- requests
+ gradio
+ requests
+ pandas
+ smolagents[openai]
+ duckduckgo-search
+ youtube-transcript-api
+ pytesseract
+ pillow
tools.py ADDED
@@ -0,0 +1,170 @@
+ # Custom tools for the smolagents GAIA agent
+ from __future__ import annotations
+ import contextlib
+ import io
+ import os
+ from typing import Any, Dict, List
+
+ from smolagents import Tool
+
+ # ---- 1. PythonRunTool ------------------------------------------------------
+ class PythonRunTool(Tool):
+     name = "python_run"
+     description = """
+     Execute trusted Python code and return the printed output plus repr() of the _result variable, if it was set.
+     """
+     inputs = {
+         "code": {
+             "type": "string",
+             "description": "Python code to execute",
+             "required": True
+         }
+     }
+     output_type = "string"
+
+     def forward(self, code: str) -> str:
+         buf, ns = io.StringIO(), {}
+         last = None
+         try:
+             with contextlib.redirect_stdout(buf):
+                 # Use ns as both globals and locals so functions defined in the
+                 # snippet can see the snippet's own top-level names.
+                 exec(compile(code, "<agent-python>", "exec"), ns, ns)
+             last = ns.get("_result", None)
+         except Exception as e:
+             raise RuntimeError(f"PythonRunTool error: {e}") from e
+         out = buf.getvalue()
+         # Always return a string
+         result = (out + (repr(last) if last is not None else "")).strip()
+         return str(result)
+
+ # ---- 2. ExcelLoaderTool ----------------------------------------------------
+ class ExcelLoaderTool(Tool):
+     name = "load_spreadsheet"
+     description = """
+     Read a .xlsx/.xls/.csv file from disk and return its rows as a stringified list of dictionaries with string keys.
+     """
+     inputs = {
+         "path": {
+             "type": "string",
+             "description": "Path to .csv/.xls/.xlsx file",
+             "required": True
+         },
+         "sheet": {
+             "type": "string",
+             "description": "Sheet name or index (optional; Excel files only)",
+             "required": False,
+             "default": "",
+             "nullable": True
+         }
+     }
+     output_type = "string"
+
+     def forward(self, path: str, sheet: str | int | None = None) -> str:
+         import pandas as pd
+         if not os.path.isfile(path):
+             raise FileNotFoundError(path)
+         ext = os.path.splitext(path)[1].lower()
+         if sheet == "":
+             sheet = None
+         if ext == ".csv":
+             df = pd.read_csv(path)
+         else:
+             df = pd.read_excel(path, sheet_name=sheet)
+         if isinstance(df, dict):
+             # If the user did not specify a sheet, use the first one found
+             first_sheet = next(iter(df))
+             df = df[first_sheet]
+         records = [{str(k): v for k, v in row.items()} for row in df.to_dict(orient="records")]
+         # Always return a string
+         return str(records)
+
+ # ---- 3. YouTubeTranscriptTool ---------------------------------------------
+ class YouTubeTranscriptTool(Tool):
+     name = "youtube_transcript"
+     description = """
+     Return the subtitles of a YouTube URL using youtube-transcript-api.
+     """
+     inputs = {
+         "url": {
+             "type": "string",
+             "description": "YouTube URL",
+             "required": True
+         },
+         "lang": {
+             "type": "string",
+             "description": "Transcript language (default: en)",
+             "required": False,
+             "default": "en",
+             "nullable": True
+         }
+     }
+     output_type = "string"
+
+     def forward(self, url: str, lang: str = "en") -> str:
+         from urllib.parse import urlparse, parse_qs
+         from youtube_transcript_api import YouTubeTranscriptApi
+         vid = parse_qs(urlparse(url).query).get("v", [None])[0] or url.split("/")[-1]
+         data = YouTubeTranscriptApi.get_transcript(vid, languages=[lang, "en", "en-US", "en-GB"])
+         text = " ".join(d["text"] for d in data).strip()
+         return str(text)
+
+ # ---- 4. AudioTranscriptionTool --------------------------------------------
+ class AudioTranscriptionTool(Tool):
+     name = "transcribe_audio"
+     description = """
+     Transcribe an audio file with OpenAI Whisper and return plain text.
+     """
+     inputs = {
+         "path": {
+             "type": "string",
+             "description": "Path to audio file",
+             "required": True
+         },
+         "model": {
+             "type": "string",
+             "description": "Model name for transcription (default: whisper-1)",
+             "required": False,
+             "default": "whisper-1",
+             "nullable": True
+         }
+     }
+     output_type = "string"
+
+     def forward(self, path: str, model: str = "whisper-1") -> str:
+         import openai
+         if not os.path.isfile(path):
+             raise FileNotFoundError(path)
+         client = openai.OpenAI(api_key=os.getenv("OPENAI_API_KEY"))
+         with open(path, "rb") as fp:
+             transcript = client.audio.transcriptions.create(model=model, file=fp)
+         return str(transcript.text.strip())
+
+ # ---- 5. SimpleOCRTool ------------------------------------------------------
+ class SimpleOCRTool(Tool):
+     name = "image_ocr"
+     description = """
+     Return any text detected in an image via pytesseract OCR.
+     """
+     inputs = {
+         "path": {
+             "type": "string",
+             "description": "Path to image file",
+             "required": True
+         }
+     }
+     output_type = "string"
+
+     def forward(self, path: str) -> str:
+         from PIL import Image
+         import pytesseract
+         if not os.path.isfile(path):
+             raise FileNotFoundError(path)
+         return str(pytesseract.image_to_string(Image.open(path)).strip())
+
+ # ---------------------------------------------------------------------------
+ __all__ = [
+     "PythonRunTool",
+     "ExcelLoaderTool",
+     "YouTubeTranscriptTool",
+     "AudioTranscriptionTool",
+     "SimpleOCRTool",
+ ]
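
Each tool is an ordinary smolagents Tool subclass, so it can also be tried outside the agent loop by calling forward() directly. A small sketch using only the tools defined above; "data.csv" is a placeholder path, and the spreadsheet call raises FileNotFoundError if the file does not exist:

# Hypothetical direct checks of tools.py, not part of this commit.
from tools import PythonRunTool, ExcelLoaderTool

# Printed output plus repr() of _result come back as a single string ("hello\n4").
print(PythonRunTool().forward("print('hello'); _result = 2 + 2"))

# Rows are returned as a stringified list of dicts; "data.csv" is a placeholder path.
print(ExcelLoaderTool().forward("data.csv"))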