Spaces:
Sleeping
Sleeping
chibuikeeugene
committed on
Commit
·
625fa75
1
Parent(s):
d4d3f94
new update
Browse files
agent.py
CHANGED
@@ -1,11 +1,24 @@
|
|
1 |
# use a multimodal llm
|
|
|
2 |
from llama_index.core.agent.workflow import AgentWorkflow
|
|
|
3 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
4 |
|
5 |
-
def basic_agent(tool, llm_model):
    """Build a tool-calling agent.

    Wraps the given tool(s) and LLM in an ``AgentWorkflow`` able to decide
    which tool to invoke and answer the input prompt query string.
    """
    # Nothing else to set up, so return the constructed workflow directly.
    return AgentWorkflow.from_tools_or_functions(tools_or_functions=tool, llm=llm_model)
|
|
|
1 |
# use a multimodal llm
|
2 |
+
from typing import Any
|
3 |
from llama_index.core.agent.workflow import AgentWorkflow
|
4 |
+
from llama_index.core.workflow import Context
|
5 |
|
6 |
+
class Basic_Agent():
    """Base class for our basic agent.

    Holds the tool(s) and LLM model and exposes an async ``__call__`` that
    runs the agent workflow on an input prompt and returns the answer as a
    string.
    """

    def __init__(self, tool, llm_model) -> None:
        # tool: a tool (or list of tools) the agent may call.
        # llm_model: the LLM that drives tool selection and answering.
        self.tool = tool
        self.llm_model = llm_model
        # Fix: build the workflow ONCE here. The original rebuilt the
        # AgentWorkflow inside __call__ on every prompt, which is pure
        # repeated construction cost with no behavioral benefit.
        self._workflow = AgentWorkflow.from_tools_or_functions(
            tools_or_functions=self.tool,
            llm=self.llm_model,
        )

    async def __call__(self, input_prompt: str) -> str:
        """Run the agent on *input_prompt* and return its answer as a string.

        A basic agent with the ability to take decisions, act by calling the
        right tools, and provide an answer to the input prompt query string.
        """
        # A fresh Context per call keeps each prompt's run independent,
        # matching the original per-call behavior.
        ctx = Context(self._workflow)
        result = await self._workflow.run(input_prompt,
                                          ctx=ctx
                                          )
        return str(result)
|
24 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
app.py
CHANGED
@@ -1,40 +1,61 @@
|
|
1 |
-
|
2 |
import os
|
3 |
import gradio as gr
|
4 |
import requests
|
5 |
-
import inspect
|
6 |
import pandas as pd
|
7 |
-
from agent import
|
8 |
from tools import search_tool, image_tool, video_tool
|
9 |
from llama_index.llms.ollama import Ollama
|
10 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
11 |
|
|
|
12 |
|
13 |
|
14 |
# (Keep Constants as is)
|
15 |
# --- Constants ---
|
16 |
DEFAULT_API_URL = "https://agents-course-unit4-scoring.hf.space"
|
17 |
|
18 |
-
# # --- Basic Agent Definition ---
|
19 |
-
# # ----- THIS IS WERE YOU CAN BUILD WHAT YOU WANT ------
|
20 |
-
# class BasicAgent:
|
21 |
-
# def __init__(self):
|
22 |
-
# print("BasicAgent initialized.")
|
23 |
-
# def __call__(self, question: str) -> str:
|
24 |
-
# print(f"Agent received question (first 50 chars): {question[:50]}...")
|
25 |
-
# fixed_answer = "This is a default answer."
|
26 |
-
# print(f"Agent returning fixed answer: {fixed_answer}")
|
27 |
-
# return fixed_answer
|
28 |
tools = [search_tool, image_tool, video_tool]
|
29 |
|
30 |
-
llm = Ollama(
|
31 |
-
|
32 |
-
|
33 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
34 |
|
35 |
-
|
|
|
|
|
|
|
|
|
|
|
36 |
|
37 |
-
|
38 |
|
39 |
def run_and_submit_all( profile: gr.OAuthProfile | None):
|
40 |
"""
|
@@ -98,7 +119,7 @@ def run_and_submit_all( profile: gr.OAuthProfile | None):
|
|
98 |
print(f"Skipping item with missing task_id or question: {item}")
|
99 |
continue
|
100 |
try:
|
101 |
-
submitted_answer =
|
102 |
answers_payload.append({"task_id": task_id, "submitted_answer": submitted_answer})
|
103 |
results_log.append({"Task ID": task_id, "Question": question_text, "Submitted Answer": submitted_answer})
|
104 |
except Exception as e:
|
|
|
1 |
+
|
2 |
import os
|
3 |
import gradio as gr
|
4 |
import requests
|
|
|
5 |
import pandas as pd
|
6 |
+
from agent import Basic_Agent
|
7 |
from tools import search_tool, image_tool, video_tool
|
8 |
from llama_index.llms.ollama import Ollama
|
9 |
+
import asyncio
|
10 |
+
from together import Together
|
11 |
+
from dotenv import load_dotenv
|
12 |
+
from llama_index.llms.together import TogetherLLM
|
13 |
+
|
14 |
+
|
15 |
+
load_dotenv()
|
16 |
|
17 |
+
TOGETHER_API_KEY = os.getenv("TOGETHER_API_KEY")
|
18 |
|
19 |
|
20 |
# (Keep Constants as is)
|
21 |
# --- Constants ---
|
22 |
DEFAULT_API_URL = "https://agents-course-unit4-scoring.hf.space"
|
23 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
24 |
tools = [search_tool, image_tool, video_tool]
|
25 |
|
26 |
+
# llm = Ollama(
|
27 |
+
# model="llama3.1",
|
28 |
+
# request_timeout=120.0)
|
29 |
+
|
30 |
+
together_client = Together(timeout=120)
|
31 |
+
|
32 |
+
# creating an LLM wrapper around our client
|
33 |
+
|
34 |
+
# class TogetherLLMWrapper():
|
35 |
+
# def __init__(self, model = 'meta-llama/Meta-Llama-3.1-70B-Instruct-Turbo'):
|
36 |
+
# self.model = model
|
37 |
+
|
38 |
+
# def __call__(self, prompt:str) -> str:
|
39 |
+
# response = together_client.completions.create(
|
40 |
+
# prompt=prompt,
|
41 |
+
# model=self.model,
|
42 |
+
# max_tokens=1000,
|
43 |
+
# temperature=0.7,
|
44 |
+
# top_p=0.9,
|
45 |
+
# )
|
46 |
+
# return response.choices[0].text # type: ignore
|
47 |
+
|
48 |
+
# llm = TogetherLLMWrapper()
|
49 |
+
|
50 |
|
51 |
+
# Configuration for the Together-hosted model that drives the agent.
_llm_kwargs = {
    'model': 'meta-llama/Meta-Llama-3.1-70B-Instruct-Turbo',
    'api_key': TOGETHER_API_KEY,
    'temperature': 0.7,
    'max_tokens': 1000,
}
llm = TogetherLLM(**_llm_kwargs)

# Agent wired up with the tool list and the Together LLM above.
agent = Basic_Agent(tool=tools, llm_model=llm)
|
59 |
|
60 |
def run_and_submit_all( profile: gr.OAuthProfile | None):
|
61 |
"""
|
|
|
119 |
print(f"Skipping item with missing task_id or question: {item}")
|
120 |
continue
|
121 |
try:
|
122 |
+
submitted_answer = asyncio.run(agent(question_text))
|
123 |
answers_payload.append({"task_id": task_id, "submitted_answer": submitted_answer})
|
124 |
results_log.append({"Task ID": task_id, "Question": question_text, "Submitted Answer": submitted_answer})
|
125 |
except Exception as e:
|
requirements.txt
CHANGED
@@ -7,4 +7,6 @@ python-dotenv
|
|
7 |
llama-index
|
8 |
llama-index-llms-ollama
|
9 |
llama-index-multi-modal-llms-ollama
|
10 |
-
opencv-python
|
|
|
|
|
|
7 |
llama-index
|
8 |
llama-index-llms-ollama
|
9 |
llama-index-multi-modal-llms-ollama
|
10 |
+
opencv-python
|
11 |
+
together
|
12 |
+
llama-index-llms-together
|
tools.py
CHANGED
@@ -15,7 +15,6 @@ brave_api_key = os.getenv('BRAVE_API_KEY', 'No key')
|
|
15 |
mm_model = OllamaMultiModal(
|
16 |
model='llava',
|
17 |
temperature=0.7,
|
18 |
-
base_url="http://localhost:7860",
|
19 |
)
|
20 |
|
21 |
search_tool_spec = BraveSearchToolSpec(api_key=brave_api_key)
|
|
|
15 |
mm_model = OllamaMultiModal(
|
16 |
model='llava',
|
17 |
temperature=0.7,
|
|
|
18 |
)
|
19 |
|
20 |
search_tool_spec = BraveSearchToolSpec(api_key=brave_api_key)
|