anu151105 committed
Commit 9b047f5 · verified · 1 parent: 966bd38

Upload 6 files

Files changed (6)
  1. Dockerfile +10 -0
  2. agent_memory.py +181 -0
  2. agent_reasoning.py +224 -0
  4. agent_tasks.py +244 -0
  5. app.py +742 -0
  6. requirements.txt +12 -0
Dockerfile ADDED
@@ -0,0 +1,10 @@
+ FROM python:3.9
+
+ WORKDIR /app
+
+ COPY requirements.txt .
+ RUN pip install --no-cache-dir -r requirements.txt
+
+ COPY . .
+
+ CMD ["python", "app.py"]
agent_memory.py ADDED
@@ -0,0 +1,181 @@
+ import time
+ from typing import Dict, List, Any, Optional, Union
+
+ class MemoryManager:
+     """Memory management for the autonomous AI agent
+
+     This module provides capabilities for:
+     1. Storing and retrieving conversation history
+     2. Managing context windows
+     3. Implementing forgetting mechanisms
+     4. Prioritizing important information
+     """
+
+     def __init__(self, max_history_length: int = 20):
+         """Initialize the memory manager
+
+         Args:
+             max_history_length: Maximum number of conversation turns to store
+         """
+         self.conversation_history = []
+         self.max_history_length = max_history_length
+         self.important_facts = []
+         self.max_facts = 50
+         self.session_data = {}
+
+     def add_message(self, role: str, content: str) -> None:
+         """Add a message to the conversation history
+
+         Args:
+             role: The role of the message sender (user or assistant)
+             content: The content of the message
+         """
+         self.conversation_history.append({
+             "role": role,
+             "content": content,
+             "timestamp": time.time()
+         })
+
+         # Trim history if it gets too long
+         if len(self.conversation_history) > self.max_history_length * 2:
+             self.conversation_history = self.conversation_history[-self.max_history_length*2:]
+
+     def get_conversation_history(self, max_turns: Optional[int] = None) -> List[Dict[str, Any]]:
+         """Get the conversation history
+
+         Args:
+             max_turns: Maximum number of turns to retrieve (None for all)
+
+         Returns:
+             List of conversation messages
+         """
+         if max_turns is None:
+             return self.conversation_history
+         else:
+             # Calculate the number of messages (2 messages per turn)
+             max_messages = max_turns * 2
+             return self.conversation_history[-max_messages:]
+
+     def format_conversation_for_prompt(self, max_turns: Optional[int] = None) -> str:
+         """Format the conversation history for inclusion in a prompt
+
+         Args:
+             max_turns: Maximum number of turns to include
+
+         Returns:
+             Formatted conversation string
+         """
+         history = self.get_conversation_history(max_turns)
+         formatted = ""
+
+         for msg in history:
+             formatted += f"{msg['role']}: {msg['content']}\n"
+
+         return formatted
+
+     def add_important_fact(self, fact: str, source: str) -> None:
+         """Add an important fact to memory
+
+         Args:
+             fact: The important fact to remember
+             source: The source of the fact (e.g., user, inference)
+         """
+         self.important_facts.append({
+             "fact": fact,
+             "source": source,
+             "timestamp": time.time()
+         })
+
+         # Trim facts if they get too numerous
+         if len(self.important_facts) > self.max_facts:
+             self.important_facts = self.important_facts[-self.max_facts:]
+
+     def get_important_facts(self) -> List[Dict[str, Any]]:
+         """Get the list of important facts
+
+         Returns:
+             List of important facts
+         """
+         return self.important_facts
+
+     def format_facts_for_prompt(self) -> str:
+         """Format important facts for inclusion in a prompt
+
+         Returns:
+             Formatted facts string
+         """
+         if not self.important_facts:
+             return ""
+
+         formatted = "Important information I know about the user and context:\n"
+
+         # Sort facts by timestamp (newest first)
+         sorted_facts = sorted(self.important_facts, key=lambda x: x.get('timestamp', 0), reverse=True)
+
+         # Group facts by source
+         user_facts = [fact for fact in sorted_facts if fact.get('source') == 'user']
+         inference_facts = [fact for fact in sorted_facts if fact.get('source') == 'inference']
+
+         # Add user facts first (they're more reliable)
+         for i, fact in enumerate(user_facts):
+             formatted += f"{i+1}. {fact['fact']} (from user)\n"
+
+         # Then add inference facts
+         start_idx = len(user_facts) + 1
+         for i, fact in enumerate(inference_facts):
+             formatted += f"{start_idx + i}. {fact['fact']} (inferred)\n"
+
+         return formatted
+
+     def store_session_data(self, key: str, value: Any) -> None:
+         """Store data for the current session
+
+         Args:
+             key: The key to store the data under
+             value: The data to store
+         """
+         self.session_data[key] = {
+             "value": value,
+             "timestamp": time.time()
+         }
+
+     def get_session_data(self, key: str) -> Optional[Any]:
+         """Retrieve data from the current session
+
+         Args:
+             key: The key to retrieve data for
+
+         Returns:
+             The stored data, or None if not found
+         """
+         if key in self.session_data:
+             return self.session_data[key]["value"]
+         else:
+             return None
+
+     def clear_conversation_history(self) -> None:
+         """Clear the conversation history"""
+         self.conversation_history = []
+
+     def clear_all_memory(self) -> None:
+         """Clear all memory (conversation history, facts, and session data)"""
+         self.conversation_history = []
+         self.important_facts = []
+         self.session_data = {}
+
+     def get_memory_stats(self) -> Dict[str, Any]:
+         """Get statistics about the agent's memory usage
+
+         Returns:
+             Dictionary containing memory statistics
+         """
+         return {
+             "conversation_turns": len(self.conversation_history) // 2,
+             "important_facts": len(self.important_facts),
+             "session_data_keys": list(self.session_data.keys()),
+             "memory_usage": {
+                 "conversation": len(str(self.conversation_history)),
+                 "facts": len(str(self.important_facts)),
+                 "session": len(str(self.session_data))
+             }
+         }
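
A quick usage sketch for MemoryManager (not part of the commit; the messages, fact strings, and variable names are illustrative). The class has no model dependencies, so this runs with the standard library alone:

from agent_memory import MemoryManager

memory = MemoryManager(max_history_length=20)
memory.add_message("user", "Hi, my name is Anu and I work as a data analyst.")
memory.add_message("assistant", "Nice to meet you, Anu!")
memory.add_important_fact("User's name is Anu", "user")  # illustrative fact

# Render memory for inclusion in a model prompt
print(memory.format_facts_for_prompt())
print(memory.format_conversation_for_prompt(max_turns=5))
print(memory.get_memory_stats())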
agent_reasoning.py ADDED
@@ -0,0 +1,224 @@
+ import re
+ import torch
+ from typing import Dict, List, Any, Optional, Union
+
+ class ReasoningEngine:
+     """Reasoning engine for the autonomous AI agent
+
+     This module provides advanced reasoning capabilities including:
+     1. Chain-of-thought reasoning
+     2. Task decomposition
+     3. Self-reflection
+     4. Decision making
+     """
+
+     def __init__(self, model, tokenizer, device="cpu"):
+         """Initialize the reasoning engine
+
+         Args:
+             model: The language model to use for reasoning
+             tokenizer: The tokenizer for the language model
+             device: The device to run the model on (cpu or cuda)
+         """
+         self.model = model
+         self.tokenizer = tokenizer
+         self.device = device
+
+     def generate_text(self, prompt: str, max_length: int = 512, temperature: float = 0.7) -> str:
+         """Generate text using the language model
+
+         Args:
+             prompt: The input prompt for the model
+             max_length: Maximum length of the generated text
+             temperature: Temperature for sampling (higher = more random)
+
+         Returns:
+             Generated text response
+         """
+         inputs = self.tokenizer(prompt, return_tensors="pt").to(self.device)
+
+         # Generate response
+         with torch.no_grad():
+             outputs = self.model.generate(
+                 inputs["input_ids"],
+                 max_length=max_length,
+                 num_return_sequences=1,
+                 temperature=temperature,
+                 top_p=0.9,
+                 do_sample=True
+             )
+
+         # Decode and return the response
+         response = self.tokenizer.decode(outputs[0], skip_special_tokens=True)
+         return response
+
+     def chain_of_thought(self, query: str) -> Dict[str, str]:
+         """Implement chain-of-thought reasoning
+
+         Args:
+             query: User query to reason about
+
+         Returns:
+             Dictionary containing reasoning steps and final answer
+         """
+         # Construct a prompt that encourages step-by-step reasoning
+         reasoning_prompt = f"""I am an autonomous AI agent called ResuRank. I need to think through this step by step to provide the best response.
+
+ Query: {query}
+
+ Let me reason through this carefully:
+ 1. First, I'll identify the key parts of this query.
+ 2. Then, I'll consider what information I need to answer it.
+ 3. Next, I'll analyze the implications and context.
+ 4. Finally, I'll formulate a comprehensive response.
+
+ Step-by-step reasoning:
+ 1. """
+
+         # Generate the reasoning steps
+         reasoning = self.generate_text(reasoning_prompt, max_length=1024)
+
+         # Analyze the reasoning for completeness
+         if len(reasoning.split('\n')) < 3 or len(reasoning) < 100:
+             # If reasoning is too short, try to expand it
+             expansion_prompt = f"""My current reasoning is:
+ {reasoning}
+
+ Let me expand on this with more detailed analysis:
+ """
+             additional_reasoning = self.generate_text(expansion_prompt, max_length=512)
+             reasoning = f"{reasoning}\n\n{additional_reasoning}"
+
+         # Extract the final answer after reasoning
+         answer_prompt = f"""Based on my detailed reasoning:
+ {reasoning}
+
+ I will now provide a clear, helpful, and comprehensive response to the query: {query}
+
+ My final answer is:"""
+
+         final_answer = self.generate_text(answer_prompt, max_length=768)
+
+         # Check if the answer addresses the query adequately
+         if len(final_answer) < 50:
+             # If answer is too short, try to improve it
+             improvement_prompt = f"""My current answer is:
+ {final_answer}
+
+ This answer is too brief. Let me provide a more comprehensive response to the query: {query}
+
+ Improved answer:"""
+             final_answer = self.generate_text(improvement_prompt, max_length=768)
+
+         return {
+             "reasoning": reasoning,
+             "answer": final_answer
+         }
+
+     def decompose_task(self, task_description: str) -> List[str]:
+         """Decompose a complex task into smaller subtasks
+
+         Args:
+             task_description: Description of the task to decompose
+
+         Returns:
+             List of subtask descriptions
+         """
+         decomposition_prompt = f"""I need to break down this complex task into smaller, manageable subtasks:
+
+ Task: {task_description}
+
+ Subtasks:
+ 1. """
+
+         decomposition = self.generate_text(decomposition_prompt, max_length=1024)
+
+         # Parse the decomposition into a list of subtasks
+         subtasks = []
+         for line in decomposition.split('\n'):
+             line = line.strip()
+             if line and (line[0].isdigit() or line.startswith('- ')):
+                 # Remove numbering or bullet points
+                 cleaned_line = re.sub(r'^\d+\.\s*|^-\s*', '', line).strip()
+                 if cleaned_line:
+                     subtasks.append(cleaned_line)
+
+         return subtasks
+
+     def self_reflect(self, action: str, outcome: str) -> Dict[str, str]:
+         """Perform self-reflection on an action and its outcome
+
+         Args:
+             action: The action that was taken
+             outcome: The outcome of the action
+
+         Returns:
+             Dictionary containing reflection and improvement suggestions
+         """
+         reflection_prompt = f"""I need to reflect on this action and its outcome:
+
+ Action: {action}
+ Outcome: {outcome}
+
+ Reflection:
+ """
+
+         reflection = self.generate_text(reflection_prompt, max_length=512)
+
+         improvement_prompt = f"""Based on my reflection:
+ {reflection}
+
+ How I can improve next time:
+ """
+
+         improvement = self.generate_text(improvement_prompt, max_length=512)
+
+         return {
+             "reflection": reflection,
+             "improvement": improvement
+         }
+
+     def make_decision(self, options: List[str], context: str) -> Dict[str, Any]:
+         """Make a decision among multiple options
+
+         Args:
+             options: List of options to choose from
+             context: Context information for the decision
+
+         Returns:
+             Dictionary containing the chosen option and reasoning
+         """
+         options_text = "\n".join([f"{i+1}. {option}" for i, option in enumerate(options)])
+
+         decision_prompt = f"""I need to make a decision based on the following context and options:
+
+ Context: {context}
+
+ Options:
+ {options_text}
+
+ Let me analyze each option:
+ """
+
+         analysis = self.generate_text(decision_prompt, max_length=1024)
+
+         conclusion_prompt = f"""Based on my analysis:
+ {analysis}
+
+ The best option is number:"""
+
+         conclusion = self.generate_text(conclusion_prompt, max_length=128)
+
+         # Try to extract the chosen option number
+         try:
+             option_num = int(re.search(r'\d+', conclusion).group()) - 1
+             chosen_option = options[option_num] if 0 <= option_num < len(options) else options[0]
+         except (AttributeError, ValueError, IndexError):
+             # Default to first option if parsing fails
+             chosen_option = options[0]
+
+         return {
+             "chosen_option": chosen_option,
+             "analysis": analysis,
+             "conclusion": conclusion
+         }
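
ReasoningEngine is model-agnostic: it only calls tokenizer(...), model.generate(...), and tokenizer.decode(...). A minimal wiring sketch, assuming a small seq2seq checkpoint (google/flan-t5-small is chosen here only to keep the download small; it is not the model the app ships with):

import torch
from transformers import AutoTokenizer, AutoModelForSeq2SeqLM
from agent_reasoning import ReasoningEngine

device = "cuda" if torch.cuda.is_available() else "cpu"
tokenizer = AutoTokenizer.from_pretrained("google/flan-t5-small")  # assumed test model
model = AutoModelForSeq2SeqLM.from_pretrained("google/flan-t5-small").to(device)

engine = ReasoningEngine(model, tokenizer, device)
result = engine.chain_of_thought("Which resume format suits a career changer?")
print(result["reasoning"])
print(result["answer"])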
agent_tasks.py ADDED
@@ -0,0 +1,244 @@
+ import os
+ import re
+ import time
+ import json
+ from typing import Dict, List, Any, Optional, Union
+
+ class TaskExecutor:
+     """Task execution engine for the autonomous AI agent
+
+     This module provides capabilities for executing various tasks including:
+     1. Task planning and execution
+     2. Progress tracking
+     3. Result formatting
+     4. Error handling
+     """
+
+     def __init__(self, reasoning_engine):
+         """Initialize the task executor
+
+         Args:
+             reasoning_engine: The reasoning engine to use for task planning
+         """
+         self.reasoning_engine = reasoning_engine
+         self.current_task = None
+         self.task_status = "idle"
+         self.task_history = []
+         self.max_history_length = 10
+
+     def execute_task(self, task_description: str) -> Dict[str, Any]:
+         """Execute a task based on the description
+
+         Args:
+             task_description: Description of the task to execute
+
+         Returns:
+             Dictionary containing task results and status
+         """
+         self.current_task = task_description
+         self.task_status = "in_progress"
+         start_time = time.time()
+
+         try:
+             # First, analyze the task to understand its requirements and constraints
+             analysis_prompt = f"""I need to analyze this task to understand its requirements and constraints:
+
+ Task: {task_description}
+
+ Task Analysis:
+ 1. What is the main objective of this task?
+ 2. What are the key requirements?
+ 3. What constraints or limitations should I be aware of?
+ 4. What resources or information do I need to complete this task?
+
+ Analysis:"""
+
+             task_analysis = self.reasoning_engine.generate_text(analysis_prompt, max_length=768)
+
+             # Decompose the task into subtasks with the analysis in mind
+             decomposition_prompt = f"""Based on my analysis of the task:
+ {task_analysis}
+
+ I need to break down this task into smaller, manageable subtasks:
+
+ Task: {task_description}
+
+ Subtasks:
+ 1. """
+
+             decomposition = self.reasoning_engine.generate_text(decomposition_prompt, max_length=1024)
+
+             # Parse the decomposition into a list of subtasks
+             subtasks = []
+             for line in decomposition.split('\n'):
+                 line = line.strip()
+                 if line and (line[0].isdigit() or line.startswith('- ')):
+                     # Remove numbering or bullet points
+                     cleaned_line = re.sub(r'^\d+\.\s*|^-\s*', '', line).strip()
+                     if cleaned_line:
+                         subtasks.append(cleaned_line)
+
+             # If parsing failed, use the reasoning engine's decompose_task method as fallback
+             if not subtasks:
+                 subtasks = self.reasoning_engine.decompose_task(task_description)
+
+             # Generate a detailed plan for executing the task
+             planning_prompt = f"""I need to create a detailed plan to execute this task:
+ {task_description}
+
+ Task Analysis:
+ {task_analysis}
+
+ The task has been broken down into these subtasks:
+ {json.dumps(subtasks, indent=2)}
+
+ Detailed step-by-step plan (including how to handle potential issues):
+ 1. """
+
+             plan = self.reasoning_engine.generate_text(planning_prompt, max_length=1024)
+
+             # Track progress of each subtask with more detailed execution
+             subtask_results = []
+             for i, subtask in enumerate(subtasks):
+                 # Update status
+                 self.task_status = f"in_progress ({i+1}/{len(subtasks)})"
+
+                 # Execute the subtask with more context
+                 subtask_prompt = f"""I am executing this subtask as part of the larger task:
+
+ Main Task: {task_description}
+
+ Current Subtask ({i+1}/{len(subtasks)}): {subtask}
+
+ Previous Results: {json.dumps([r['result'] for r in subtask_results], indent=2) if subtask_results else 'None yet'}
+
+ I will now execute this subtask carefully and report the detailed results:"""
+
+                 result = self.reasoning_engine.generate_text(subtask_prompt, max_length=768)
+
+                 # Evaluate the quality of the result
+                 evaluation_prompt = f"""I need to evaluate the quality of my execution of this subtask:
+
+ Subtask: {subtask}
+
+ Execution Result: {result}
+
+ Evaluation (rate from 1-10 and explain):"""
+
+                 evaluation = self.reasoning_engine.generate_text(evaluation_prompt, max_length=256)
+
+                 subtask_results.append({
+                     "subtask": subtask,
+                     "result": result,
+                     "evaluation": evaluation
+                 })
+
+             # Compile the final results with synthesis
+             compilation_prompt = f"""I have executed all subtasks for the main task:
+ {task_description}
+
+ Here are the results of each subtask:
+ {json.dumps(subtask_results, indent=2)}
+
+ I need to synthesize these results into a coherent final result that addresses the original task completely.
+
+ Final synthesized result:"""
+
+             final_result = self.reasoning_engine.generate_text(compilation_prompt, max_length=1024)
+
+             # Self-reflection on the task execution
+             reflection_prompt = f"""I need to reflect on my execution of this task:
+
+ Task: {task_description}
+
+ My approach: {plan}
+
+ Final result: {final_result}
+
+ Reflection on what went well and what could be improved:"""
+
+             reflection = self.reasoning_engine.generate_text(reflection_prompt, max_length=512)
+
+             self.task_status = "completed"
+             execution_time = time.time() - start_time
+
+             # Add to task history
+             task_record = {
+                 "task": task_description,
+                 "plan": plan,
+                 "subtasks": subtask_results,
+                 "result": final_result,
+                 "reflection": reflection,
+                 "status": self.task_status,
+                 "execution_time": execution_time,
+                 "timestamp": time.time()
+             }
+
+             self.task_history.append(task_record)
+
+             # Trim history if it gets too long
+             if len(self.task_history) > self.max_history_length:
+                 self.task_history = self.task_history[-self.max_history_length:]
+
+             return task_record
+
+         except Exception as e:
+             self.task_status = "failed"
+             error_message = str(e)
+
+             # Add failed task to history
+             task_record = {
+                 "task": task_description,
+                 "status": self.task_status,
+                 "error": error_message,
+                 "timestamp": time.time()
+             }
+
+             self.task_history.append(task_record)
+
+             return task_record
+
+     def get_task_status(self) -> Dict[str, Any]:
+         """Get the current status of task execution
+
+         Returns:
+             Dictionary containing task status information
+         """
+         return {
+             "current_task": self.current_task,
+             "status": self.task_status,
+             "history_length": len(self.task_history)
+         }
+
+     def get_task_history(self) -> List[Dict[str, Any]]:
+         """Get the history of executed tasks
+
+         Returns:
+             List of task records
+         """
+         return self.task_history
+
+     def cancel_task(self) -> Dict[str, Any]:
+         """Cancel the currently executing task
+
+         Returns:
+             Dictionary containing cancellation status
+         """
+         if self.task_status.startswith("in_progress"):  # status may carry a "(i/n)" progress suffix
+             self.task_status = "cancelled"
+
+             # Update the last task record in history
+             if self.task_history:
+                 self.task_history[-1]["status"] = "cancelled"
+
+             return {
+                 "task": self.current_task,
+                 "status": self.task_status,
+                 "message": "Task cancelled successfully"
+             }
+         else:
+             return {
+                 "task": self.current_task,
+                 "status": self.task_status,
+                 "message": "No task in progress to cancel"
+             }
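
TaskExecutor only touches two methods on the object it is given, generate_text and decompose_task, so the control flow can be exercised with a stub before any model is loaded. A sketch under that assumption (the stub class and its canned replies are illustrative, not part of the commit):

from agent_tasks import TaskExecutor

class StubEngine:
    # Stands in for ReasoningEngine; returns canned text instead of model output
    def generate_text(self, prompt, max_length=512, temperature=0.7):
        return "1. Draft an outline\n2. Fill in each section"

    def decompose_task(self, task_description):
        return ["Draft an outline", "Fill in each section"]

executor = TaskExecutor(StubEngine())
record = executor.execute_task("Write a short project status report")
print(record["status"])        # "completed"
print(record["result"])
print(executor.get_task_status())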
app.py ADDED
@@ -0,0 +1,742 @@
+ import os
+ import gradio as gr
+ import torch
+ import re
+ import time
+ from transformers import AutoTokenizer, AutoModelForSeq2SeqLM  # flan-t5 is an encoder-decoder model, so use the seq2seq auto class
+ from huggingface_hub import hf_hub_download, snapshot_download
+ import json
+ from typing import Dict, List, Any, Optional, Union
+
+ # Import agent modules
+ from agent_reasoning import ReasoningEngine
+ from agent_tasks import TaskExecutor
+ from agent_memory import MemoryManager
+
+ class ResuRankAgent:
+     """Autonomous AI Agent similar to Manus AI
+
+     This agent can:
+     1. Process user queries and generate responses
+     2. Perform reasoning through chain-of-thought
+     3. Execute tasks based on user instructions
+     4. Maintain conversation context
+     """
+
+     def __init__(self, model_id="google/flan-t5-large", use_cache=True):
+         """Initialize the ResuRank Agent
+
+         Args:
+             model_id: Hugging Face model ID to use for the agent
+             use_cache: Whether to use cached models from Hugging Face Hub
+         """
+         self.model_id = model_id
+         self.device = "cuda" if torch.cuda.is_available() else "cpu"
+         print(f"Using device: {self.device}")
+
+         # Load model and tokenizer from Hugging Face Hub
+         print(f"Loading model {model_id} from Hugging Face Hub...")
+         try:
+             # Use cached models if available
+             if use_cache:
+                 print("Using cached models if available...")
+                 self.tokenizer = AutoTokenizer.from_pretrained(model_id, cache_dir="./.cache")
+                 self.model = AutoModelForSeq2SeqLM.from_pretrained(
+                     model_id,
+                     torch_dtype=torch.float16 if self.device == "cuda" else torch.float32,
+                     low_cpu_mem_usage=True,
+                     device_map="auto",
+                     cache_dir="./.cache"
+                 )
+             else:
+                 # Download models directly from Hugging Face Hub
+                 print("Downloading models from Hugging Face Hub...")
+                 self.tokenizer = AutoTokenizer.from_pretrained(model_id)
+                 self.model = AutoModelForSeq2SeqLM.from_pretrained(
+                     model_id,
+                     torch_dtype=torch.float16 if self.device == "cuda" else torch.float32,
+                     low_cpu_mem_usage=True,
+                     device_map="auto"
+                 )
+
+             print(f"Successfully loaded model {model_id}")
+         except Exception as e:
+             print(f"Error loading model: {str(e)}")
+             print("Falling back to smaller model...")
+             fallback_model = "google/flan-t5-base"
+             self.model_id = fallback_model
+             self.tokenizer = AutoTokenizer.from_pretrained(fallback_model, cache_dir="./.cache")
+             self.model = AutoModelForSeq2SeqLM.from_pretrained(
+                 fallback_model,
+                 torch_dtype=torch.float16 if self.device == "cuda" else torch.float32,
+                 low_cpu_mem_usage=True,
+                 device_map="auto",
+                 cache_dir="./.cache"
+             )
+
+         # Initialize agent components
+         self.reasoning_engine = ReasoningEngine(self.model, self.tokenizer, self.device)
+         self.memory_manager = MemoryManager(max_history_length=20)
+         self.task_executor = TaskExecutor(self.reasoning_engine)
+
+     def process_query(self, query: str, use_reasoning: bool = True) -> Dict[str, Any]:
+         """Process a user query and generate a response
+
+         Args:
+             query: User query text
+             use_reasoning: Whether to use chain-of-thought reasoning
+
+         Returns:
+             Dictionary containing response and metadata
+         """
+         # Add query to conversation history
+         self.memory_manager.add_message("user", query)
+
+         start_time = time.time()
+
+         # Check if this is a task execution request
+         is_task_request = self._is_task_request(query)
+
+         # Process the query with appropriate method
+         if is_task_request:
+             # Handle as a task execution request
+             task_result = self.execute_task(query)
+             response = f"I've executed your task. {task_result.get('result', '')}\n\nStatus: {task_result.get('status', 'unknown')}"
+             reasoning = task_result.get('plan', '')
+         elif use_reasoning:
+             # Use chain-of-thought reasoning
+             # Enhance with context from memory
+             facts = self.memory_manager.format_facts_for_prompt()
+             context = self.memory_manager.format_conversation_for_prompt(max_turns=5)
+
+             # Create an enhanced query with context
+             enhanced_query = f"{facts}\n\nRecent conversation:\n{context}\n\nCurrent query: {query}"
+
+             result = self.reasoning_engine.chain_of_thought(enhanced_query)
+             response = result["answer"]
+             reasoning = result["reasoning"]
+         else:
+             # Simple response generation without reasoning
+             conversation_prompt = self.memory_manager.format_conversation_for_prompt(max_turns=10)
+             facts_prompt = self.memory_manager.format_facts_for_prompt()
+
+             prompt = f"{facts_prompt}\n\n{conversation_prompt}\nassistant: "
+
+             response = self.reasoning_engine.generate_text(prompt)
+             reasoning = None
+
+         # Add response to conversation history
+         self.memory_manager.add_message("assistant", response)
+
+         # Extract any important facts from the conversation
+         self._extract_facts(query, response)
+
+         processing_time = time.time() - start_time
+
+         return {
+             "response": response,
+             "reasoning": reasoning,
+             "processing_time": processing_time,
+             "timestamp": time.time()
+         }
+
+     def _is_task_request(self, query: str) -> bool:
+         """Determine if a query is a task execution request
+
+         Args:
+             query: The user query
+
+         Returns:
+             True if the query appears to be a task request, False otherwise
+         """
+         # Keywords that suggest a task execution request
+         task_keywords = [
+             "execute", "perform", "run", "do", "complete", "finish",
+             "task", "job", "work", "action", "operation", "function",
+             "can you", "please", "help me", "i need", "i want"
+         ]
+
+         # Check if query contains task-related keywords
+         query_lower = query.lower()
+         for keyword in task_keywords:
+             if keyword in query_lower:
+                 return True
+
+         return False
+
+     def _extract_facts(self, query: str, response: str) -> None:
+         """Extract important facts from the conversation
+
+         Args:
+             query: User query
+             response: Agent response
+         """
+         # Extract personal information
+         self._extract_personal_info(query)
+
+         # Extract preferences
+         self._extract_preferences(query)
+
+         # Extract task-related information
+         self._extract_task_info(query)
+
+         # Use the reasoning engine to identify important facts
+         self._extract_with_reasoning(query, response)
+
+     def _extract_personal_info(self, text: str) -> None:
+         """Extract personal information from text
+
+         Args:
+             text: Text to extract information from
+         """
+         text_lower = text.lower()
+
+         # Extract name
+         if "my name is" in text_lower or "i am called" in text_lower or "i'm called" in text_lower:
+             name_patterns = [
+                 r"my name is ([\w\s]+)[.\,]?",
+                 r"i am called ([\w\s]+)[.\,]?",
+                 r"i'm called ([\w\s]+)[.\,]?"
+             ]
+
+             for pattern in name_patterns:
+                 name_match = re.search(pattern, text_lower)
+                 if name_match:
+                     name = name_match.group(1).strip()
+                     self.memory_manager.add_important_fact(f"User's name is {name}", "user")
+                     break
+
+         # Extract location
+         if "i am from" in text_lower or "i'm from" in text_lower or "i live in" in text_lower:
+             location_patterns = [
+                 r"i am from ([\w\s]+)[.\,]?",
+                 r"i'm from ([\w\s]+)[.\,]?",
+                 r"i live in ([\w\s]+)[.\,]?"
+             ]
+
+             for pattern in location_patterns:
+                 location_match = re.search(pattern, text_lower)
+                 if location_match:
+                     location = location_match.group(1).strip()
+                     self.memory_manager.add_important_fact(f"User is from {location}", "user")
+                     break
+
+         # Extract profession/occupation
+         if "i work as" in text_lower or "i am a" in text_lower or "i'm a" in text_lower:
+             profession_patterns = [
+                 r"i work as a[n]? ([\w\s]+)[.\,]?",
+                 r"i am a[n]? ([\w\s]+)[.\,]?",
+                 r"i'm a[n]? ([\w\s]+)[.\,]?"
+             ]
+
+             for pattern in profession_patterns:
+                 profession_match = re.search(pattern, text_lower)
+                 if profession_match:
+                     profession = profession_match.group(1).strip()
+                     self.memory_manager.add_important_fact(f"User works as a {profession}", "user")
+                     break
+
+     def _extract_preferences(self, text: str) -> None:
+         """Extract user preferences from text
+
+         Args:
+             text: Text to extract information from
+         """
+         text_lower = text.lower()
+
+         # Extract likes
+         if "i like" in text_lower or "i love" in text_lower or "i enjoy" in text_lower:
+             like_patterns = [
+                 r"i like ([\w\s]+)[.\,]?",
+                 r"i love ([\w\s]+)[.\,]?",
+                 r"i enjoy ([\w\s]+)[.\,]?"
+             ]
+
+             for pattern in like_patterns:
+                 like_match = re.search(pattern, text_lower)
+                 if like_match:
+                     like = like_match.group(1).strip()
+                     self.memory_manager.add_important_fact(f"User likes {like}", "user")
+                     break
+
+         # Extract dislikes
+         if "i don't like" in text_lower or "i hate" in text_lower or "i dislike" in text_lower:
+             dislike_patterns = [
+                 r"i don't like ([\w\s]+)[.\,]?",
+                 r"i hate ([\w\s]+)[.\,]?",
+                 r"i dislike ([\w\s]+)[.\,]?"
+             ]
+
+             for pattern in dislike_patterns:
+                 dislike_match = re.search(pattern, text_lower)
+                 if dislike_match:
+                     dislike = dislike_match.group(1).strip()
+                     self.memory_manager.add_important_fact(f"User dislikes {dislike}", "user")
+                     break
+
+     def _extract_task_info(self, text: str) -> None:
+         """Extract task-related information from text
+
+         Args:
+             text: Text to extract information from
+         """
+         text_lower = text.lower()
+
+         # Extract goals
+         if "my goal is" in text_lower or "i want to" in text_lower or "i need to" in text_lower:
+             goal_patterns = [
+                 r"my goal is to ([\w\s]+)[.\,]?",
+                 r"i want to ([\w\s]+)[.\,]?",
+                 r"i need to ([\w\s]+)[.\,]?"
+             ]
+
+             for pattern in goal_patterns:
+                 goal_match = re.search(pattern, text_lower)
+                 if goal_match:
+                     goal = goal_match.group(1).strip()
+                     self.memory_manager.add_important_fact(f"User's goal is to {goal}", "user")
+                     break
+
+     def _extract_with_reasoning(self, query: str, response: str) -> None:
+         """Use the reasoning engine to extract important facts
+
+         Args:
+             query: User query
+             response: Agent response
+         """
+         # Only use this for longer queries to avoid unnecessary processing
+         if len(query) < 50:
+             return
+
+         extraction_prompt = f"""Extract important facts from this conversation:
+
+ User: {query}
+ Assistant: {response}
+
+ List of important facts (one per line):
+ 1. """
+
+         try:
+             facts_text = self.reasoning_engine.generate_text(extraction_prompt, max_length=256)
+
+             # Parse the facts
+             for line in facts_text.split('\n'):
+                 line = line.strip()
+                 if line and (line[0].isdigit() or line.startswith('- ')):
+                     # Remove numbering or bullet points
+                     fact = re.sub(r'^\d+\.\s*|^-\s*', '', line).strip()
+                     if fact and len(fact) > 10:  # Only add substantial facts
+                         self.memory_manager.add_important_fact(fact, "inference")
+         except Exception as e:
+             print(f"Error extracting facts with reasoning: {str(e)}")
+             # Continue without adding facts
+
+
+
+     def execute_task(self, task_description: str) -> Dict[str, Any]:
+         """Execute a task based on the description
+
+         Args:
+             task_description: Description of the task to execute
+
+         Returns:
+             Dictionary containing task results and status
+         """
+         return self.task_executor.execute_task(task_description)
+
+     def get_status(self) -> Dict[str, Any]:
+         """Get the current status of the agent
+
+         Returns:
+             Dictionary containing agent status information
+         """
+         memory_stats = self.memory_manager.get_memory_stats()
+         task_status = self.task_executor.get_task_status()
+
+         return {
+             "model_id": self.model_id,
+             "device": self.device,
+             "conversation_turns": memory_stats["conversation_turns"],
+             "important_facts": memory_stats["important_facts"],
+             "current_task": task_status["current_task"],
+             "task_status": task_status["status"]
+         }
+
+     def clear_conversation(self) -> None:
+         """Clear the conversation history"""
+         self.memory_manager.clear_conversation_history()
+
+     def process_document(self, document_text: str, document_type: str = "resume") -> Dict[str, Any]:
+         """Process a document (like a resume) and extract information
+
+         Args:
+             document_text: The text content of the document
+             document_type: The type of document (e.g., "resume", "job_description")
+
+         Returns:
+             Dictionary containing extracted information and analysis
+         """
+         self.memory_manager.store_session_data(f"last_{document_type}", document_text)
+         start_time = time.time()
+
+         # Create a prompt for document analysis
+         analysis_prompt = f"""I need to analyze this {document_type} document and extract key information:
+
+ {document_text}
+
+ Detailed analysis:"""
+
+         # Generate analysis using reasoning engine
+         analysis = self.reasoning_engine.generate_text(analysis_prompt, max_length=1024)
+
+         # Extract structured information based on document type
+         if document_type.lower() == "resume":
+             extraction_prompt = f"""Based on this resume:
+ {document_text}
+
+ Extract the following information in a structured format:
+ 1. Name:
+ 2. Contact Information:
+ 3. Education:
+ 4. Work Experience:
+ 5. Skills:
+ 6. Projects:
+ 7. Certifications:
+ 8. Languages:
+ 9. Key Strengths:
+ """
+         elif document_type.lower() == "job_description":
+             extraction_prompt = f"""Based on this job description:
+ {document_text}
+
+ Extract the following information in a structured format:
+ 1. Job Title:
+ 2. Company:
+ 3. Location:
+ 4. Required Skills:
+ 5. Required Experience:
+ 6. Education Requirements:
+ 7. Responsibilities:
+ 8. Benefits:
+ 9. Key Qualifications:
+ """
+         else:
+             extraction_prompt = f"""Extract key information from this document:
+ {document_text}
+
+ Key information:
+ 1. """
+
+         # Generate structured extraction
+         structured_info = self.reasoning_engine.generate_text(extraction_prompt, max_length=1024)
+
+         # Add important facts to memory
+         self._extract_document_facts(document_text, document_type, structured_info)
+
+         processing_time = time.time() - start_time
+
+         return {
+             "document_type": document_type,
+             "analysis": analysis,
+             "structured_info": structured_info,
+             "processing_time": processing_time,
+             "timestamp": time.time()
+         }
+
+     def _extract_document_facts(self, document_text: str, document_type: str, structured_info: str) -> None:
+         """Extract important facts from a document and add them to memory
+
+         Args:
+             document_text: The text content of the document
+             document_type: The type of document
+             structured_info: Structured information extracted from the document
+         """
+         # Extract key facts based on document type
+         if document_type.lower() == "resume":
+             # Extract name if present
+             name_match = re.search(r"Name:\s*([\w\s]+)\n", structured_info)
+             if name_match:
+                 name = name_match.group(1).strip()
+                 self.memory_manager.add_important_fact(f"Document contains resume for {name}", "document")
+
+             # Extract skills
+             skills_match = re.search(r"Skills:\s*([\w\s,\.\-\+]+)\n", structured_info)
+             if skills_match:
+                 skills = skills_match.group(1).strip()
+                 self.memory_manager.add_important_fact(f"Resume shows skills in: {skills}", "document")
+
+             # Extract education
+             education_match = re.search(r"Education:\s*([\w\s,\.\-\+]+)\n", structured_info)
+             if education_match:
+                 education = education_match.group(1).strip()
+                 self.memory_manager.add_important_fact(f"Resume shows education: {education}", "document")
+
+         elif document_type.lower() == "job_description":
+             # Extract job title
+             title_match = re.search(r"Job Title:\s*([\w\s]+)\n", structured_info)
+             if title_match:
+                 title = title_match.group(1).strip()
+                 self.memory_manager.add_important_fact(f"Document contains job description for {title}", "document")
+
+             # Extract required skills
+             skills_match = re.search(r"Required Skills:\s*([\w\s,\.\-\+]+)\n", structured_info)
+             if skills_match:
+                 skills = skills_match.group(1).strip()
+                 self.memory_manager.add_important_fact(f"Job requires skills in: {skills}", "document")
+
+         # Add general document fact
+         self.memory_manager.add_important_fact(f"Processed a {document_type} document", "system")
+
+     def rank_resumes(self, job_description: str, resumes: List[str]) -> Dict[str, Any]:
+         """Rank multiple resumes against a job description
+
+         Args:
+             job_description: The job description text
+             resumes: List of resume texts to rank
+
+         Returns:
+             Dictionary containing rankings and analysis
+         """
+         start_time = time.time()
+
+         # Process the job description first
+         job_result = self.process_document(job_description, "job_description")
+         job_analysis = job_result["structured_info"]
+
+         # Process each resume
+         resume_results = []
+         for i, resume in enumerate(resumes):
+             result = self.process_document(resume, "resume")
+             resume_results.append({
+                 "index": i,
+                 "text": resume,
+                 "analysis": result["structured_info"]
+             })
+
+         # Create a ranking prompt
+         ranking_prompt = f"""I need to rank these resumes based on how well they match the job description.
+
+ Job Description Analysis:
+ {job_analysis}
+
+ Resumes:
+ """
+
+         for i, result in enumerate(resume_results):
+             ranking_prompt += f"\nResume {i+1}:\n{result['analysis']}\n"
+
+         ranking_prompt += "\nRank these resumes from best to worst match for the job, with detailed reasoning for each:"
+
+         # Generate the ranking analysis
+         ranking_analysis = self.reasoning_engine.generate_text(ranking_prompt, max_length=2048)
+
+         # Generate a numerical scoring for each resume
+         scoring_prompt = f"""Based on my analysis of how well these resumes match the job description:
+ {ranking_analysis}
+
+ Assign a numerical score from 0-100 for each resume, where 100 is a perfect match:
+
+ Resume 1 Score:"""
+
+         scores_text = self.reasoning_engine.generate_text(scoring_prompt, max_length=512)
+
+         # Parse scores (simple regex approach)
+         scores = []
+         for i in range(len(resume_results)):
+             score_match = re.search(rf"Resume {i+1} Score:\s*(\d+)", scores_text)
+             if score_match:
+                 scores.append(int(score_match.group(1)))
+             else:
+                 # Default score if parsing fails
+                 scores.append(50)
+
+         # Create the final rankings
+         rankings = []
+         for i, score in enumerate(scores):
+             rankings.append({
+                 "resume_index": i,
+                 "score": score,
+                 "resume_text": resumes[i][:100] + "..."  # Truncated for readability
+             })
+
+         # Sort by score (descending)
+         rankings.sort(key=lambda x: x["score"], reverse=True)
+
+         processing_time = time.time() - start_time
+
+         return {
+             "rankings": rankings,
+             "analysis": ranking_analysis,
+             "job_description": job_description,
+             "processing_time": processing_time
+         }
+
+ # Create the Gradio interface
+ def create_interface():
+     # Initialize the agent with a suitable model for Hugging Face Spaces
+     # Using a smaller model by default for better performance in Spaces
+     agent = ResuRankAgent(model_id="google/flan-t5-base", use_cache=True)
+
+     with gr.Blocks(title="ResuRank AI Agent") as interface:
+         gr.Markdown("# ResuRank AI Agent")
+         gr.Markdown("An autonomous AI agent that can process queries, perform reasoning, and execute tasks.")
+
+         with gr.Tab("Chat"):
+             chatbot = gr.Chatbot(height=400)
+             msg = gr.Textbox(label="Your message", placeholder="Ask me anything...")
+             with gr.Row():
+                 submit_btn = gr.Button("Submit")
+                 clear_btn = gr.Button("Clear")
+
+             reasoning_checkbox = gr.Checkbox(label="Use reasoning", value=True)
+
+             # The checkbox defaults to True, so the reasoning box starts visible;
+             # its visibility is toggled at runtime by the change handler below.
+             reasoning_output = gr.Textbox(label="Reasoning", interactive=False,
+                                           visible=reasoning_checkbox.value)
+
+             def respond(message, chat_history, use_reasoning):
+                 if not message.strip():
+                     return chat_history, "", ""
+
+                 # Process the query
+                 result = agent.process_query(message, use_reasoning=use_reasoning)
+
+                 # Update chat history
+                 chat_history.append((message, result["response"]))
+
+                 return chat_history, "", result.get("reasoning") or ""
+
+             def clear_chat():
+                 agent.clear_conversation()
+                 return [], "", ""
+
+             # Set up event handlers
+             submit_btn.click(respond, [msg, chatbot, reasoning_checkbox], [chatbot, msg, reasoning_output])
+             msg.submit(respond, [msg, chatbot, reasoning_checkbox], [chatbot, msg, reasoning_output])
+             clear_btn.click(clear_chat, None, [chatbot, msg, reasoning_output])
+             reasoning_checkbox.change(lambda x: gr.update(visible=x), reasoning_checkbox, reasoning_output)
+
+         with gr.Tab("Task Execution"):
+             task_input = gr.Textbox(label="Task Description", placeholder="Describe the task to execute...")
+             execute_btn = gr.Button("Execute Task")
+
+             with gr.Row():
+                 with gr.Column():
+                     plan_output = gr.Textbox(label="Execution Plan", interactive=False)
+                 with gr.Column():
+                     results_output = gr.Textbox(label="Task Results", interactive=False)
+
+             task_status = gr.Textbox(label="Task Status", value="idle", interactive=False)
+
+             def execute_task(task_description):
+                 if not task_description.strip():
+                     return "No task provided.", "", "idle"
+
+                 # Execute the task
+                 result = agent.execute_task(task_description)
+
+                 return result.get("plan", ""), result.get("result", ""), result.get("status", "")
+
+             # Set up event handlers
+             execute_btn.click(execute_task, task_input, [plan_output, results_output, task_status])
+
+         with gr.Tab("Agent Status"):
+             status_btn = gr.Button("Refresh Status")
+
+             with gr.Row():
+                 with gr.Column():
+                     model_info = gr.Textbox(label="Model Information", interactive=False)
+                 with gr.Column():
+                     conversation_info = gr.Textbox(label="Conversation Information", interactive=False)
+
+             def update_status():
+                 status = agent.get_status()
+                 model_text = f"Model ID: {status['model_id']}\nDevice: {status['device']}"
+                 conversation_text = f"Conversation Length: {status['conversation_turns']} turns\nImportant Facts: {status['important_facts']}\nCurrent Task: {status['current_task'] or 'None'}\nTask Status: {status['task_status']}"
+
+                 return model_text, conversation_text
+
+             # Set up event handlers
+             status_btn.click(update_status, None, [model_info, conversation_info])
+
+             # Populate status when the interface loads
+             interface.load(update_status, None, [model_info, conversation_info])
+
+         with gr.Tab("Document Processing"):
+             with gr.Row():
+                 with gr.Column():
+                     document_input = gr.Textbox(label="Document Text", placeholder="Paste resume or job description text here...", lines=10)
+                     document_type = gr.Radio(["resume", "job_description", "other"], label="Document Type", value="resume")
+                     process_btn = gr.Button("Process Document")
+
+             with gr.Row():
+                 with gr.Column():
+                     analysis_output = gr.Textbox(label="Document Analysis", interactive=False, lines=10)
+                 with gr.Column():
+                     structured_output = gr.Textbox(label="Structured Information", interactive=False, lines=10)
+
+             def process_document(document_text, doc_type):
+                 if not document_text.strip():
+                     return "No document provided.", ""
+
+                 # Process the document
+                 result = agent.process_document(document_text, doc_type)
+
+                 return result.get("analysis", ""), result.get("structured_info", "")
+
+             # Set up event handlers
+             process_btn.click(process_document, [document_input, document_type], [analysis_output, structured_output])
+
+         with gr.Tab("Resume Ranking"):
+             with gr.Row():
+                 with gr.Column():
+                     job_description_input = gr.Textbox(label="Job Description", placeholder="Paste job description here...", lines=8)
+
+             with gr.Row():
+                 with gr.Column():
+                     resume1_input = gr.Textbox(label="Resume 1", placeholder="Paste first resume here...", lines=6)
+                 with gr.Column():
+                     resume2_input = gr.Textbox(label="Resume 2", placeholder="Paste second resume here...", lines=6)
+
+             with gr.Row():
+                 with gr.Column():
+                     resume3_input = gr.Textbox(label="Resume 3 (Optional)", placeholder="Paste third resume here...", lines=6)
+                 with gr.Column():
+                     resume4_input = gr.Textbox(label="Resume 4 (Optional)", placeholder="Paste fourth resume here...", lines=6)
+
+             rank_btn = gr.Button("Rank Resumes")
+
+             ranking_output = gr.Textbox(label="Ranking Results", interactive=False, lines=15)
+
+             def rank_resumes(job_desc, resume1, resume2, resume3, resume4):
+                 if not job_desc.strip() or not resume1.strip() or not resume2.strip():
+                     return "Please provide at least a job description and two resumes."
+
+                 # Collect all non-empty resumes
+                 resumes = [r for r in [resume1, resume2, resume3, resume4] if r.strip()]
+
+                 # Rank the resumes
+                 result = agent.rank_resumes(job_desc, resumes)
+
+                 # Format the results
+                 output = "Resume Rankings (Best to Worst Match):\n\n"
+
+                 for i, rank in enumerate(result["rankings"]):
+                     resume_num = rank["resume_index"] + 1
+                     score = rank["score"]
+                     output += f"{i+1}. Resume {resume_num} - Score: {score}/100\n"
+
+                 output += "\nDetailed Analysis:\n" + result["analysis"]
+
+                 return output
+
+             # Set up event handlers
+             rank_btn.click(rank_resumes, [job_description_input, resume1_input, resume2_input, resume3_input, resume4_input], ranking_output)
+
+     return interface
+
+ # Launch the interface when run directly
+ if __name__ == "__main__":
+     interface = create_interface()
+     interface.launch()
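
The Gradio UI is only a front end over ResuRankAgent, so the agent can also be driven from a script or notebook. A minimal sketch (the first construction downloads the model; the query text is illustrative):

from app import ResuRankAgent

agent = ResuRankAgent(model_id="google/flan-t5-base", use_cache=True)

reply = agent.process_query("Summarize what makes a strong data-science resume.", use_reasoning=True)
print(reply["response"])
print(agent.get_status())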
requirements.txt ADDED
@@ -0,0 +1,12 @@
+ transformers>=4.30.0
+ torch>=2.0.0
+ fastapi>=0.95.0
+ uvicorn>=0.22.0
+ python-dotenv>=1.0.0
+ pydantic>=2.0.0
+ gradio>=3.35.0
+ huggingface_hub>=0.16.0
+ requests>=2.31.0
+ pillow>=9.5.0
+ numpy>=1.24.0
+ pandas>=2.0.0