|
|
import os |
|
|
import gradio as gr |
|
|
import requests |
|
|
import pandas as pd |
|
|
from typing import Dict, List |
|
|
import asyncio |
|
|
|
|
|
|
|
|
from agents import Agent |
|
|
from tool import get_tools |
|
|
from model import get_model |
|
|
|
|
|
|
|
|
# Base URL of the GAIA scoring service (serves /questions and /submit).
DEFAULT_API_URL = "https://huggingface.co/proxy/agents-course-unit4-scoring.hf.space"
# LiteLLM model identifier handed to get_model() when building the agent.
MODEL_ID = "groq/llama-3.3-70b-versatile"
# Seconds to pause between consecutive agent calls (simple provider rate limiting).
RATE_LIMIT_DELAY = 1
|
|
|
|
|
|
|
|
|
|
|
async def process_question(agent, question: str, task_id: str) -> Dict:
    """Run the agent on a single question and package the outcome.

    Args:
        agent: Callable invoked as ``agent(question)`` returning the answer.
        question: Question text to answer.
        task_id: Scoring-server identifier for this question.

    Returns:
        Dict with two keys: ``"submission"`` (payload entry for the /submit
        endpoint) and ``"log"`` (row for the results table). On any exception
        the answer slot carries ``"ERROR: <message>"`` instead of raising, so
        one failing question cannot abort the whole batch.
    """
    try:
        # The agent call is synchronous; run it in a worker thread so a slow
        # model/tool invocation does not block the event loop this coroutine
        # is scheduled on.
        answer = await asyncio.to_thread(agent, question)
        return {
            "submission": {"task_id": task_id, "submitted_answer": answer},
            "log": {"Task ID": task_id, "Question": question, "Submitted Answer": answer}
        }
    except Exception as e:
        # Record the failure as the submitted answer; callers treat this row
        # like any other result.
        error_msg = f"ERROR: {str(e)}"
        return {
            "submission": {"task_id": task_id, "submitted_answer": error_msg},
            "log": {"Task ID": task_id, "Question": question, "Submitted Answer": error_msg}
        }
|
|
|
|
|
async def run_questions_async(agent, questions_data: List[Dict]) -> tuple:
    """Run the agent over every question in order, pausing between calls.

    Returns a ``(submissions, logs)`` pair: the /submit payload entries and
    the matching display-log rows, in question order.
    """
    submissions, logs = [], []
    count = len(questions_data)

    for position, item in enumerate(questions_data, start=1):
        print(f"Processing {position}/{count}: {item['question'][:80]}...")

        # Rate-limit every call after the first one.
        if position > 1:
            await asyncio.sleep(RATE_LIMIT_DELAY)

        outcome = await process_question(agent, item["question"], item["task_id"])
        submissions.append(outcome["submission"])
        logs.append(outcome["log"])

    return submissions, logs
|
|
|
|
|
|
|
|
async def run_and_submit_all(profile: gr.OAuthProfile | None):
    """
    Fetches all questions, runs the Agent on them, submits all answers,
    and displays the results.

    Gradio injects ``profile`` automatically from the LoginButton session;
    it is None when the user is not logged in. Returns a
    ``(status_message, results_dataframe_or_None)`` pair matching the two
    UI outputs wired up in the Blocks definition.
    """
    # SPACE_ID is set by the HF Spaces runtime; used only to build the
    # public link to this Space's code for the submission payload.
    space_id = os.getenv("SPACE_ID")

    # Require a logged-in user: the scoring server keys submissions by username.
    if profile:
        username = f"{profile.username}"
        print(f"User logged in: {username}")
    else:
        print("User not logged in.")
        return "Please Login to Hugging Face with the button.", None

    api_url = DEFAULT_API_URL
    questions_url = f"{api_url}/questions"
    submit_url = f"{api_url}/submit"

    # Build the agent up front so configuration errors (bad model id,
    # missing API key) surface before any network round-trips.
    try:
        agent = Agent(
            model=get_model("LiteLLMModel", MODEL_ID),
            tools=get_tools()
        )
    except Exception as e:
        print(f"Error instantiating agent: {e}")
        return f"Error initializing agent: {e}", None

    # Link to this Space's source tree; submitted alongside the answers.
    agent_code = f"https://huggingface.co/spaces/{space_id}/tree/main"
    print(f"Agent code: {agent_code}")

    # Fetch the full question set from the scoring server.
    # NOTE(review): requests.get/post are blocking calls inside a coroutine —
    # they stall the event loop for the duration of each request; presumably
    # acceptable here since this is the only task running. Confirm if more
    # concurrency is added.
    print(f"Fetching questions from: {questions_url}")
    try:
        response = requests.get(questions_url, timeout=15)
        response.raise_for_status()
        questions_data = response.json()
        if not questions_data:
            print("Fetched questions list is empty.")
            return "Fetched questions list is empty or invalid format.", None
        print(f"Fetched {len(questions_data)} questions.")
        # Rough runtime estimate from the per-question rate-limit delay alone
        # (excludes actual model latency), reported in minutes.
        estimated_time = len(questions_data) * RATE_LIMIT_DELAY / 60
        print(f"⏱️ Estimated time: {estimated_time:.1f} minutes")
    except Exception as e:
        print(f"Error fetching questions: {e}")
        return f"Error fetching questions: {e}", None

    # Run the agent sequentially over every question (rate-limited inside).
    print(f"Running agent on {len(questions_data)} questions...")
    answers_payload, results_log = await run_questions_async(agent, questions_data)

    if not answers_payload:
        print("Agent did not produce any answers to submit.")
        return "Agent did not produce any answers to submit.", pd.DataFrame(results_log)

    # Assemble the /submit payload expected by the scoring server.
    submission_data = {
        "username": username.strip(),
        "agent_code": agent_code,
        "answers": answers_payload
    }
    print(f"Submitting {len(answers_payload)} answers for user '{username}'...")

    # Submit and render the outcome; on failure the partial results table is
    # still returned so the user can inspect the agent's answers.
    try:
        response = requests.post(submit_url, json=submission_data, timeout=60)
        response.raise_for_status()
        result_data = response.json()
        final_status = (
            f"✅ Submission Successful!\n\n"
            f"User: {result_data.get('username')}\n"
            f"Overall Score: {result_data.get('score', 'N/A')}% "
            f"({result_data.get('correct_count', '?')}/{result_data.get('total_attempted', '?')} correct)\n\n"
            f"Message: {result_data.get('message', 'No message received.')}\n\n"
            f"Leaderboard: {api_url}/leaderboard"
        )
        print("Submission successful.")
        results_df = pd.DataFrame(results_log)
        return final_status, results_df
    except Exception as e:
        status_message = f"❌ Submission Failed: {e}"
        print(status_message)
        results_df = pd.DataFrame(results_log)
        return status_message, results_df
|
|
|
|
|
|
|
|
|
|
|
# Gradio UI: login button, a single run/submit action, and two outputs
# (status text + per-question results table) fed by run_and_submit_all.
with gr.Blocks(theme=gr.themes.Soft()) as demo:
    gr.Markdown("# 🤖 GAIA Agent Evaluation")
    gr.Markdown(
        f"""
        **Instructions:**
        1. Log in to your Hugging Face account using the button below
        2. Click 'Run Evaluation & Submit' to test your agent
        3. The agent will use web search and other tools to answer questions

        **Current Setup:**
        - Model: Llama 3.3 70B (via Groq)
        - Tools: Web search, Wikipedia, calculation, and more
        - Rate Limiting: {RATE_LIMIT_DELAY}s between requests

        ⚠️ **Note:** Make sure you have set your GROQ_API_KEY in the Space secrets.
        """
    )

    # HF OAuth login; supplies the gr.OAuthProfile argument that
    # run_and_submit_all receives automatically.
    gr.LoginButton()

    run_button = gr.Button("🚀 Run Evaluation & Submit", variant="primary")

    # Outputs populated by run_and_submit_all's (status, dataframe) return.
    status_output = gr.Textbox(label="📊 Status / Results", lines=8, interactive=False)
    results_table = gr.DataFrame(label="📝 Questions and Answers", wrap=True, max_height=400)

    # No explicit inputs: Gradio injects the OAuth profile from the session.
    run_button.click(
        fn=run_and_submit_all,
        outputs=[status_output, results_table]
    )
|
|
|
|
|
if __name__ == "__main__":
    # Startup banner with the environment details useful for debugging a
    # Space deployment.
    print("\n" + "="*70)
    print("🤖 GAIA Agent Starting")
    print("="*70)
    print(f"🔑 Using Model: {MODEL_ID}")

    # Both variables are provided by the HF Spaces runtime; absent when
    # running locally, in which case the URL lines are skipped.
    space_host = os.getenv("SPACE_HOST")
    space_id = os.getenv("SPACE_ID")

    if space_host:
        print(f"✅ Runtime URL: https://{space_host}.hf.space")
    if space_id:
        print(f"✅ Repo URL: https://huggingface.co/spaces/{space_id}")

    print("="*70 + "\n")
    # debug=True surfaces tracebacks in the UI; share=False keeps the app
    # local to the Space (no public Gradio tunnel).
    demo.launch(debug=True, share=False)