Commit 161b63e (verified) · Parent(s): eddba15
Yurhu committed:

Push from a local computer

.pytest_cache/v/cache/lastfailed CHANGED
@@ -1,14 +1,5 @@
 {
-  "tests/model_evaluation/test_model_evaluator.py::test_evaluate": true,
-  "tests/model_evaluation/test_model_evaluator.py::test_evaluate_subset": true,
-  "tests/model_evaluation/test_model_evaluator.py::test_save_results": true,
-  "tests/model_evaluation/test_model_evaluator.py::test_save_object": true,
-  "tests/model_evaluation/test_model_evaluator.py::ModelEvaluatorTest::test_evaluate": true,
-  "tests/model_evaluation/test_model_evaluator.py::ModelEvaluatorTest::test_evaluate_subset": true,
-  "tests/model_evaluation/test_model_evaluator.py::ModelEvaluatorTest::test_save_object": true,
-  "tests/model_evaluation/test_model_evaluator.py::ModelEvaluatorTest::test_save_results": true,
-  "tests/model_evaluation/test_model_evaluator.py::ModelEvaluatorTest::test_when_evaluating_return_dict": true,
-  "tests/model_evaluation/test_model_evaluator.py::ModelEvaluatorTest::test_save_metrics": true,
-  "tests/tasks/evaluation/test_evaluation_piaf.py::TaskPIAFTest::test_given_a_prediction_smaller_than_corpus_when_compute_then_return_expected_result_and_warning": true,
-  "tests/tasks/evaluation/test_evaluation_piaf.py::TaskPIAFTest::test_given_a_prediction_when_compute_then_return_expected_result_no_warnings": true
+  "tests/tasks/evaluation/test_evaluation_expressions_quebecoises.py::TaskexpressionsquebecoisesTest::test_given_a_prediction_larger_than_ground_truth_raise_error": true,
+  "tests/tasks/evaluation/test_evaluation_expressions_quebecoises.py::TaskexpressionsquebecoisesTest::test_given_a_prediction_smaller_than_corpus_when_compute_then_return_expected_result_and_warning": true,
+  "tests/tasks/evaluation/test_evaluation_expressions_quebecoises.py::TaskexpressionsquebecoisesTest::test_given_a_prediction_when_compute_then_return_expected_result_no_warnings": true
 }
.pytest_cache/v/cache/nodeids CHANGED
@@ -1,5 +1,4 @@
 [
-  "predictions/cuda_test.py::test_cuda",
   "tests/backend/test_evaluation.py::ComputeTasksRatingsTest::test_evaluation_loop",
   "tests/backend/test_submission_api.py::test_health_check",
   "tests/backend/test_submission_api.py::test_leaderboard_empty",
@@ -17,23 +16,10 @@
   "tests/backend/test_validation_tools.py::ValidateSubmissionJSONTest::test_given_a_json_of_unaccepted_format_when_validate_then_does_raise_error",
   "tests/backend/test_validation_tools.py::ValidateSubmissionTasksNameJSONTest::test_given_a_json_of_accepted_task_when_validate_then_does_not_raise_error",
   "tests/backend/test_validation_tools.py::ValidateSubmissionTasksNameJSONTest::test_given_a_json_of_unaccepted_task_when_validate_then_does_raise_error",
-  "tests/model_evaluation/test_model_evaluator.py::ModelEvaluatorTest::test_compute_metrics",
-  "tests/model_evaluation/test_model_evaluator.py::ModelEvaluatorTest::test_evaluate",
-  "tests/model_evaluation/test_model_evaluator.py::ModelEvaluatorTest::test_evaluate_subset",
-  "tests/model_evaluation/test_model_evaluator.py::ModelEvaluatorTest::test_save_metrics",
-  "tests/model_evaluation/test_model_evaluator.py::ModelEvaluatorTest::test_save_object",
-  "tests/model_evaluation/test_model_evaluator.py::ModelEvaluatorTest::test_save_results",
   "tests/model_evaluation/test_model_evaluator.py::ModelEvaluatorTest::test_when_compute_metrics_return_metrics_dict",
-  "tests/model_evaluation/test_model_evaluator.py::ModelEvaluatorTest::test_when_evaluating_return_dict",
   "tests/model_evaluation/test_model_evaluator.py::ModelEvaluatorTest::test_when_evaluating_return_formatted_dict",
   "tests/model_evaluation/test_model_evaluator.py::ModelEvaluatorTest::test_when_task_is_generative_generate",
   "tests/model_evaluation/test_model_evaluator.py::ModelEvaluatorTest::test_when_task_is_inference_infer",
-  "tests/model_evaluation/test_model_evaluator.py::test_compute_metrics",
-  "tests/model_evaluation/test_model_evaluator.py::test_evaluate",
-  "tests/model_evaluation/test_model_evaluator.py::test_evaluate_subset",
-  "tests/model_evaluation/test_model_evaluator.py::test_save_metrics",
-  "tests/model_evaluation/test_model_evaluator.py::test_save_object",
-  "tests/model_evaluation/test_model_evaluator.py::test_save_results",
   "tests/tasks/evaluation/test_evaluation_allocine.py::TaskAllocineTest::test_given_a_prediction_larger_than_ground_truth_raise_error",
   "tests/tasks/evaluation/test_evaluation_allocine.py::TaskAllocineTest::test_given_a_prediction_smaller_than_corpus_when_compute_then_return_expected_result_and_warning",
   "tests/tasks/evaluation/test_evaluation_allocine.py::TaskAllocineTest::test_given_a_prediction_when_compute_then_return_expected_result_no_warnings",
@@ -46,9 +32,6 @@
   "tests/tasks/evaluation/test_evaluation_gqnli.py::TaskGQNLITest::test_given_a_prediction_larger_than_ground_truth_raise_error",
   "tests/tasks/evaluation/test_evaluation_gqnli.py::TaskGQNLITest::test_given_a_prediction_smaller_than_corpus_when_compute_then_return_expected_result_and_warning",
   "tests/tasks/evaluation/test_evaluation_gqnli.py::TaskGQNLITest::test_given_a_prediction_when_compute_then_return_expected_result_no_warnings",
-  "tests/tasks/evaluation/test_evaluation_opus_parcus.py::TaskOpusParcusTest::test_given_a_prediction_larger_than_ground_truth_raise_error",
-  "tests/tasks/evaluation/test_evaluation_opus_parcus.py::TaskOpusParcusTest::test_given_a_prediction_smaller_than_corpus_when_compute_then_return_expected_result_and_warning",
-  "tests/tasks/evaluation/test_evaluation_opus_parcus.py::TaskOpusParcusTest::test_given_a_prediction_when_compute_then_return_expected_result_no_warnings",
   "tests/tasks/evaluation/test_evaluation_paws_x.py::TaskPawsXTest::test_given_a_prediction_larger_than_ground_truth_raise_error",
   "tests/tasks/evaluation/test_evaluation_paws_x.py::TaskPawsXTest::test_given_a_prediction_smaller_than_corpus_when_compute_then_return_expected_result_and_warning",
   "tests/tasks/evaluation/test_evaluation_paws_x.py::TaskPawsXTest::test_given_a_prediction_when_compute_then_return_expected_result_no_warnings",
Dockerfile CHANGED
@@ -15,8 +15,9 @@ RUN apt-get update && apt-get install -y nginx \
     && rm -rf /var/lib/apt/lists/*
 
 COPY src/docker_requirements.txt /app/src/
-RUN pip install -r /app/src/docker_requirements.txt && \
-    rm -rf ~/.cache/pip
+RUN pip install --upgrade pip wheel
+RUN pip install --cache-dir=~/.cache/pip --prefer-binary pyarrow pandas numpy scipy fsspec aiohttp tqdm --progress-bar off -v
+RUN pip install --cache-dir=~/.cache/pip -r /app/src/docker_requirements.txt -v --prefer-binary && rm -rf ~/.cache/pip
 COPY src/ /app/src/
 
 # Copy Nginx config (adjust path if needed)
frontend/src/app/results/[id]/page.js CHANGED
@@ -34,7 +34,7 @@ export default function ResultsPage() {
   const handleDownload = async () => {
     if (!data) return;
     try {
-      const res = await fetch(`http://localhost:8000/results/${submissionId}.json`);
+      const res = await fetch(`${BACKEND_ADDRESS}/results/${submissionId}.json`);
       if (!res.ok) throw new Error(`HTTP ${res.status}`);
      const blob = await res.blob();
      const url = URL.createObjectURL(blob);
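
This fix replaces the hardcoded `http://localhost:8000` with a `BACKEND_ADDRESS` constant (defined outside this diff), so the download works behind the Nginx reverse proxy instead of only in local development. The route it targets is also not shown; a minimal FastAPI sketch of such an endpoint, assuming results are stored as JSON files under a `results/` directory (both the path and the storage layout are assumptions, not the repo's actual code):

from pathlib import Path

from fastapi import FastAPI, HTTPException
from fastapi.responses import FileResponse

app = FastAPI()
RESULTS_DIR = Path("results")  # assumed storage location, not shown in this diff

@app.get("/results/{submission_id}.json")
async def download_results(submission_id: str):
    # Serve the stored submission results file; 404 if the id is unknown.
    path = RESULTS_DIR / f"{submission_id}.json"
    if not path.is_file():
        raise HTTPException(status_code=404, detail="Unknown submission id")
    return FileResponse(path, media_type="application/json")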
push_to_spaces.py CHANGED
@@ -7,5 +7,5 @@ upload_folder(
     repo_id=repo_id,
     folder_path=local_dir,
     repo_type="space",  # Important: this tells HF it's a Space
-    commit_message="Initial snapshot upload",
+    commit_message="Push from a local computer",
 )
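
Only the tail of `push_to_spaces.py` appears in the diff. For context, a minimal sketch of the whole script, assuming `repo_id` and `local_dir` are module-level values defined above the diffed lines (the placeholder values here are hypothetical):

from huggingface_hub import upload_folder

repo_id = "user/space-name"  # hypothetical; the real value is defined earlier in the file
local_dir = "."              # hypothetical; the real value is defined earlier in the file

upload_folder(
    repo_id=repo_id,
    folder_path=local_dir,
    repo_type="space",  # Important: this tells HF it's a Space
    commit_message="Push from a local computer",
)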
src/backend/submission_api.py CHANGED
@@ -5,6 +5,7 @@ import os
 import sys
 import uuid
 from contextlib import asynccontextmanager
+from datetime import datetime
 from functools import lru_cache
 from pathlib import Path
 from typing import Dict, List, Any
@@ -73,6 +74,7 @@ async def submit(
     predictions_zip: UploadFile = File(...),
     display_name: str = Form(...),
 ):
+    logging.info("Starting submission")
     info_message = f"Submission from {email!r} as {display_name!r}."
     logging.info(info_message)
     zip_bytes = await predictions_zip.read()
@@ -83,8 +85,12 @@ async def submit(
     validate_submission_json(submission_json)
 
     tasks: List[Task] = tasks_factory(submission_json)
+    logging.info("Computation started")
+    start = datetime.now()
     submission_response = compute_tasks_ratings(tasks=tasks, submission=submission_json)
-
+    computation_time = datetime.now() - start
+    info_message = f"Computation ended in {computation_time}"
+    logging.info(info_message)
     submission_id = str(uuid.uuid4())
     submission_response.update(
         {
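
The added timing wraps only the `compute_tasks_ratings` call; `datetime.now() - start` yields a `datetime.timedelta`, which formats readably in the f-string. If more steps need the same treatment, a small context manager would avoid repeating the pattern; a sketch, not part of the diff:

import logging
from contextlib import contextmanager
from datetime import datetime

@contextmanager
def log_duration(label: str):
    # Log the wall-clock duration of the wrapped block,
    # mirroring the inline timing added above.
    start = datetime.now()
    try:
        yield
    finally:
        logging.info("%s ended in %s", label, datetime.now() - start)

Usage would then be `with log_duration("Computation"): submission_response = compute_tasks_ratings(tasks=tasks, submission=submission_json)`.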
src/docker_requirements.txt CHANGED
@@ -9,8 +9,9 @@ uvicorn
 # Optional: pretty printing, progress bars, etc.
 tqdm
 aenum
-accelerate
+
 evaluate
 wheel
+
 # Pour compatibilité ancienne
 protobuf<=3.20.3
start.sh CHANGED
@@ -1,23 +1,25 @@
-#!/bin/sh
-# Start FastAPI
-echo '🔧 Starting backend...'
-uvicorn src.backend.submission_api:app --host 0.0.0.0 --port 8000 &
-
-until curl -s http://localhost:8000/ > /dev/null; do
-  echo "FastAPI not ready yet. Retrying in 1s..."
-  sleep 1
-done
-
-echo '✅ Backend ready'
-ls
-# Start Next.js
-echo '🚀 Starting frontend'
-cd /app/frontend
-npm run start -- -p 8001 &
-echo '🚀 Frontend ready'
-# Start Nginx to reverse proxy
-echo "Starting Nginx"
-nginx -g "daemon off;"
-echo "Nginx ready"
-
+#!/bin/sh
+# Start FastAPI
+echo '🔧 Starting backend...'
+uvicorn src.backend.submission_api:app \
+    --host 0.0.0.0 --port 8000 --log-level debug \
+    > /app/backend.log 2>&1 &
+
+until curl -s http://localhost:8000/ > /dev/null; do
+  echo "FastAPI not ready yet. Retrying in 5 seconds..."
+  sleep 5
+done
+
+echo '✅ Backend ready'
+ls
+# Start Next.js
+echo '🚀 Starting frontend'
+cd frontend
+npm run start -- -p 8001 &
+echo '🚀 Frontend ready'
+# Start Nginx to reverse proxy
+echo "Starting Nginx"
+nginx -g "daemon off;"
+echo "Nginx ready"
+
 echo " container ready !"