Oleg Lavrovsky committed
Commit 47ab1d0 · unverified · 1 Parent(s): a9c00ee

Result format

Files changed (1)
app.py +4 -7
app.py CHANGED
@@ -132,22 +132,19 @@ async def predict(q: str):
         )
 
         # Get and decode the output
-        print(generated_ids)
-        output_ids = generated_ids[0][-1]
-        logger.debug(output_ids)
-        #[len(model_inputs.input_ids[0]) :]
+        output_ids = generated_ids[0][len(model_inputs.input_ids[0]) :]
         result = tokenizer.decode(output_ids, skip_special_tokens=True)
 
         # Checkpoint
         processing_time = time.time() - start_time
 
         return ModelResponse(
-            text=result['label'],
-            confidence=result['score'],
+            text=result, #['label'],
+            confidence=0, #result['score'],
             processing_time=processing_time
         )
 
-    except Exception as e:
+    except HTTPException as e:
         logger.error(f"Evaluation error: {e}")
         raise HTTPException(status_code=500, detail="Evaluation failed")
 
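For context, the replacement slicing is the standard Hugging Face transformers decode pattern: model.generate() returns the prompt tokens followed by the completion, so dropping the first len(model_inputs.input_ids[0]) tokens decodes only the newly generated text. A minimal sketch of that pattern, assuming a chat-style model; the model name, prompt, and max_new_tokens below are illustrative placeholders, not taken from this repository:

# Minimal sketch of the decode pattern used in the commit; the model name,
# prompt, and max_new_tokens are illustrative assumptions, not from this repo.
from transformers import AutoModelForCausalLM, AutoTokenizer

model_name = "Qwen/Qwen2.5-0.5B-Instruct"  # placeholder model
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(model_name)

messages = [{"role": "user", "content": "Hello"}]
text = tokenizer.apply_chat_template(
    messages, tokenize=False, add_generation_prompt=True
)
model_inputs = tokenizer([text], return_tensors="pt")
generated_ids = model.generate(**model_inputs, max_new_tokens=64)

# generate() echoes the prompt tokens, so slice them off before decoding;
# this is the same expression the commit switches to.
output_ids = generated_ids[0][len(model_inputs.input_ids[0]) :]
result = tokenizer.decode(output_ids, skip_special_tokens=True)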
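The updated return statement implies a ModelResponse with text, confidence, and processing_time fields, with confidence hard-coded to 0 for now. A hypothetical definition consistent with that return (only the field names come from the diff; the Pydantic BaseModel base and the field types are assumptions):

# Hypothetical shape of ModelResponse implied by the return statement;
# field names come from the diff, the Pydantic base and types are assumed.
from pydantic import BaseModel

class ModelResponse(BaseModel):
    text: str               # decoded model output
    confidence: float       # hard-coded to 0 by this commit
    processing_time: float  # seconds elapsed since start_time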