import json
import logging
import os
from datetime import datetime
from typing import Dict, List

from tqdm import tqdm

from src.model.model import Model
from src.task.task import Task, Tasktype
from src.task.task_factory import tasks_factory
class ModelEvaluator:
    """
    The model evaluator acts as a pipeline for evaluating models on the tasks
    available from tasks_factory.
    """

    def __init__(self):
        self.last_predictions = {}
        self.last_metrics = {}
        self.last_model_name = None
    def compute_metrics(self) -> Dict:
        """
        Compute metrics over the last tested model's predictions.
        One of the evaluate methods must have been called beforehand, or
        predictions must have been loaded with load_predictions_from_file.
        """
        metrics = []
        for task_dict in self.last_predictions["tasks"]:
            task_name, preds = list(task_dict.items())[0]
            if not preds:
                warning_message = f"Task '{task_name}' ignored because it has no predictions"
                logging.warning(warning_message)
                continue
            try:
                tasks = tasks_factory([task_name])
                task = tasks[0]
                metric_score, warning = task.compute(preds)
            except Exception as e:
                error_message = f"Error while computing metrics for task '{task_name}': {e}"
                logging.error(error_message)
                continue
            metric_name = task.metric_name
            task_entry = {
                task_name: {
                    metric_name: {**metric_score, f"{metric_name}_warning": warning}
                }
            }
            metrics.append(task_entry)
        # Keep the model metadata from the predictions but replace the raw
        # predictions with the computed metrics, without mutating
        # self.last_predictions.
        self.last_metrics = {**self.last_predictions, "tasks": metrics}
        return self.last_metrics
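    # Sketch of the structure returned by compute_metrics, assuming each task's
    # compute() returns a dict of scores plus a warning string. The task name,
    # metric name and values below are illustrative placeholders, not output
    # from a real run:
    #
    # {
    #     "model_name": "org/model",
    #     "model_url": "https://huggingface.co/org/model",
    #     "tasks": [
    #         {"some_task": {"accuracy": {"accuracy": 0.82, "accuracy_warning": ""}}},
    #     ],
    # }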
    def load_predictions_from_file(self, file_path: str) -> None:
        """
        Load predictions from a file so metrics can be computed.
        :param file_path: path to the predictions file.
        """
        try:
            with open(file_path, "r", encoding="utf-8") as f:
                self.last_predictions = json.load(f)
        except FileNotFoundError:
            error = f"File not found: {file_path}"
            logging.error(error)
            self.last_predictions = None
        except json.JSONDecodeError:
            error = f"Invalid JSON in file: {file_path}"
            logging.error(error)
            self.last_predictions = None
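    # The loader expects the same layout that evaluate_subset and save_results
    # produce: a top-level dict with "model_name", "model_url" and a "tasks"
    # list whose entries map a task name to its raw predictions. The values
    # shown here are placeholders for illustration:
    #
    # {
    #     "model_name": "org/model",
    #     "model_url": "https://huggingface.co/org/model",
    #     "tasks": [{"some_task": ["prediction 1", "prediction 2"]}],
    # }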
    def save_metrics(self, save_path):
        """
        Saves the computed metrics to a json file.
        :param save_path: the path to which the json file will be saved.
        """
        if not self.last_metrics:
            logging.info("No metrics to save")
            return None
        return self.save_object(
            save_path,
            self.last_metrics,
            f"{self.last_model_name.replace('/', '_')}_metrics.json",
        )
    def evaluate(self, model: Model, tasks: List[Task]):
        """
        Evaluates a given model on the given tasks.
        :param model: the model that will infer on the given tasks.
        :param tasks: the tasks to be evaluated on.
        """
        return self.evaluate_subset(model, tasks)
    def evaluate_subset(
        self, model: Model, tasks: List[Task], subset_size=None
    ) -> Dict:
        """
        Evaluates a given model on the given tasks, restricted to a subset of each task's prompts.
        :param model: the model that will infer on the given tasks.
        :param tasks: the tasks to be evaluated on.
        :param subset_size: the number of prompts per task to evaluate; all prompts are used when None.
        """
        predictions = []
        for task in tqdm(tasks, desc="Evaluating model on tasks", total=len(tasks)):
            info_log = (
                f"----- Doing task '{task.task_name}' with model '{model.name}' -----"
            )
            logging.info(info_log)
            try:
                if subset_size is None:
                    prompts = task.dataset.prompts[:]
                else:
                    prompts = task.dataset.prompts[:subset_size]
                if task.task_type == Tasktype.INFERENCE:
                    task_predictions = model.infer(
                        prompts, task.dataset.possible_ground_truths
                    )
                elif task.task_type == Tasktype.GENERATIVE:
                    task_predictions = model.generate(prompts)
                else:
                    error_message = f"Unknown task type {task.task_type}"
                    logging.error(error_message)
                    task_predictions = None
                task_predictions = {task.task_name: task_predictions}
                predictions.append(task_predictions)
            except Exception as e:
                error_message = f"Task '{task.task_name}' has failed: {e}"
                logging.error(error_message)
                continue
        self.last_predictions = {
            "model_name": model.name,
            "model_url": f"https://huggingface.co/{model.name}",
            "tasks": predictions,
        }
        self.last_model_name = model.name
        return self.last_predictions
    def save_results(self, save_path):
        """
        Saves the last predictions to a json file.
        :param save_path: the path to which the json file will be saved.
        """
        if self.last_model_name is None:
            logging.error("Please evaluate before saving results")
            return None
        date_time_stamp = datetime.now().strftime("%Y%m%d-%H%M")
        return self.save_object(
            save_path,
            self.last_predictions,
            f"{self.last_model_name.replace('/', '_')}_{date_time_stamp}.json",
        )
    def save_object(self, save_dir_path, saved_object, filename):
        """
        Utility method to save the given object into a json file.
        """
        os.makedirs(save_dir_path, exist_ok=True)
        full_path = os.path.join(save_dir_path, filename)
        try:
            with open(full_path, "w", encoding="utf-8") as f:
                json.dump(saved_object, f, indent=2)
            info_message = f"Results saved to {full_path}"
            logging.info(info_message)
        except Exception as e:
            error_message = f"Failed to save object: {e}"
            logging.error(error_message)
        return full_path
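

# Minimal usage sketch of the pipeline. The Model constructor call and the
# task name below are assumptions made for illustration; the real signatures
# live in src.model.model and src.task.task_factory.
if __name__ == "__main__":
    logging.basicConfig(level=logging.INFO)

    model = Model("org/model")  # hypothetical constructor arguments
    tasks = tasks_factory(["some_task"])  # hypothetical task name

    evaluator = ModelEvaluator()
    evaluator.evaluate_subset(model, tasks, subset_size=10)
    evaluator.save_results("results/")

    evaluator.compute_metrics()
    evaluator.save_metrics("results/")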