from transformers.trainer import *
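# A star import keeps this override aligned with the upstream module: it brings
# in Trainer itself plus the module-level names used below (logger, os, torch,
# np, the *_NAME constants, and the conditional xm/smp imports).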


class MyTrainer(Trainer):
    def _save(self, output_dir: Optional[str] = None, state_dict=None):
        output_dir = output_dir if output_dir is not None else self.args.output_dir
        os.makedirs(output_dir, exist_ok=True)
        logger.info(f"Saving model checkpoint to {output_dir}")
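
        # Unlike the stock Trainer._save, delegate saving to the model's own
        # `save` interface and fail loudly if the model does not provide one.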
        if not hasattr(self.model, "save"):
            raise NotImplementedError(
                f"Model {self.model.__class__.__name__} "
                "does not support the save interface"
            )
        else:
            deepspeed = False
            if self.deepspeed:
                deepspeed = True
            self.model.save(output_dir, deepspeed=deepspeed)
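
        # Persist the TrainingArguments alongside the saved model, as the stock
        # Trainer._save does.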
        torch.save(self.args, os.path.join(output_dir, TRAINING_ARGS_NAME))

    def _save_checkpoint(self, model, trial, metrics=None):
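        # Save the model, optimizer, scheduler, RNG and Trainer state for the
        # current global step; the body mirrors the upstream _save_checkpoint.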
        checkpoint_folder = f"{PREFIX_CHECKPOINT_DIR}-{self.state.global_step}"
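
        # Outside of a hyperparameter search, record the total floating point
        # operations consumed so far in the Trainer state.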
        if self.hp_search_backend is None and trial is None:
            self.store_flos()
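
        # Write the model itself into <run_dir>/checkpoint-<global_step>.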
        run_dir = self._get_output_dir(trial=trial)
        output_dir = os.path.join(run_dir, checkpoint_folder)
        self.save_model(output_dir, _internal_call=True)
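
        # With simple sharded DDP, consolidate the sharded optimizer state onto
        # a single rank so a full state dict can be saved below.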
        if self.sharded_ddp == ShardedDDPOption.SIMPLE:
            self.optimizer.consolidate_state_dict()
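
        # FSDP has its own interface for the full optimizer state dict; it must
        # be called on every rank so the shards can be gathered.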
        if self.fsdp:
            full_osd = self.model.__class__.full_optim_state_dict(
                self.model, self.optimizer
            )
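
        # Optimizer and scheduler state: TPU and SageMaker model parallelism use
        # their own save paths; otherwise the saving process writes them with
        # torch.save. DeepSpeed runs are assumed to checkpoint these through the
        # model's own save path above, so they are skipped here.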
        if is_torch_tpu_available():
            xm.rendezvous("saving_optimizer_states")
            xm.save(
                self.optimizer.state_dict(), os.path.join(output_dir, OPTIMIZER_NAME)
            )
            with warnings.catch_warnings(record=True) as caught_warnings:
                xm.save(
                    self.lr_scheduler.state_dict(),
                    os.path.join(output_dir, SCHEDULER_NAME),
                )
                reissue_pt_warnings(caught_warnings)
        elif is_sagemaker_mp_enabled():
            opt_state_dict = self.optimizer.local_state_dict(gather_if_shard=False)
            smp.barrier()
            if smp.rdp_rank() == 0 or smp.state.cfg.shard_optimizer_state:
                smp.save(
                    opt_state_dict,
                    os.path.join(output_dir, OPTIMIZER_NAME),
                    partial=True,
                    v3=smp.state.cfg.shard_optimizer_state,
                )
            if self.args.should_save:
                with warnings.catch_warnings(record=True) as caught_warnings:
                    torch.save(
                        self.lr_scheduler.state_dict(),
                        os.path.join(output_dir, SCHEDULER_NAME),
                    )
                reissue_pt_warnings(caught_warnings)
                if self.do_grad_scaling:
                    torch.save(
                        self.scaler.state_dict(), os.path.join(output_dir, SCALER_NAME)
                    )
        elif self.args.should_save and not self.is_deepspeed_enabled:
            if self.fsdp:
                torch.save(full_osd, os.path.join(output_dir, OPTIMIZER_NAME))
            else:
                torch.save(
                    self.optimizer.state_dict(),
                    os.path.join(output_dir, OPTIMIZER_NAME),
                )

            with warnings.catch_warnings(record=True) as caught_warnings:
                torch.save(
                    self.lr_scheduler.state_dict(),
                    os.path.join(output_dir, SCHEDULER_NAME),
                )
            reissue_pt_warnings(caught_warnings)
            if self.do_grad_scaling:
                torch.save(
                    self.scaler.state_dict(), os.path.join(output_dir, SCALER_NAME)
                )
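
        # Track the best metric and best checkpoint path on the TrainerState so
        # load_best_model_at_end can locate it later.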
        if metrics is not None and self.args.metric_for_best_model is not None:
            metric_to_check = self.args.metric_for_best_model
            if not metric_to_check.startswith("eval_"):
                metric_to_check = f"eval_{metric_to_check}"
            metric_value = metrics[metric_to_check]

            operator = np.greater if self.args.greater_is_better else np.less
            if (
                self.state.best_metric is None
                or self.state.best_model_checkpoint is None
                or operator(metric_value, self.state.best_metric)
            ):
                self.state.best_metric = metric_value
                self.state.best_model_checkpoint = output_dir
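
        # Persist the TrainerState (log history, best metric, global step, ...)
        # as trainer_state.json.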
        if self.args.should_save:
            self.state.save_to_json(os.path.join(output_dir, TRAINER_STATE_NAME))
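
        # Snapshot RNG states (Python, NumPy, CPU and, when available, CUDA) so
        # a resumed run can restore data ordering and other randomness.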
        rng_states = {
            "python": random.getstate(),
            "numpy": np.random.get_state(),
            "cpu": torch.random.get_rng_state(),
        }
        if torch.cuda.is_available():
            if self.args.parallel_mode == ParallelMode.DISTRIBUTED:
                rng_states["cuda"] = torch.cuda.random.get_rng_state_all()
            else:
                rng_states["cuda"] = torch.cuda.random.get_rng_state()
        if is_torch_tpu_available():
            rng_states["xla"] = xm.get_rng_state()
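
        # A non-zero rank can reach this point before the main process has
        # created output_dir, so make sure it exists before writing.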
        os.makedirs(output_dir, exist_ok=True)
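
        # One RNG file per process in distributed runs, a single file otherwise.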
        if self.args.world_size <= 1:
            torch.save(rng_states, os.path.join(output_dir, "rng_state.pth"))
        else:
            torch.save(
                rng_states,
                os.path.join(output_dir, f"rng_state_{self.args.process_index}.pth"),
            )
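
        # Optionally push the new checkpoint to the Hub.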
        if self.args.push_to_hub:
            self._push_from_checkpoint(output_dir)
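
        # Possibly delete older checkpoints, honouring save_total_limit.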
        if self.args.should_save:
            self._rotate_checkpoints(use_mtime=True, output_dir=run_dir)