from transformers.trainer import *
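# NOTE: the wildcard import above intentionally pulls in everything trainer.py uses
# internally (logger, PREFIX_CHECKPOINT_DIR, WEIGHTS_NAME, OPTIMIZER_NAME, SCHEDULER_NAME, ...),
# since the overrides below are adapted from that module and rely on those
# module-level helpers and constants.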
class MyTrainer(Trainer):
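    """Trainer variant that writes checkpoints through the model's own `save` method
    instead of `save_pretrained` / `deepspeed` checkpointing."""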
def _save(self, output_dir: Optional[str] = None, state_dict=None):
        # If we are executing this function, we are process zero, so we don't need to check for that.
output_dir = output_dir if output_dir is not None else self.args.output_dir
os.makedirs(output_dir, exist_ok=True)
logger.info(f"Saving model checkpoint to {output_dir}")
        # * START: deleted original code below (kept as comments for reference)
# supported_classes = (PreTrainedModel,) if not is_peft_available() else (PreTrainedModel, PeftModel)
# # Save a trained model and configuration using `save_pretrained()`.
# # They can then be reloaded using `from_pretrained()`
# if not isinstance(self.model, supported_classes):
# if state_dict is None:
# state_dict = self.model.state_dict()
# if isinstance(unwrap_model(self.model), supported_classes):
# unwrap_model(self.model).save_pretrained(
# output_dir, state_dict=state_dict, safe_serialization=self.args.save_safetensors
# )
# else:
# logger.info("Trainer.model is not a `PreTrainedModel`, only saving its state dict.")
# if self.args.save_safetensors:
# safetensors.torch.save_file(state_dict, os.path.join(output_dir, SAFE_WEIGHTS_NAME))
# else:
# torch.save(state_dict, os.path.join(output_dir, WEIGHTS_NAME))
# else:
# self.model.save_pretrained(
# output_dir, state_dict=state_dict, safe_serialization=self.args.save_safetensors
# )
# if self.tokenizer is not None:
# self.tokenizer.save_pretrained(output_dir)
# * END
        # * START: my code
        if not hasattr(self.model, "save"):
            raise NotImplementedError(
                f"Model {self.model.__class__.__name__} does not implement the required "
                f"`save(output_dir, deepspeed=...)` interface"
            )
        # `self.deepspeed` is set by the Trainer when a DeepSpeed config is active;
        # forward it as a flag to the model's custom `save` method.
        self.model.save(output_dir, deepspeed=bool(self.deepspeed))
# * END
# Good practice: save your training arguments together with the trained model
torch.save(self.args, os.path.join(output_dir, TRAINING_ARGS_NAME))
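    # Illustrative usage (a sketch only; assumes a model object exposing
    # `save(output_dir, deepspeed=...)`; `my_model`, `training_args` and `ds` are
    # placeholder names, not defined in this file):
    #
    #     trainer = MyTrainer(model=my_model, args=training_args, train_dataset=ds)
    #     trainer.train()
    #     trainer.save_model("outputs/final")  # routes through `_save` above on the main process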
def _save_checkpoint(self, model, trial, metrics=None):
# In all cases, including ddp/dp/deepspeed, self.model is always a reference to the model we
# want to save except FullyShardedDDP.
# assert unwrap_model(model) is self.model, "internal model should be a reference to self.model"
# Save model checkpoint
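        # Each checkpoint gets its own folder named by global step, e.g. "checkpoint-500".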
checkpoint_folder = f"{PREFIX_CHECKPOINT_DIR}-{self.state.global_step}"
if self.hp_search_backend is None and trial is None:
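            # Fold the FLOPs counted so far into `self.state.total_flos` before checkpointing.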
self.store_flos()
run_dir = self._get_output_dir(trial=trial)
output_dir = os.path.join(run_dir, checkpoint_folder)
self.save_model(output_dir, _internal_call=True)
        # * START: deleted original code below (kept as comments for reference)
# if self.is_deepspeed_enabled:
# # under zero3 model file itself doesn't get saved since it's bogus! Unless deepspeed
# # config `stage3_gather_16bit_weights_on_model_save` is True
# self.model_wrapped.save_checkpoint(output_dir)
# * END
# Save optimizer and scheduler
if self.sharded_ddp == ShardedDDPOption.SIMPLE:
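            # Each rank only holds a shard of the optimizer state; gather it onto rank 0 so it can be saved.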
self.optimizer.consolidate_state_dict()
if self.fsdp:
# FSDP has a different interface for saving optimizer states.
# Needs to be called on all ranks to gather all states.
            # full_optim_state_dict will be deprecated after PyTorch 2.2!
full_osd = self.model.__class__.full_optim_state_dict(
self.model, self.optimizer
)
if is_torch_tpu_available():
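            # Keep TPU processes in sync here; `xm.save` then writes from the master ordinal only.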
xm.rendezvous("saving_optimizer_states")
xm.save(
self.optimizer.state_dict(), os.path.join(output_dir, OPTIMIZER_NAME)
)
with warnings.catch_warnings(record=True) as caught_warnings:
xm.save(
self.lr_scheduler.state_dict(),
os.path.join(output_dir, SCHEDULER_NAME),
)
reissue_pt_warnings(caught_warnings)
elif is_sagemaker_mp_enabled():
opt_state_dict = self.optimizer.local_state_dict(gather_if_shard=False)
smp.barrier()
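            # With sharded optimizer state every rank writes its own partial file; otherwise only rdp rank 0 saves.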
if smp.rdp_rank() == 0 or smp.state.cfg.shard_optimizer_state:
smp.save(
opt_state_dict,
os.path.join(output_dir, OPTIMIZER_NAME),
partial=True,
v3=smp.state.cfg.shard_optimizer_state,
)
if self.args.should_save:
with warnings.catch_warnings(record=True) as caught_warnings:
torch.save(
self.lr_scheduler.state_dict(),
os.path.join(output_dir, SCHEDULER_NAME),
)
reissue_pt_warnings(caught_warnings)
if self.do_grad_scaling:
torch.save(
self.scaler.state_dict(), os.path.join(output_dir, SCALER_NAME)
)
elif self.args.should_save and not self.is_deepspeed_enabled:
            # When DeepSpeed is enabled, optimizer/scheduler state is presumably handled by the
            # model's own `save` path (the stock `deepspeed` save_checkpoint call was removed above).
if self.fsdp:
torch.save(full_osd, os.path.join(output_dir, OPTIMIZER_NAME))
else:
torch.save(
self.optimizer.state_dict(),
os.path.join(output_dir, OPTIMIZER_NAME),
)
with warnings.catch_warnings(record=True) as caught_warnings:
torch.save(
self.lr_scheduler.state_dict(),
os.path.join(output_dir, SCHEDULER_NAME),
)
reissue_pt_warnings(caught_warnings)
if self.do_grad_scaling:
torch.save(
self.scaler.state_dict(), os.path.join(output_dir, SCALER_NAME)
)
# Determine the new best metric / best model checkpoint
if metrics is not None and self.args.metric_for_best_model is not None:
metric_to_check = self.args.metric_for_best_model
if not metric_to_check.startswith("eval_"):
metric_to_check = f"eval_{metric_to_check}"
metric_value = metrics[metric_to_check]
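            # Higher-is-better metrics compare with np.greater, otherwise np.less.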
operator = np.greater if self.args.greater_is_better else np.less
if (
self.state.best_metric is None
or self.state.best_model_checkpoint is None
or operator(metric_value, self.state.best_metric)
):
self.state.best_metric = metric_value
self.state.best_model_checkpoint = output_dir
# Save the Trainer state
if self.args.should_save:
self.state.save_to_json(os.path.join(output_dir, TRAINER_STATE_NAME))
        # Save RNG states so training can resume reproducibly (one file per process when distributed)
rng_states = {
"python": random.getstate(),
"numpy": np.random.get_state(),
"cpu": torch.random.get_rng_state(),
}
if torch.cuda.is_available():
if self.args.parallel_mode == ParallelMode.DISTRIBUTED:
                # In distributed runs, save the RNG state of every CUDA device visible to this process
rng_states["cuda"] = torch.cuda.random.get_rng_state_all()
else:
rng_states["cuda"] = torch.cuda.random.get_rng_state()
if is_torch_tpu_available():
rng_states["xla"] = xm.get_rng_state()
# A process can arrive here before the process 0 has a chance to save the model, in which case output_dir may
# not yet exist.
os.makedirs(output_dir, exist_ok=True)
if self.args.world_size <= 1:
torch.save(rng_states, os.path.join(output_dir, "rng_state.pth"))
else:
torch.save(
rng_states,
os.path.join(output_dir, f"rng_state_{self.args.process_index}.pth"),
)
if self.args.push_to_hub:
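            # Upload the freshly written checkpoint folder to the Hugging Face Hub.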
self._push_from_checkpoint(output_dir)
# Maybe delete some older checkpoints.
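        # (bounded by `args.save_total_limit`; the best checkpoint is kept when one is tracked)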
if self.args.should_save:
self._rotate_checkpoints(use_mtime=True, output_dir=run_dir)