{
  "best_score": 0.8721797651929005,
  "best_model_path": "/root/.AUQUA-S/ULTIMATE-PERFECTION-B/iteration3_refine2_intelligibility_focus_var1.5_iter3_base_enhancement_1.2",
  "best_strategy": "intelligibility_focus_var1.5_iter3_base_enhancement_1.2",
  "best_metrics": {
    "mel_cepstral_distortion": 0.9410074203776098,
    "word_error_rate": 0.06832895045858953,
    "naturalness": 0.9427855135831692,
    "intelligibility": 0.9607099322388966,
    "speaker_similarity": 0.9581935254159257,
    "prosody": 0.9428288652210755,
    "overall_quality": 0.9857114735308742,
    "weighted_score": 0.8721797651929005
  },
  "iterations_performed": 3,
  "final_model_path": "/root/.AUQUA-S/ULTIMATE-PERFECTION-B/top1_optimized_model",
  "optimization_timestamp": "2025-05-23T06:15:14.301968",
  "seed": 1747978167,
  "total_strategies_tested": 32,
  "strategy_effectiveness": {
    "attention_scale": 1.2999999999999998,
    "output_scale": 1.4,
    "projection_scale": 1.6,
    "encoder_scale": 1.5,
    "decoder_scale": 1.2,
    "base_enhancement": 0.003120722111476427,
    "importance_factor": 1.5597398022056095
  }
}