Spaces: PRamoneda (Sleeping)
PRamoneda committed · Commit 58729a4
1 Parent(s): 9bedce4
solved problems with hf hub 2
.gitignore ADDED
@@ -0,0 +1 @@
+/models/
app.py CHANGED
@@ -1,39 +1,128 @@
+import gradio as gr
+from get_difficulty import predict_difficulty
+import tempfile
+import os
+from pydub import AudioSegment
+import yt_dlp
+import mimetypes
 from huggingface_hub import hf_hub_download
 import torch
-import os
+import sys
+import io
 
 REPO_ID = "pramoneda/audio"
 CACHE_BASE = "models"
 
-
-def download_model_checkpoint(model_name: str, checkpoint_id: int):
-    filename = f"{model_name}/checkpoint_{checkpoint_id}_clean.pth"
+def download_model_checkpoints(model_name: str, num_checkpoints: int = 5):
     cache_dir = os.path.join(CACHE_BASE, model_name)
+    os.makedirs(cache_dir, exist_ok=True)
 
+    for checkpoint_id in range(num_checkpoints):
+        filename = f"{model_name}/checkpoint_{checkpoint_id}.pth"
+        local_path = os.path.join(cache_dir, f"checkpoint_{checkpoint_id}.pth")
 
+        if not os.path.exists(local_path):
+            print(f"Downloading {filename} from {REPO_ID} to {cache_dir}")
+            path = hf_hub_download(
+                repo_id=REPO_ID,
+                filename=filename,
+                cache_dir=cache_dir
+            )
+            # Copy to expected location
+            if path != local_path:
+                import shutil
+                shutil.copy(path, local_path)
 
+def download_youtube_audio(url):
+    output_path = "yt_audio.%(ext)s"
+    ydl_opts = {
+        "format": "bestaudio/best",
+        "outtmpl": output_path,
+        "postprocessors": [{
+            "key": "FFmpegExtractAudio",
+            "preferredcodec": "mp3",
+            "preferredquality": "192",
+        }],
+        "quiet": True,
+        "no_warnings": True
+    }
 
+    with yt_dlp.YoutubeDL(ydl_opts) as ydl:
+        ydl.download([url])
 
+    return "yt_audio.mp3"
+
+def convert_to_mp3(input_path):
+    audio = AudioSegment.from_file(input_path)
+    temp_audio = tempfile.NamedTemporaryFile(delete=False, suffix=".mp3")
+    audio.export(temp_audio.name, format="mp3")
+    return temp_audio.name
+
+def process_input(input_file, youtube_url):
+    captured_output = io.StringIO()
+    sys.stdout = captured_output
+
+    audio_path = None
+    mp3_path = None
+
+    if youtube_url:
+        audio_path = download_youtube_audio(youtube_url)
+        mp3_path = audio_path
+    elif input_file:
+        mime_type, _ = mimetypes.guess_type(input_file)
+        if mime_type and mime_type.startswith("video/"):
+            audio_path = convert_to_mp3(input_file)
+            mp3_path = audio_path
+        else:
+            audio_path = convert_to_mp3(input_file)
+            mp3_path = audio_path
+    else:
+        sys.stdout = sys.__stdout__
+        return "No audio or video provided.", None, None, None
+
+    model_cqt = "audio_midi_cqt5_ps_v5"
+    model_pr = "audio_midi_pianoroll_ps_5_v4"
+    model_multi = "audio_midi_multi_ps_v5"
+
+    download_model_checkpoints(model_cqt)
+    download_model_checkpoints(model_pr)
+    download_model_checkpoints(model_multi)
+
+    diff_cqt = predict_difficulty(audio_path, model_name=model_cqt, rep="cqt5")
+    diff_pr = predict_difficulty(audio_path, model_name=model_pr, rep="pianoroll5")
+    diff_multi = predict_difficulty(audio_path, model_name=model_multi, rep="multimodal5")
+
+    sys.stdout = sys.__stdout__
+    log_output = captured_output.getvalue()
+
+    midi_path = "temp.mid"
+    if not os.path.exists(midi_path):
+        return "MIDI not generated.", None, None, None, log_output
+
+    difficulty_text = (
+        f"CQT difficulty: {diff_cqt}\n"
+        f"Pianoroll difficulty: {diff_pr}\n"
+        f"Multimodal difficulty: {diff_multi}"
+    )
+
+    return difficulty_text, midi_path, midi_path, mp3_path, log_output
 
+demo = gr.Interface(
+    fn=process_input,
+    inputs=[
+        gr.File(label="Upload MP3 or MP4", type="filepath"),
+        gr.Textbox(label="YouTube URL")
+    ],
+    outputs=[
+        gr.Textbox(label="Difficulty predictions"),
+        gr.File(label="Generated MIDI"),
+        gr.Audio(label="MIDI Playback", type="filepath"),
+        gr.Audio(label="Extracted MP3 Preview", type="filepath"),
+        gr.Textbox(label="Console Output")
+    ],
+    title="Music Difficulty Estimator",
+    description="Upload an MP3 or MP4 file, or provide a YouTube URL. The app extracts audio, predicts difficulty, and generates a MIDI file."
+)
 
 if __name__ == "__main__":
+    demo.launch()
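A note on the copy step in download_model_checkpoints: when hf_hub_download is called with cache_dir, it stores the file inside its own models--pramoneda--audio/snapshots/<revision>/ tree and returns that nested path, which is why the code then copies the result to models/<model_name>/checkpoint_<i>.pth. A minimal sketch of an alternative, assuming a huggingface_hub release recent enough to support local_dir (this is not the committed code):

    from huggingface_hub import hf_hub_download

    # local_dir materializes the file at local_dir/<filename>, so the checkpoint
    # lands directly at models/audio_midi_cqt5_ps_v5/checkpoint_0.pth and no
    # shutil.copy step is needed.
    path = hf_hub_download(
        repo_id="pramoneda/audio",
        filename="audio_midi_cqt5_ps_v5/checkpoint_0.pth",
        local_dir="models",
    )
    print(path)  # .../models/audio_midi_cqt5_ps_v5/checkpoint_0.pth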
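process_input captures the prediction logs by reassigning sys.stdout and restoring it by hand, so an exception inside predict_difficulty would leave stdout redirected for the rest of the process. A sketch of the same capture with contextlib.redirect_stdout from the standard library, which restores stdout even when the body raises:

    import contextlib
    import io

    captured_output = io.StringIO()
    with contextlib.redirect_stdout(captured_output):
        print("anything printed here is captured")  # stands in for the predict_difficulty calls
    log_output = captured_output.getvalue()

Separately, note that the early return for missing input yields four values while gr.Interface declares five outputs; returning an empty log string as a fifth element would keep both return paths the same shape.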
model.py ADDED
@@ -0,0 +1,344 @@
+import json
+import math
+import os
+from statistics import mean, stdev
+import torch
+from sklearn.metrics import mean_squared_error, balanced_accuracy_score
+from torch import nn
+from torch.nn import functional as F
+import utils
+from utils import prediction2label
+from scipy.stats import kendalltau
+
+
+class ordinal_loss(nn.Module):
+    """Ordinal regression with encoding as in https://arxiv.org/pdf/0704.1028.pdf"""
+
+    def __init__(self, weight_class=False):
+        super(ordinal_loss, self).__init__()
+        self.weights = weight_class
+
+    def forward(self, predictions, targets):
+        # Build the ordinal target encoding, i.e. 0 -> [1,0,0,...]
+        modified_target = torch.zeros_like(predictions)
+        for i, target in enumerate(targets):
+            modified_target[i, 0:target + 1] = 1
+
+        # if the tensor is empty, return 0
+        if predictions.shape[0] == 0:
+            return 0
+        # loss
+        if self.weights is not None:
+            return torch.sum((self.weights * F.mse_loss(predictions, modified_target, reduction="none")).mean(axis=1))
+        else:
+            return torch.sum((F.mse_loss(predictions, modified_target, reduction="none")).mean(axis=1))
+
+import numpy as np
+import matplotlib.pyplot as plt
+import seaborn as sns
+from sklearn.metrics import confusion_matrix
+
+class ContextAttention(nn.Module):
+    def __init__(self, size, num_head):
+        super(ContextAttention, self).__init__()
+        self.attention_net = nn.Linear(size, size)
+        self.num_head = num_head
+
+        if size % num_head != 0:
+            raise ValueError("size must be divisible by num_head", size, num_head)
+        self.head_size = int(size / num_head)
+        self.context_vector = torch.nn.Parameter(torch.Tensor(num_head, self.head_size, 1))
+        nn.init.uniform_(self.context_vector, a=-1, b=1)
+
+    def get_attention(self, x):
+        attention = self.attention_net(x)
+        attention_tanh = torch.tanh(attention)
+        attention_split = torch.stack(attention_tanh.split(split_size=self.head_size, dim=2), dim=0)
+        similarity = torch.bmm(attention_split.view(self.num_head, -1, self.head_size), self.context_vector)
+        similarity = similarity.view(self.num_head, x.shape[0], -1).permute(1, 2, 0)
+        return similarity
+
+    def forward(self, x):
+        attention = self.attention_net(x)
+        attention_tanh = torch.tanh(attention)
+        if self.head_size != 1:
+            attention_split = torch.stack(attention_tanh.split(split_size=self.head_size, dim=2), dim=0)
+            similarity = torch.bmm(attention_split.view(self.num_head, -1, self.head_size), self.context_vector)
+            similarity = similarity.view(self.num_head, x.shape[0], -1).permute(1, 2, 0)
+            similarity[x.sum(-1) == 0] = -1e4  # mask out zero-padded ones
+            softmax_weight = torch.softmax(similarity, dim=1)
+
+            x_split = torch.stack(x.split(split_size=self.head_size, dim=2), dim=2)
+            weighted_x = x_split * softmax_weight.unsqueeze(-1).repeat(1, 1, 1, x_split.shape[-1])
+            attention = weighted_x.view(x_split.shape[0], x_split.shape[1], x.shape[-1])
+        else:
+            softmax_weight = torch.softmax(attention, dim=1)
+            attention = softmax_weight * x
+
+        sum_attention = torch.sum(attention, dim=1)
+        return sum_attention
+
+
+class ResidualBlock(nn.Module):
+    def __init__(self, in_channels, out_channels, kernel_size, stride, padding):
+        super(ResidualBlock, self).__init__()
+        self.conv1 = nn.Conv2d(in_channels, out_channels, kernel_size, stride, padding)
+        self.bn1 = nn.BatchNorm2d(out_channels)
+        self.relu = nn.ReLU()
+        self.conv2 = nn.Conv2d(out_channels, out_channels, kernel_size, stride, padding)
+        self.bn2 = nn.BatchNorm2d(out_channels)
+        self.shortcut = nn.Sequential()
+        if in_channels != out_channels:
+            self.shortcut = nn.Sequential(
+                nn.Conv2d(in_channels, out_channels, kernel_size=1, stride=stride),
+                nn.BatchNorm2d(out_channels)
+            )
+
+    def forward(self, x):
+        identity = self.shortcut(x)
+
+        out = self.relu(self.bn1(self.conv1(x)))
+        out = self.bn2(self.conv2(out))
+        out += identity  # skip connection
+        out = self.relu(out)
+        return out
+
+
+def get_conv_layer(rep_name):
+    if "pianoroll" in rep_name:
+        in_channels = 2
+        kernel_width = (3, 4, 4)  # 88
+    elif "mel" in rep_name:
+        in_channels = 1
+        kernel_width = (3, 4, 4)  # 64
+    elif "cqt" in rep_name:
+        in_channels = 1
+        kernel_width = (3, 4, 4)  # 88
+    else:
+        raise ValueError("Representation not implemented")
+
+    if "5" in rep_name:
+        kernel_height = (3, 4, 4)
+    elif "10" in rep_name:
+        kernel_height = (4, 5, 5)
+    elif "20" in rep_name:
+        kernel_height = (4, 6, 6)
+    else:
+        raise ValueError("Representation not implemented")
+
+    convs = nn.Sequential(
+        ResidualBlock(in_channels, 64, 3, 1, 1),
+        nn.MaxPool2d((kernel_height[0], kernel_width[0])),  # adjusted pooling to handle increased length
+        nn.Dropout(0.1),
+        ResidualBlock(64, 128, 3, 1, 1),
+        nn.MaxPool2d((kernel_height[1], kernel_width[1])),  # adjusted pooling
+        nn.Dropout(0.1),
+        ResidualBlock(128, 256, 3, 1, 1),
+        nn.MaxPool2d((kernel_height[2], kernel_width[2])),  # adjusted pooling
+        nn.Dropout(0.1)
+    )
+    return convs
+
+
+class multimodal_cnns(nn.Module):
+
+    def __init__(self, modality_dropout, only_cqt=False, only_pr=False):
+        super().__init__()
+
+        self.midi_branch = get_conv_layer("pianoroll5")
+        self.audio_branch = get_conv_layer("cqt5")
+        self.modality_dropout = modality_dropout
+        self.only_cqt = only_cqt
+        self.only_pr = only_pr
+
+    def forward(self, x):
+        x_midi, x_audio = x
+        x_midi = self.midi_branch(x_midi).squeeze(-1)
+        x_audio = self.audio_branch(x_audio).squeeze(-1)
+        # apply modality dropout
+        if self.only_cqt:
+            x_midi = torch.zeros_like(x_midi, device=x_midi.device)
+        elif self.only_pr:
+            x_audio = torch.zeros_like(x_audio, device=x_audio.device)
+        x_midi_trimmed = x_midi[:, :, :x_audio.size(2)]
+
+        cnns_out = torch.cat((x_midi_trimmed, x_audio), 1)
+        return cnns_out
+
+
+class AudioModel(nn.Module):
+    def __init__(self, num_classes, rep, modality_dropout, only_cqt=False, only_pr=False):
+        super(AudioModel, self).__init__()
+
+        # all convolutional layers in a sequential block
+        if "pianoroll" in rep:
+            conv = get_conv_layer(rep)
+        elif "cqt" in rep:
+            conv = get_conv_layer(rep)
+        elif "mel" in rep:
+            conv = get_conv_layer(rep)
+        elif "multi" in rep:
+            conv = multimodal_cnns(modality_dropout, only_cqt, only_pr)
+        self.conv_layers = conv
+
+        # size of the GRU input feature
+        self.gru_input_size = 512 if "multi" in rep else 256
+
+        # GRU layer
+        self.gru = nn.GRU(input_size=self.gru_input_size, hidden_size=128, num_layers=2,
+                          batch_first=True, bidirectional=True)
+
+        self.context_attention = ContextAttention(size=256, num_head=4)
+        self.non_linearity = nn.ReLU()
+
+        # fully connected layer
+        self.fc = nn.Linear(256, num_classes)
+
+    def forward(self, x1, kk):
+        # apply the convolutional block
+        # print(x1.shape)
+
+        x = self.conv_layers(x1)
+        # reshape for GRU input
+        x = x.squeeze().transpose(0, 1).unsqueeze(0)  # reshaping to [batch, seq_len, features]
+        # print(x.shape)
+        x, _ = self.gru(x)
+        # attention
+        x = self.context_attention(x)
+        # classifier
+        x = self.non_linearity(x)
+        x = self.fc(x)
+        return x
+
+
+def get_mse_macro(y_true, y_pred):
+    mse_each_class = []
+    for true_class in set(y_true):
+        tt, pp = zip(*[[tt, pp] for tt, pp in zip(y_true, y_pred) if tt == true_class])
+        mse_each_class.append(mean_squared_error(y_true=tt, y_pred=pp))
+    return mean(mse_each_class)
+
+
+def get_cqt(rep, k):
+    inp_data = utils.load_binary(f"../videos_download/{rep}/{k}.bin")
+    inp_data = torch.tensor(inp_data, dtype=torch.float32).cuda()
+    inp_data = inp_data.unsqueeze(0).unsqueeze(0).transpose(2, 3)
+    return inp_data
+
+
+def get_pianoroll(rep, k):
+    inp_pr = utils.load_binary(f"../videos_download/{rep}/{k}.bin")
+    inp_on = utils.load_binary(f"../videos_download/{rep}/{k}_onset.bin")
+    inp_pr = torch.from_numpy(inp_pr).float().cuda()
+    inp_on = torch.from_numpy(inp_on).float().cuda()
+    inp_data = torch.stack([inp_pr, inp_on], dim=1)
+    inp_data = inp_data.unsqueeze(0).permute(0, 1, 2, 3)
+    return inp_data
+
+def compute_model_basic(model_name, rep, modality_dropout, only_cqt=False, only_pr=False):
+    seed = 42
+    np.random.seed(seed)
+    torch.manual_seed(seed)
+    if torch.cuda.is_available():
+        torch.cuda.manual_seed(seed)
+    data = utils.load_json("../videos_download/split_audio.json")
+    mse, acc = [], []
+    predictions = []
+    if only_cqt:
+        cache_name = model_name + "_cqt"
+    elif only_pr:
+        cache_name = model_name + "_pr"
+    else:
+        cache_name = model_name
+    if not os.path.exists(f"cache/{cache_name}.json"):
+        for split in range(5):
+            # load model
+            model = AudioModel(11, rep, modality_dropout, only_cqt, only_pr)
+            checkpoint = torch.load(f"models/{model_name}/checkpoint_{split}.pth", map_location='cuda:0')
+            # print(checkpoint["epoch"])
+            # print(checkpoint.keys())
+
+            model.load_state_dict(checkpoint['model_state_dict'])
+            model = model.cuda()
+            pred_labels, true_labels = [], []
+            predictions_split = {}
+            model.eval()
+            with torch.inference_mode():
+                for k, ps in data[str(split)]["test"].items():
+                    # run the model
+                    if "cqt" in rep:
+                        inp_data = get_cqt(rep, k)
+                    elif "pianoroll" in rep:
+                        inp_data = get_pianoroll(rep, k)
+                    elif rep == "multimodal5":
+                        x1 = get_pianoroll("pianoroll5", k)
+                        x2 = get_cqt("cqt5", k)[:, :, :x1.shape[2]]
+                        inp_data = [x1, x2]
+                    log_prob = model(inp_data, None)
+                    pred = prediction2label(log_prob).cpu().tolist()[0]
+                    print(k, ps, pred)
+                    predictions_split[k] = {
+                        "true": ps,
+                        "pred": pred
+                    }
+
+                    true_labels.append(ps)
+                    pred_labels.append(pred)
+
+            predictions.append(predictions_split)
+            mse.append(get_mse_macro(true_labels, pred_labels))
+            acc.append(balanced_accuracy_score(true_labels, pred_labels))
+        # with one decimal
+        print(f"mse: {mean(mse):.1f}({stdev(mse):.1f})", end=" ")
+        print(f"acc: {mean(acc)*100:.1f}({stdev(acc)*100:.1f})")
+        utils.save_json({
+            "mse": mse,
+            "acc": acc,
+            "predictions": predictions
+        }, f"cache/{cache_name}.json")
+    else:
+        data = utils.load_json(f"cache/{cache_name}.json")
+        tau_c, mse, acc = [], [], []
+        for i in range(5):
+            pred, true = [], []
+            for k, dd in data["predictions"][i].items():
+                pred.append(dd["pred"])
+                true.append(dd["true"])
+            tau_c.append(kendalltau(x=true, y=pred).statistic)
+            mse.append(get_mse_macro(true, pred))
+            acc.append(balanced_accuracy_score(true, pred))
+        print(model_name, end="// ")
+        print(f"& {mean(mse):.2f}({stdev(mse):.2f})", end=" ")
+        print(f"& {mean(acc) * 100:.1f}({stdev(acc) * 100:.2f})", end=" ")
+        print(f"& {mean(tau_c):.3f}({stdev(tau_c):.3f})")
+
+
+def compute_ensemble(truncate=False):
+    round_func = lambda x: math.ceil(x) if truncate else math.floor(x)
+    data_pr = utils.load_json(f"cache/audio_midi_cqt5_ps_v5.json")
+    data_cqt = utils.load_json(f"cache/audio_midi_pianoroll_ps_5_v4.json")
+    tau_c, mse, acc = [], [], []
+    for i in range(5):
+        pred, true = [], []
+        for k, dd in data_pr["predictions"][i].items():
+            cqt_pred = data_cqt["predictions"][i][k]
+            pred.append(round_func((dd["pred"] + cqt_pred["pred"]) / 2))
+            true.append(dd["true"])
+        tau_c.append(kendalltau(x=true, y=pred).statistic)
+        mse.append(get_mse_macro(true, pred))
+        acc.append(balanced_accuracy_score(true, pred))
+    print("ensemble", end="// ")
+    print(f"& {mean(mse):.2f}({stdev(mse):.2f})", end=" ")
+    print(f"& {mean(acc) * 100:.1f}({stdev(acc) * 100:.2f})", end=" ")
+    print(f"& {mean(tau_c):.3f}({stdev(tau_c):.3f})")
+
+
+def load_json(name_file):
+    with open(name_file, 'r') as fp:
+        data = json.load(fp)
+    return data
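ordinal_loss trains against the cumulative encoding of the cited paper: class k becomes a target vector whose first k+1 entries are 1. prediction2label is imported from utils, which is not part of this commit, so the decoder below is an assumption that simply inverts the encoding; the round trip is a quick sanity check. (Note also that the default weight_class=False is not None, so the weighted branch runs and multiplies the loss by False, i.e. zero; passing None is what actually disables weighting.)

    import torch

    def encode_ordinal(targets, num_classes):
        # class k -> [1]*(k+1) + [0]*(num_classes-k-1), as in ordinal_loss.forward
        out = torch.zeros(len(targets), num_classes)
        for i, t in enumerate(targets):
            out[i, :t + 1] = 1
        return out

    def decode_ordinal(predictions):
        # count outputs above 0.5 from the left, stopping at the first gap;
        # clamp keeps an all-zero row at class 0
        return ((predictions > 0.5).long().cumprod(dim=1).sum(dim=1) - 1).clamp(min=0)

    y = torch.tensor([0, 2, 4])
    enc = encode_ordinal(y, num_classes=5)   # class 2 -> [1., 1., 1., 0., 0.]
    assert torch.equal(decode_ordinal(enc), y)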
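ContextAttention pools a variable-length sequence into a single vector: each of the num_head learned context vectors scores every timestep, zero-padded timesteps are masked to -1e4 before the softmax, and the softmax-weighted features are summed over time. A shape smoke test with the sizes AudioModel uses (feature size 256, 4 heads of 64):

    import torch

    attention = ContextAttention(size=256, num_head=4)
    x = torch.randn(2, 7, 256)   # (batch, seq_len, features), e.g. bidirectional GRU output
    pooled = attention(x)
    print(pooled.shape)          # torch.Size([2, 256]) -- one vector per sequence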
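In get_conv_layer the residual blocks preserve spatial size (3x3 convolutions, stride 1, padding 1), so the three max-pool stages set the downsampling. For a "5" representation both pool axes use (3, 4, 4): time shrinks by 3*4*4 = 48 and the 88 frequency bins collapse to one (88 -> 29 -> 7 -> 1), leaving a 256-channel sequence for the GRU after squeeze(-1). A quick check, assuming a CQT clip of 480 frames:

    import torch

    convs = get_conv_layer("cqt5")
    x = torch.randn(1, 1, 480, 88)   # (batch, channel, frames, bins), as produced by get_cqt
    print(convs(x).shape)            # torch.Size([1, 256, 10, 1])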
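get_mse_macro averages the squared error per class rather than per item, so a rare difficulty level weighs as much as a common one:

    y_true = [0, 0, 0, 9]
    y_pred = [0, 0, 0, 4]
    # class 0 contributes MSE 0.0 (three items), class 9 contributes 25.0 (one item)
    print(get_mse_macro(y_true, y_pred))   # 12.5, whereas the per-item MSE would be 6.25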
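compute_ensemble averages the two models' integer predictions per piece and rounds the result. Two details worth flagging: despite the name, truncate=True rounds up (math.ceil) while the default rounds down (math.floor); and data_pr actually loads the cqt cache file while data_cqt loads the pianoroll one, which is harmless here because the average is symmetric. Worked through on one half-integer average:

    import math

    pred_pr, pred_cqt = 4, 7
    avg = (pred_pr + pred_cqt) / 2   # 5.5
    print(math.floor(avg))           # 5 <- truncate=False (the default)
    print(math.ceil(avg))            # 6 <- truncate=True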
temp.mid ADDED
Binary file (4.45 kB).