FrederikRautenberg committed on
Commit b198353 · 1 Parent(s): f07fe35

debug threads

Files changed (2)
  1. app.py +2 -0
  2. pvq_manipulation/models/vits.py +5 -0
app.py CHANGED
@@ -12,6 +12,8 @@ import gradio as gr
 from pvq_manipulation.helper.creapy_wrapper import process_file
 from creapy.utils import config
 
+torch.set_num_threads(os.cpu_count() or 1)
+
 device = 'cuda' if torch.cuda.is_available() else 'cpu'
 pvq_labels = ['Weight', 'Resonance', 'Breathiness', 'Roughness', 'Loudness', 'Strain', 'Pitch']
 
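For context, torch.set_num_threads controls how many threads PyTorch uses for intra-op CPU parallelism, so the added line lets CPU inference use all available cores. The snippet below is a minimal, self-contained sketch (not taken from this repository) showing the setting and a rough way to observe its effect on a CPU-bound matmul.

import os
import time

import torch

# Cap intra-op parallelism at the number of available CPU cores,
# mirroring the line added to app.py above.
torch.set_num_threads(os.cpu_count() or 1)

# Rough, illustrative check of the effect on a CPU-bound matmul.
x = torch.randn(2048, 2048)
start = time.time()
for _ in range(10):
    _ = x @ x
print(torch.get_num_threads(), 'threads:', round(time.time() - start, 2), 's')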
pvq_manipulation/models/vits.py CHANGED
@@ -246,8 +246,11 @@ class Vits_NT(Vits):
             y_mask=y_mask
         )
 
+        import time
+        start = time.time()
         if not torch.cuda.is_available():
             num_chunks = min(os.cpu_count() or 2, z.shape[-1])
+            print(num_chunks, 'num chunks')
             chunk_size = z.shape[-1] // num_chunks
             z_chunks = torch.split(z, chunk_size, dim=-1)
 
@@ -268,6 +271,8 @@ class Vits_NT(Vits):
             (z * y_mask)[:, :, : self.max_inference_len],
             g=speaker_embedding_man[:, :, None] if self.config.gan_speaker_conditioning else None
         )
+
+        print(time.time() - start)
         return o
 
     def forward(self, x, x_lengths, y, y_lengths, aux_input, inference=False):
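The vits.py change is plain timing instrumentation around the CPU decoding path: record a start time, report how many chunks the latent is split across the available cores, and print the elapsed time before returning. Below is a minimal sketch of that pattern outside the model; fake_decode and the latent shape are made up for illustration and are not part of pvq_manipulation.

import os
import time

import torch

def fake_decode(chunk: torch.Tensor) -> torch.Tensor:
    # Stand-in for the model's waveform decoder (illustrative only).
    return chunk * 2.0

z = torch.randn(1, 192, 400)  # (batch, channels, frames) latent, made up for the sketch

start = time.time()
if not torch.cuda.is_available():
    # Split the time axis into roughly one chunk per CPU core.
    num_chunks = min(os.cpu_count() or 2, z.shape[-1])
    print(num_chunks, 'num chunks')
    chunk_size = z.shape[-1] // num_chunks
    z_chunks = torch.split(z, chunk_size, dim=-1)
    o = torch.cat([fake_decode(chunk) for chunk in z_chunks], dim=-1)
else:
    o = fake_decode(z)
print(time.time() - start)

Note that torch.split leaves a smaller trailing chunk when the frame count is not divisible by num_chunks; the concatenation handles that transparently.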