"""YTPMV generator: analyze a MIDI file for BPM, key, and notes, then cut
uploaded videos into beat-synced clips behind a Gradio interface."""

import random
import tempfile

import gradio as gr
import pretty_midi
from moviepy.editor import VideoFileClip, concatenate_videoclips

def pitch_to_name(pitch):
    """Map a MIDI pitch number to its pitch-class name (octave discarded)."""
    notes = ['C', 'C#', 'D', 'D#', 'E', 'F', 'F#', 'G', 'G#', 'A', 'A#', 'B']
    return notes[pitch % 12]

def create_ytpmv(midi_file, video_files):
    """
    Generate a YTPMV: parse the MIDI for BPM, key, and notes; cut a random,
    beat-synced segment from each uploaded video; concatenate the segments
    and trim the result to the MIDI's length.
    """
    if not video_files:
        return None, "Please upload at least one video file.", "No analysis available."

    analysis = "No MIDI uploaded. Using default BPM: 120, total length: 30s."
    bpm = 120
    total_length = 30
    beat_duration = 60 / bpm

    if midi_file:
        try:
            pm = pretty_midi.PrettyMIDI(midi_file.name)
            bpm = pm.estimate_tempo()
            beat_duration = 60 / bpm
            total_length = pm.get_end_time()

            # Extract notes for unique list and pitch class frequencies for key detection
            notes_freq = {i: 0 for i in range(12)}
            all_notes = []
            for inst in pm.instruments:
                for note in inst.notes:
                    pc = note.pitch % 12
                    notes_freq[pc] += 1
                    all_notes.append(note.pitch)

            # Simple key detection: most frequent pitch class as tonic, correlate with major/minor profiles
            tonic = max(notes_freq, key=notes_freq.get)
            tonic_name = pitch_to_name(tonic)

            # Krumhansl-Kessler key profiles (perceived weight per scale degree)
            major_profile = [6.35, 2.23, 3.48, 2.33, 4.38, 4.09, 2.52, 5.19, 2.39, 3.66, 2.29, 2.88]
            minor_profile = [6.33, 2.68, 3.52, 5.38, 2.60, 3.53, 2.54, 4.75, 3.98, 2.69, 3.34, 3.17]

            def correlate(profile, freqs):
                return sum(freqs.get((tonic + i) % 12, 0) * profile[i] for i in range(12))
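            # e.g., with tonic = 9 (A), correlate() walks the histogram from
            # pitch class 9 upward, so the tonic lines up with profile[0]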

            major_corr = correlate(major_profile, notes_freq)
            minor_corr = correlate(minor_profile, notes_freq)
            key_type = 'major' if major_corr > minor_corr else 'minor'

            unique_notes = sorted(set(pitch_to_name(n) for n in all_notes))
            analysis = f"BPM: {bpm:.2f}\nKey: {tonic_name} {key_type}\nNotes: {', '.join(unique_notes)}"
        except Exception as e:
            print(f"MIDI analysis error: {e}")
            analysis = f"MIDI analysis failed: {e}. Using defaults."

    clips = []
    for video_path in video_files:
        try:
            clip = VideoFileClip(video_path.name)
            if clip.duration > 0:
                # Clip length synced to 1-8 beats
                length = random.randint(1, 8) * beat_duration
                if length > clip.duration:
                    length = clip.duration
                start = random.uniform(0, clip.duration - length)
                end = start + length
                short_clip = clip.subclip(start, end)
                clips.append(short_clip)
        except Exception as e:
            print(f"Error processing video {video_path.name}: {e}")
            continue

    if not clips:
        return None, "No valid video clips could be processed.", analysis

    # Concatenate (capped at 20 clips for performance) and trim to the MIDI length
    final_clip = concatenate_videoclips(clips[:20], method="compose")
    if final_clip.duration > total_length:
        final_clip = final_clip.subclip(0, total_length)

    # Save to a temp file; close the handle first so ffmpeg can reopen the
    # path for writing on any platform
    tmp = tempfile.NamedTemporaryFile(suffix=".mp4", delete=False)
    tmp.close()
    output_path = tmp.name
    final_clip.write_videofile(output_path, codec="libx264", audio_codec="aac",
                               verbose=False, logger=None)

    final_clip.close()
    for c in clips:
        c.close()

    # Future: render the MIDI to audio with fluidsynth and attach it via
    # final_clip.set_audio(...); see the render_midi_audio sketch below

    status = f"Generated YTPMV ({total_length:.2f}s) from {len(clips)} beat-synced clips."
    return output_path, status, analysis

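# Hypothetical sketch for the "future" note above: render the parsed MIDI to
# a waveform via pretty_midi's fluidsynth bindings and wrap it as a moviepy
# audio clip. Assumes the optional pyfluidsynth dependency and a SoundFont
# are installed (hence the local imports); nothing calls this yet.
def render_midi_audio(pm, fs=44100):
    import numpy as np
    from moviepy.audio.AudioClip import AudioArrayClip

    wave = pm.fluidsynth(fs=fs)              # 1-D float waveform at fs Hz
    stereo = np.column_stack([wave, wave])   # AudioArrayClip expects (n, channels)
    return AudioArrayClip(stereo, fps=fs)

# Once pm is threaded out of the MIDI branch, usage would look like:
#   final_clip = final_clip.set_audio(render_midi_audio(pm))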
# --- Gradio UI ---
# NOTE: a reconstruction. The original Blocks definition was cut off here,
# leaving only the tail of a Markdown deploy note; the layout below is
# inferred from create_ytpmv's inputs and outputs.

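# Minimal Blocks sketch, assuming a gradio version whose File values expose
# .name, as create_ytpmv expects.
with gr.Blocks(title="YTPMV Generator") as demo:
    gr.Markdown("# YTPMV Generator")
    gr.Markdown(
        "Upload an optional MIDI file and one or more videos; each video is "
        "cut to a random 1-8 beat segment and the segments are concatenated.\n\n"
        "- Deploy on Hugging Face Spaces with `requirements.txt` "
        "(includes gradio, moviepy, pretty_midi)."
    )
    with gr.Row():
        midi_input = gr.File(label="MIDI file (optional)", file_types=[".mid", ".midi"])
        video_input = gr.File(label="Video files", file_count="multiple")
    generate_btn = gr.Button("Generate YTPMV", variant="primary")
    video_output = gr.Video(label="Generated YTPMV")
    status_output = gr.Textbox(label="Status")
    analysis_output = gr.Textbox(label="MIDI analysis", lines=4)
    generate_btn.click(
        create_ytpmv,
        inputs=[midi_input, video_input],
        outputs=[video_output, status_output, analysis_output],
    )
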
if __name__ == "__main__":
    demo.launch(server_name="0.0.0.0", server_port=7860, share=True)