Dataset columns: nwo (string, 10-28 chars) | sha (string, 40 chars) | path (string, 11-97 chars) | identifier (string, 1-64 chars) | parameters (string, 2-2.24k chars) | return_statement (string, 0-2.17k chars) | docstring (string, 0-5.45k chars) | docstring_summary (string, 0-3.83k chars) | func_begin (int64, 1-13.4k) | func_end (int64, 2-13.4k) | function (string, 28-56.4k chars) | url (string, 106-209 chars) | project (int64, 1-48) | executed_lines (list) | executed_lines_pc (float64, 0-153) | missing_lines (list) | missing_lines_pc (float64, 0-100) | covered (bool, 2 classes) | filecoverage (float64, 2.53-100) | function_lines (int64, 2-1.46k) | mccabe (int64, 1-253) | coverage (float64, 0-100) | docstring_lines (int64, 0-112) | function_nodoc (string, 9-56.4k chars) | id (int64, 0-29.8k)
Zulko/moviepy
|
858bb81fba7e09f1f562283ed6d1394db883b6c7
|
moviepy/Clip.py
|
Clip.subclip
|
(self, start_time=0, end_time=None)
|
return new_clip
|
Returns a clip playing the content of the current clip between times
``start_time`` and ``end_time``, which can be expressed in seconds
(15.35), in (min, sec), in (hour, min, sec), or as a string:
'01:03:05.35'.
The ``mask`` and ``audio`` of the resulting subclip will be subclips of
the ``mask`` and ``audio`` of the original clip, if they exist.
Parameters
----------
start_time : float or tuple or str, optional
Moment that will be chosen as the beginning of the produced clip. If
it is negative, it is reset to ``clip.duration + start_time``.
end_time : float or tuple or str, optional
Moment that will be chosen as the end of the produced clip. If not
provided, it is assumed to be the duration of the clip (potentially
infinite). If it is negative, it is reset to ``clip.duration + end_time``.
For instance:
>>> # cut the last two seconds of the clip:
>>> new_clip = clip.subclip(0, -2)
If ``end_time`` is provided or if the clip has a duration attribute,
the duration of the returned clip is set automatically.
|
Returns a clip playing the content of the current clip between times
``start_time`` and ``end_time``, which can be expressed in seconds
(15.35), in (min, sec), in (hour, min, sec), or as a string:
'01:03:05.35'.
| 384 | 450 |
def subclip(self, start_time=0, end_time=None):
"""Returns a clip playing the content of the current clip between times
``start_time`` and ``end_time``, which can be expressed in seconds
(15.35), in (min, sec), in (hour, min, sec), or as a string:
'01:03:05.35'.
The ``mask`` and ``audio`` of the resulting subclip will be subclips of
the ``mask`` and ``audio`` of the original clip, if they exist.
Parameters
----------
start_time : float or tuple or str, optional
Moment that will be chosen as the beginning of the produced clip. If
it is negative, it is reset to ``clip.duration + start_time``.
end_time : float or tuple or str, optional
Moment that will be chosen as the end of the produced clip. If not
provided, it is assumed to be the duration of the clip (potentially
infinite). If it is negative, it is reset to ``clip.duration + end_time``.
For instance:
>>> # cut the last two seconds of the clip:
>>> new_clip = clip.subclip(0, -2)
If ``end_time`` is provided or if the clip has a duration attribute,
the duration of the returned clip is set automatically.
"""
if start_time < 0:
# Make this more Python-like, a negative value means to move
# backward from the end of the clip
start_time = self.duration + start_time # Remember start_time is negative
if (self.duration is not None) and (start_time >= self.duration):
raise ValueError(
"start_time (%.02f) " % start_time
+ "should be smaller than the clip's "
+ "duration (%.02f)." % self.duration
)
new_clip = self.time_transform(lambda t: t + start_time, apply_to=[])
if (end_time is None) and (self.duration is not None):
end_time = self.duration
elif (end_time is not None) and (end_time < 0):
if self.duration is None:
raise ValueError(
(
"Subclip with negative times (here %s)"
" can only be extracted from clips with a ``duration``"
)
% (str((start_time, end_time)))
)
else:
end_time = self.duration + end_time
if end_time is not None:
new_clip.duration = end_time - start_time
new_clip.end = new_clip.start + new_clip.duration
return new_clip
|
https://github.com/Zulko/moviepy/blob/858bb81fba7e09f1f562283ed6d1394db883b6c7/project46/moviepy/Clip.py#L384-L450
| 46 |
[0-66] | 100 | [] | 0 | true | 100 | 67 | 10 | 100 | 26 |
def subclip(self, start_time=0, end_time=None):
if start_time < 0:
# Make this more Python-like, a negative value means to move
# backward from the end of the clip
start_time = self.duration + start_time # Remember start_time is negative
if (self.duration is not None) and (start_time >= self.duration):
raise ValueError(
"start_time (%.02f) " % start_time
+ "should be smaller than the clip's "
+ "duration (%.02f)." % self.duration
)
new_clip = self.time_transform(lambda t: t + start_time, apply_to=[])
if (end_time is None) and (self.duration is not None):
end_time = self.duration
elif (end_time is not None) and (end_time < 0):
if self.duration is None:
raise ValueError(
(
"Subclip with negative times (here %s)"
" can only be extracted from clips with a ``duration``"
)
% (str((start_time, end_time)))
)
else:
end_time = self.duration + end_time
if end_time is not None:
new_clip.duration = end_time - start_time
new_clip.end = new_clip.start + new_clip.duration
return new_clip
| 28,367 |
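A minimal usage sketch of ``subclip`` (an editorial illustration, not part of the record above). It assumes a hypothetical local file ``myvideo.mp4``; the import follows the ``from moviepy import VideoFileClip`` form used in the ``iter_frames`` docstring later in this file.

from moviepy import VideoFileClip

clip = VideoFileClip("myvideo.mp4")             # hypothetical input file
first_five = clip.subclip(0, 5)                 # seconds
middle = clip.subclip((0, 0, 10), "00:00:20")   # (hour, min, sec) tuple and string forms
no_tail = clip.subclip(0, -2)                   # negative end_time: drop the last two seconds
print(first_five.duration, middle.duration, no_tail.duration)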
Zulko/moviepy
|
858bb81fba7e09f1f562283ed6d1394db883b6c7
|
moviepy/Clip.py
|
Clip.cutout
|
(self, start_time, end_time)
|
Returns a clip playing the content of the current clip but
skips the extract between ``start_time`` and ``end_time``, which can be
expressed in seconds (15.35), in (min, sec), in (hour, min, sec),
or as a string: '01:03:05.35'.
If the original clip has a ``duration`` attribute set,
the duration of the returned clip is automatically computed as
``duration - (end_time - start_time)``.
The resulting clip's ``audio`` and ``mask`` will also be cut out
if they exist.
Parameters
----------
start_time : float or tuple or str
Moment from which frames will be ignored in the resulting output.
end_time : float or tuple or str
Moment until which frames will be ignored in the resulting output.
|
Returns a clip playing the content of the current clip but
skips the extract between ``start_time`` and ``end_time``, which can be
expressed in seconds (15.35), in (min, sec), in (hour, min, sec),
or as a string: '01:03:05.35'.
| 453 | 484 |
def cutout(self, start_time, end_time):
"""
Returns a clip playing the content of the current clip but
skips the extract between ``start_time`` and ``end_time``, which can be
expressed in seconds (15.35), in (min, sec), in (hour, min, sec),
or as a string: '01:03:05.35'.
If the original clip has a ``duration`` attribute set,
the duration of the returned clip is automatically computed as
``duration - (end_time - start_time)``.
The resulting clip's ``audio`` and ``mask`` will also be cut out
if they exist.
Parameters
----------
start_time : float or tuple or str
Moment from which frames will be ignored in the resulting output.
end_time : float or tuple or str
Moment until which frames will be ignored in the resulting output.
"""
new_clip = self.time_transform(
lambda t: t + (t >= start_time) * (end_time - start_time),
apply_to=["audio", "mask"],
)
if self.duration is not None:
return new_clip.with_duration(self.duration - (end_time - start_time))
else: # pragma: no cover
return new_clip
|
https://github.com/Zulko/moviepy/blob/858bb81fba7e09f1f562283ed6d1394db883b6c7/project46/moviepy/Clip.py#L453-L484
| 46 |
[0-31] | 106.666667 | [] | 0 | true | 100 | 32 | 2 | 100 | 20 |
def cutout(self, start_time, end_time):
new_clip = self.time_transform(
lambda t: t + (t >= start_time) * (end_time - start_time),
apply_to=["audio", "mask"],
)
if self.duration is not None:
return new_clip.with_duration(self.duration - (end_time - start_time))
else: # pragma: no cover
return new_clip
| 28,368 |
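A minimal sketch contrasting ``cutout`` with ``subclip``, assuming the same hypothetical ``myvideo.mp4``: ``subclip`` keeps a time window, while ``cutout`` removes one and splices the remainder together.

from moviepy import VideoFileClip

clip = VideoFileClip("myvideo.mp4")   # hypothetical input file
kept = clip.subclip(10, 20)           # keep only seconds 10-20
spliced = clip.cutout(10, 20)         # drop seconds 10-20, keep everything else
# When clip.duration is set, spliced.duration == clip.duration - 10
print(kept.duration, spliced.duration)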
|
Zulko/moviepy
|
858bb81fba7e09f1f562283ed6d1394db883b6c7
|
moviepy/Clip.py
|
Clip.iter_frames
|
(self, fps=None, with_times=False, logger=None, dtype=None)
|
Iterates over all the frames of the clip.
Returns each frame of the clip as a HxWxN Numpy array,
where N=1 for mask clips and N=3 for RGB clips.
This function is not really meant for video editing. It provides an
easy way to do frame-by-frame treatment of a video, for fields like
science, computer vision...
Parameters
----------
fps : int, optional
Frames per second for clip iteration. Is optional if the clip already
has a ``fps`` attribute.
with_times : bool, optional
If ``True``, yields tuples of ``(t, frame)`` where ``t`` is the current
time for the frame, otherwise only a ``frame`` object.
logger : str, optional
Either ``"bar"`` for progress bar or ``None`` or any Proglog logger.
dtype : type, optional
Type to cast Numpy array frames. Use ``dtype="uint8"`` when using the
pictures to write video, images...
Examples
--------
>>> # prints the maximum of red that is contained
>>> # on the first line of each frame of the clip.
>>> from moviepy import VideoFileClip
>>> myclip = VideoFileClip('myvideo.mp4')
>>> print ( [frame[0,:,0].max()
for frame in myclip.iter_frames()])
|
Iterates over all the frames of the clip.
| 488 | 540 |
def iter_frames(self, fps=None, with_times=False, logger=None, dtype=None):
"""Iterates over all the frames of the clip.
Returns each frame of the clip as a HxWxN Numpy array,
where N=1 for mask clips and N=3 for RGB clips.
This function is not really meant for video editing. It provides an
easy way to do frame-by-frame treatment of a video, for fields like
science, computer vision...
Parameters
----------
fps : int, optional
Frames per second for clip iteration. Is optional if the clip already
has a ``fps`` attribute.
with_times : bool, optional
If ``True``, yields tuples of ``(t, frame)`` where ``t`` is the current
time for the frame, otherwise only a ``frame`` object.
logger : str, optional
Either ``"bar"`` for progress bar or ``None`` or any Proglog logger.
dtype : type, optional
Type to cast Numpy array frames. Use ``dtype="uint8"`` when using the
pictures to write video, images...
Examples
--------
>>> # prints the maximum of red that is contained
>>> # on the first line of each frame of the clip.
>>> from moviepy import VideoFileClip
>>> myclip = VideoFileClip('myvideo.mp4')
>>> print ( [frame[0,:,0].max()
for frame in myclip.iter_frames()])
"""
logger = proglog.default_bar_logger(logger)
for frame_index in logger.iter_bar(
frame_index=np.arange(0, int(self.duration * fps))
):
# int is used to ensure that floating point errors are rounded
# down to the nearest integer
t = frame_index / fps
frame = self.get_frame(t)
if (dtype is not None) and (frame.dtype != dtype):
frame = frame.astype(dtype)
if with_times:
yield t, frame
else:
yield frame
|
https://github.com/Zulko/moviepy/blob/858bb81fba7e09f1f562283ed6d1394db883b6c7/project46/moviepy/Clip.py#L488-L540
| 46 |
[0-52] | 100 | [] | 0 | true | 100 | 53 | 5 | 100 | 36 |
def iter_frames(self, fps=None, with_times=False, logger=None, dtype=None):
logger = proglog.default_bar_logger(logger)
for frame_index in logger.iter_bar(
frame_index=np.arange(0, int(self.duration * fps))
):
# int is used to ensure that floating point errors are rounded
# down to the nearest integer
t = frame_index / fps
frame = self.get_frame(t)
if (dtype is not None) and (frame.dtype != dtype):
frame = frame.astype(dtype)
if with_times:
yield t, frame
else:
yield frame
| 28,369 |
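The ``iter_frames`` docstring above already sketches a per-frame analysis; the variant below is a self-contained version of the same idea, assuming the hypothetical ``myvideo.mp4``. It samples one frame per second and reports the brightest one.

import numpy as np
from moviepy import VideoFileClip

clip = VideoFileClip("myvideo.mp4")   # hypothetical input file
brightness = [
    frame.mean()                      # average pixel value of the HxWx3 frame
    for frame in clip.iter_frames(fps=1, dtype="uint8", logger=None)
]
print(np.argmax(brightness))          # index (in seconds) of the brightest sampled frame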
|
Zulko/moviepy
|
858bb81fba7e09f1f562283ed6d1394db883b6c7
|
moviepy/Clip.py
|
Clip.close
|
(self)
|
Release any resources that are in use.
|
Release any resources that are in use.
| 542 | 552 |
def close(self):
"""Release any resources that are in use."""
# Implementation note for subclasses:
#
# * Memory-based resources can be left to the garbage-collector.
# * However, any open files should be closed, and subprocesses
# should be terminated.
# * Be wary that shallow copies are frequently used.
# Closing a Clip may affect its copies.
# * Therefore, should NOT be called by __del__().
pass
|
https://github.com/Zulko/moviepy/blob/858bb81fba7e09f1f562283ed6d1394db883b6c7/project46/moviepy/Clip.py#L542-L552
| 46 |
[0-10] | 100 | [] | 0 | true | 100 | 11 | 1 | 100 | 1 |
def close(self):
# Implementation note for subclasses:
#
# * Memory-based resources can be left to the garbage-collector.
# * However, any open files should be closed, and subprocesses
# should be terminated.
# * Be wary that shallow copies are frequently used.
# Closing a Clip may affect its copies.
# * Therefore, should NOT be called by __del__().
pass
| 28,370 |
|
Zulko/moviepy
|
858bb81fba7e09f1f562283ed6d1394db883b6c7
|
moviepy/Clip.py
|
Clip.__eq__
|
(self, other)
|
return True
| 554 | 569 |
def __eq__(self, other):
if not isinstance(other, Clip):
return NotImplemented
# Make sure that the total number of frames is the same
self_length = self.duration * self.fps
other_length = other.duration * other.fps
if self_length != other_length:
return False
# Make sure that each frame is the same
for frame1, frame2 in zip(self.iter_frames(), other.iter_frames()):
if not np.array_equal(frame1, frame2):
return False
return True
|
https://github.com/Zulko/moviepy/blob/858bb81fba7e09f1f562283ed6d1394db883b6c7/project46/moviepy/Clip.py#L554-L569
| 46 |
[0-15] | 100 | [] | 0 | true | 100 | 16 | 5 | 100 | 0 |
def __eq__(self, other):
if not isinstance(other, Clip):
return NotImplemented
# Make sure that the total number of frames is the same
self_length = self.duration * self.fps
other_length = other.duration * other.fps
if self_length != other_length:
return False
# Make sure that each frame is the same
for frame1, frame2 in zip(self.iter_frames(), other.iter_frames()):
if not np.array_equal(frame1, frame2):
return False
return True
| 28,371 |
||
Zulko/moviepy
|
858bb81fba7e09f1f562283ed6d1394db883b6c7
|
moviepy/Clip.py
|
Clip.__enter__
|
(self)
|
return self
| 573 | 574 |
def __enter__(self):
return self
|
https://github.com/Zulko/moviepy/blob/858bb81fba7e09f1f562283ed6d1394db883b6c7/project46/moviepy/Clip.py#L573-L574
| 46 |
[0, 1] | 100 | [] | 0 | true | 100 | 2 | 1 | 100 | 0 |
def __enter__(self):
return self
| 28,372 |
||
Zulko/moviepy
|
858bb81fba7e09f1f562283ed6d1394db883b6c7
|
moviepy/Clip.py
|
Clip.__exit__
|
(self, exc_type, exc_value, traceback)
| 576 | 577 |
def __exit__(self, exc_type, exc_value, traceback):
self.close()
|
https://github.com/Zulko/moviepy/blob/858bb81fba7e09f1f562283ed6d1394db883b6c7/project46/moviepy/Clip.py#L576-L577
| 46 |
[0, 1] | 100 | [] | 0 | true | 100 | 2 | 1 | 100 | 0 |
def __exit__(self, exc_type, exc_value, traceback):
self.close()
| 28,373 |
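``close``, ``__enter__`` and ``__exit__`` together make clips usable as context managers, so file-backed resources are released deterministically. A minimal sketch, again assuming a hypothetical ``myvideo.mp4``:

from moviepy import VideoFileClip

with VideoFileClip("myvideo.mp4") as clip:   # __enter__ returns the clip itself
    print(clip.duration)
# leaving the block calls __exit__, which calls close() and releases the reader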
|||
Zulko/moviepy
|
858bb81fba7e09f1f562283ed6d1394db883b6c7
|
moviepy/config.py
|
try_cmd
|
(cmd)
|
TODO: add documentation
|
TODO: add documentation
| 27 | 38 |
def try_cmd(cmd):
"""TODO: add documentation"""
try:
popen_params = cross_platform_popen_params(
{"stdout": sp.PIPE, "stderr": sp.PIPE, "stdin": sp.DEVNULL}
)
proc = sp.Popen(cmd, **popen_params)
proc.communicate()
except Exception as err:
return False, err
else:
return True, None
|
https://github.com/Zulko/moviepy/blob/858bb81fba7e09f1f562283ed6d1394db883b6c7/project46/moviepy/config.py#L27-L38
| 46 |
[0-11] | 100 | [] | 0 | true | 63.076923 | 12 | 2 | 100 | 1 |
def try_cmd(cmd):
try:
popen_params = cross_platform_popen_params(
{"stdout": sp.PIPE, "stderr": sp.PIPE, "stdin": sp.DEVNULL}
)
proc = sp.Popen(cmd, **popen_params)
proc.communicate()
except Exception as err:
return False, err
else:
return True, None
| 28,374 |
|
Zulko/moviepy
|
858bb81fba7e09f1f562283ed6d1394db883b6c7
|
moviepy/config.py
|
check
|
()
|
Check if moviepy has found the binaries of FFmpeg and ImageMagick.
|
Check if moviepy has found the binaries of FFmpeg and ImageMagick.
| 107 | 121 |
def check():
"""Check if moviepy has found the binaries of FFmpeg and ImageMagick."""
if try_cmd([FFMPEG_BINARY])[0]:
print(f"MoviePy: ffmpeg successfully found in '{FFMPEG_BINARY}'.")
else: # pragma: no cover
print(f"MoviePy: can't find or access ffmpeg in '{FFMPEG_BINARY}'.")
if try_cmd([IMAGEMAGICK_BINARY])[0]:
print(f"MoviePy: ImageMagick successfully found in '{IMAGEMAGICK_BINARY}'.")
else: # pragma: no cover
print(f"MoviePy: can't find or access ImageMagick in '{IMAGEMAGICK_BINARY}'.")
if DOTENV:
print(f"\n.env file content at {DOTENV}:\n")
print(Path(DOTENV).read_text())
|
https://github.com/Zulko/moviepy/blob/858bb81fba7e09f1f562283ed6d1394db883b6c7/project46/moviepy/config.py#L107-L121
| 46 |
[0, 1] | 18.181818 | [2, 3, 7, 8, 12, 13, 14] | 63.636364 | false | 63.076923 | 15 | 4 | 36.363636 | 1 |
def check():
if try_cmd([FFMPEG_BINARY])[0]:
print(f"MoviePy: ffmpeg successfully found in '{FFMPEG_BINARY}'.")
else: # pragma: no cover
print(f"MoviePy: can't find or access ffmpeg in '{FFMPEG_BINARY}'.")
if try_cmd([IMAGEMAGICK_BINARY])[0]:
print(f"MoviePy: ImageMagick successfully found in '{IMAGEMAGICK_BINARY}'.")
else: # pragma: no cover
print(f"MoviePy: can't find or access ImageMagick in '{IMAGEMAGICK_BINARY}'.")
if DOTENV:
print(f"\n.env file content at {DOTENV}:\n")
print(Path(DOTENV).read_text())
| 28,375 |
|
Zulko/moviepy
|
858bb81fba7e09f1f562283ed6d1394db883b6c7
|
moviepy/audio/AudioClip.py
|
concatenate_audioclips
|
(clips)
|
return CompositeAudioClip(newclips).with_duration(starts_end[-1])
|
Concatenates one AudioClip after another, in the order in which they are
passed to the ``clips`` parameter.
Parameters
----------
clips
List of audio clips, which will be played one after other.
|
Concatenates one AudioClip after another, in the order in which they are
passed to the ``clips`` parameter.
| 371 | 385 |
def concatenate_audioclips(clips):
"""Concatenates one AudioClip after another, in the order that are passed
to ``clips`` parameter.
Parameters
----------
clips
List of audio clips, which will be played one after other.
"""
# start, end/start2, end2/start3... end
starts_end = np.cumsum([0, *[clip.duration for clip in clips]])
newclips = [clip.with_start(t) for clip, t in zip(clips, starts_end[:-1])]
return CompositeAudioClip(newclips).with_duration(starts_end[-1])
|
https://github.com/Zulko/moviepy/blob/858bb81fba7e09f1f562283ed6d1394db883b6c7/project46/moviepy/audio/AudioClip.py#L371-L385
| 46 |
[0-14] | 100 | [] | 0 | true | 94.354839 | 15 | 3 | 100 | 8 |
def concatenate_audioclips(clips):
# start, end/start2, end2/start3... end
starts_end = np.cumsum([0, *[clip.duration for clip in clips]])
newclips = [clip.with_start(t) for clip, t in zip(clips, starts_end[:-1])]
return CompositeAudioClip(newclips).with_duration(starts_end[-1])
| 28,376 |
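A minimal sketch of ``concatenate_audioclips`` built from generated ``AudioClip`` objects, so no media file is needed; the 440 Hz and 660 Hz tones are arbitrary choices for the example, and the import path follows the module path ``moviepy/audio/AudioClip.py`` shown above.

import numpy as np
from moviepy.audio.AudioClip import AudioClip, concatenate_audioclips

tone_a = AudioClip(lambda t: np.sin(2 * np.pi * 440 * t), duration=1, fps=44100)
tone_b = AudioClip(lambda t: np.sin(2 * np.pi * 660 * t), duration=2, fps=44100)

song = concatenate_audioclips([tone_a, tone_b])
print(song.duration)   # 3.0 seconds: the clips are laid end to end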
Zulko/moviepy
|
858bb81fba7e09f1f562283ed6d1394db883b6c7
|
moviepy/audio/AudioClip.py
|
AudioClip.__init__
|
(self, make_frame=None, duration=None, fps=None)
| 65 | 80 |
def __init__(self, make_frame=None, duration=None, fps=None):
super().__init__()
if fps is not None:
self.fps = fps
if make_frame is not None:
self.make_frame = make_frame
frame0 = self.get_frame(0)
if hasattr(frame0, "__iter__"):
self.nchannels = len(list(frame0))
else:
self.nchannels = 1
if duration is not None:
self.duration = duration
self.end = duration
|
https://github.com/Zulko/moviepy/blob/858bb81fba7e09f1f562283ed6d1394db883b6c7/project46/moviepy/audio/AudioClip.py#L65-L80
| 46 |
[0-10, 12-15] | 93.75 | [] | 0 | false | 94.354839 | 16 | 5 | 100 | 0 |
def __init__(self, make_frame=None, duration=None, fps=None):
super().__init__()
if fps is not None:
self.fps = fps
if make_frame is not None:
self.make_frame = make_frame
frame0 = self.get_frame(0)
if hasattr(frame0, "__iter__"):
self.nchannels = len(list(frame0))
else:
self.nchannels = 1
if duration is not None:
self.duration = duration
self.end = duration
| 28,377 |
|||
Zulko/moviepy
|
858bb81fba7e09f1f562283ed6d1394db883b6c7
|
moviepy/audio/AudioClip.py
|
AudioClip.iter_chunks
|
(
self,
chunksize=None,
chunk_duration=None,
fps=None,
quantize=False,
nbytes=2,
logger=None,
)
|
Iterator that returns the whole sound array of the clip by chunks
|
Iterator that returns the whole sound array of the clip by chunks
| 83 | 111 |
def iter_chunks(
self,
chunksize=None,
chunk_duration=None,
fps=None,
quantize=False,
nbytes=2,
logger=None,
):
"""Iterator that returns the whole sound array of the clip by chunks"""
if fps is None:
fps = self.fps
logger = proglog.default_bar_logger(logger)
if chunk_duration is not None:
chunksize = int(chunk_duration * fps)
total_size = int(fps * self.duration)
nchunks = total_size // chunksize + 1
positions = np.linspace(0, total_size, nchunks + 1, endpoint=True, dtype=int)
for i in logger.iter_bar(chunk=list(range(nchunks))):
size = positions[i + 1] - positions[i]
assert size <= chunksize
timings = (1.0 / fps) * np.arange(positions[i], positions[i + 1])
yield self.to_soundarray(
timings, nbytes=nbytes, quantize=quantize, fps=fps, buffersize=chunksize
)
|
https://github.com/Zulko/moviepy/blob/858bb81fba7e09f1f562283ed6d1394db883b6c7/project46/moviepy/audio/AudioClip.py#L83-L111
| 46 |
[0, 9-13, 15-28] | 68.965517 | [14] | 3.448276 | false | 94.354839 | 29 | 5 | 96.551724 | 1 |
def iter_chunks(
self,
chunksize=None,
chunk_duration=None,
fps=None,
quantize=False,
nbytes=2,
logger=None,
):
if fps is None:
fps = self.fps
logger = proglog.default_bar_logger(logger)
if chunk_duration is not None:
chunksize = int(chunk_duration * fps)
total_size = int(fps * self.duration)
nchunks = total_size // chunksize + 1
positions = np.linspace(0, total_size, nchunks + 1, endpoint=True, dtype=int)
for i in logger.iter_bar(chunk=list(range(nchunks))):
size = positions[i + 1] - positions[i]
assert size <= chunksize
timings = (1.0 / fps) * np.arange(positions[i], positions[i + 1])
yield self.to_soundarray(
timings, nbytes=nbytes, quantize=quantize, fps=fps, buffersize=chunksize
)
| 28,378 |
|
Zulko/moviepy
|
858bb81fba7e09f1f562283ed6d1394db883b6c7
|
moviepy/audio/AudioClip.py
|
AudioClip.to_soundarray
|
(
self, tt=None, fps=None, quantize=False, nbytes=2, buffersize=50000
)
|
return snd_array
|
Transforms the sound into an array that can be played by pygame
or written in a wav file. See ``AudioClip.preview``.
Parameters
----------
fps
Frame rate of the sound for the conversion.
44100 for top quality.
nbytes
Number of bytes to encode the sound: 1 for 8bit sound,
2 for 16bit, 4 for 32bit sound.
|
Transforms the sound into an array that can be played by pygame
or written in a wav file. See ``AudioClip.preview``.
| 114 | 164 |
def to_soundarray(
self, tt=None, fps=None, quantize=False, nbytes=2, buffersize=50000
):
"""
Transforms the sound into an array that can be played by pygame
or written in a wav file. See ``AudioClip.preview``.
Parameters
----------
fps
Frame rate of the sound for the conversion.
44100 for top quality.
nbytes
Number of bytes to encode the sound: 1 for 8bit sound,
2 for 16bit, 4 for 32bit sound.
"""
if tt is None:
if fps is None:
fps = self.fps
max_duration = 1 * buffersize / fps
if self.duration > max_duration:
stacker = np.vstack if self.nchannels == 2 else np.hstack
return stacker(
tuple(
self.iter_chunks(
fps=fps, quantize=quantize, nbytes=2, chunksize=buffersize
)
)
)
else:
tt = np.arange(0, self.duration, 1.0 / fps)
"""
elif len(tt)> 1.5*buffersize:
nchunks = int(len(tt)/buffersize+1)
tt_chunks = np.array_split(tt, nchunks)
return stacker([self.to_soundarray(tt=ttc, buffersize=buffersize, fps=fps,
quantize=quantize, nbytes=nbytes)
for ttc in tt_chunks])
"""
snd_array = self.get_frame(tt)
if quantize:
snd_array = np.maximum(-0.99, np.minimum(0.99, snd_array))
inttype = {1: "int8", 2: "int16", 4: "int32"}[nbytes]
snd_array = (2 ** (8 * nbytes - 1) * snd_array).astype(inttype)
return snd_array
|
https://github.com/Zulko/moviepy/blob/858bb81fba7e09f1f562283ed6d1394db883b6c7/project46/moviepy/audio/AudioClip.py#L114-L164
| 46 |
[0, 18-50] | 66.666667 | [] | 0 | false | 94.354839 | 51 | 5 | 100 | 13 |
def to_soundarray(
self, tt=None, fps=None, quantize=False, nbytes=2, buffersize=50000
):
if tt is None:
if fps is None:
fps = self.fps
max_duration = 1 * buffersize / fps
if self.duration > max_duration:
stacker = np.vstack if self.nchannels == 2 else np.hstack
return stacker(
tuple(
self.iter_chunks(
fps=fps, quantize=quantize, nbytes=2, chunksize=buffersize
)
)
)
else:
tt = np.arange(0, self.duration, 1.0 / fps)
snd_array = self.get_frame(tt)
if quantize:
snd_array = np.maximum(-0.99, np.minimum(0.99, snd_array))
inttype = {1: "int8", 2: "int16", 4: "int32"}[nbytes]
snd_array = (2 ** (8 * nbytes - 1) * snd_array).astype(inttype)
return snd_array
| 28,379 |
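A minimal sketch of ``to_soundarray`` on a generated mono tone, so no media file is needed; the 440 Hz tone is arbitrary.

import numpy as np
from moviepy.audio.AudioClip import AudioClip

tone = AudioClip(lambda t: np.sin(2 * np.pi * 440 * t), duration=1, fps=44100)
samples = tone.to_soundarray(fps=44100)                              # floats in [-1, 1]
quantized = tone.to_soundarray(fps=44100, quantize=True, nbytes=2)   # int16 samples
print(samples.shape, quantized.dtype)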
Zulko/moviepy
|
858bb81fba7e09f1f562283ed6d1394db883b6c7
|
moviepy/audio/AudioClip.py
|
AudioClip.max_volume
|
(self, stereo=False, chunksize=50000, logger=None)
|
return maxi if stereo else maxi[0]
|
Returns the maximum volume level of the clip.
|
Returns the maximum volume level of the clip.
| 166 | 177 |
def max_volume(self, stereo=False, chunksize=50000, logger=None):
"""Returns the maximum volume level of the clip."""
# max volume separated by channels if ``stereo`` and not mono
stereo = stereo and self.nchannels > 1
# zero for each channel
maxi = np.zeros(self.nchannels)
for chunk in self.iter_chunks(chunksize=chunksize, logger=logger):
maxi = np.maximum(maxi, abs(chunk).max(axis=0))
# if mono returns float, otherwise array of volumes by channel
return maxi if stereo else maxi[0]
|
https://github.com/Zulko/moviepy/blob/858bb81fba7e09f1f562283ed6d1394db883b6c7/project46/moviepy/audio/AudioClip.py#L166-L177
| 46 |
[0-11] | 100 | [] | 0 | true | 94.354839 | 12 | 3 | 100 | 1 |
def max_volume(self, stereo=False, chunksize=50000, logger=None):
# max volume separated by channels if ``stereo`` and not mono
stereo = stereo and self.nchannels > 1
# zero for each channel
maxi = np.zeros(self.nchannels)
for chunk in self.iter_chunks(chunksize=chunksize, logger=logger):
maxi = np.maximum(maxi, abs(chunk).max(axis=0))
# if mono returns float, otherwise array of volumes by channel
return maxi if stereo else maxi[0]
| 28,380 |
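A minimal sketch of ``max_volume`` on a half-amplitude generated tone; since the clip is mono, a single float is returned, as the final line of the function above shows.

import numpy as np
from moviepy.audio.AudioClip import AudioClip

tone = AudioClip(lambda t: 0.5 * np.sin(2 * np.pi * 440 * t), duration=1, fps=44100)
print(tone.max_volume())   # approximately 0.5 for this half-amplitude sine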
Zulko/moviepy
|
858bb81fba7e09f1f562283ed6d1394db883b6c7
|
moviepy/audio/AudioClip.py
|
AudioClip.write_audiofile
|
(
self,
filename,
fps=None,
nbytes=2,
buffersize=2000,
codec=None,
bitrate=None,
ffmpeg_params=None,
write_logfile=False,
logger="bar",
)
|
return ffmpeg_audiowrite(
self,
filename,
fps,
nbytes,
buffersize,
codec=codec,
bitrate=bitrate,
write_logfile=write_logfile,
ffmpeg_params=ffmpeg_params,
logger=logger,
)
|
Writes an audio file from the AudioClip.
Parameters
----------
filename
Name of the output file, as a string or a path-like object.
fps
Frames per second. If not set, it will default to self.fps if
already set, otherwise it will default to 44100.
nbytes
Sample width (set to 2 for 16-bit sound, 4 for 32-bit sound)
codec
Which audio codec should be used. If None provided, the codec is
determined based on the extension of the filename. Choose
'pcm_s16le' for 16-bit wav and 'pcm_s32le' for 32-bit wav.
bitrate
Audio bitrate, given as a string like '50k', '500k', '3000k'.
Will determine the size and quality of the output file.
Note that this is mainly an indicative goal; the bitrate won't
necessarily be exactly this value in the output file.
ffmpeg_params
Any additional parameters you would like to pass, as a list
of terms, like ['-option1', 'value1', '-option2', 'value2']
write_logfile
If true, produces a detailed logfile named filename + '.log'
when writing the file
logger
Either ``"bar"`` for progress bar or ``None`` or any Proglog logger.
|
Writes an audio file from the AudioClip.
| 181 | 260 |
def write_audiofile(
self,
filename,
fps=None,
nbytes=2,
buffersize=2000,
codec=None,
bitrate=None,
ffmpeg_params=None,
write_logfile=False,
logger="bar",
):
"""Writes an audio file from the AudioClip.
Parameters
----------
filename
Name of the output file, as a string or a path-like object.
fps
Frames per second. If not set, it will default to self.fps if
already set, otherwise it will default to 44100.
nbytes
Sample width (set to 2 for 16-bit sound, 4 for 32-bit sound)
codec
Which audio codec should be used. If None provided, the codec is
determined based on the extension of the filename. Choose
'pcm_s16le' for 16-bit wav and 'pcm_s32le' for 32-bit wav.
bitrate
Audio bitrate, given as a string like '50k', '500k', '3000k'.
Will determine the size and quality of the output file.
Note that this is mainly an indicative goal; the bitrate won't
necessarily be exactly this value in the output file.
ffmpeg_params
Any additional parameters you would like to pass, as a list
of terms, like ['-option1', 'value1', '-option2', 'value2']
write_logfile
If true, produces a detailed logfile named filename + '.log'
when writing the file
logger
Either ``"bar"`` for progress bar or ``None`` or any Proglog logger.
"""
if not fps:
if not self.fps:
fps = 44100
else:
fps = self.fps
if codec is None:
name, ext = os.path.splitext(os.path.basename(filename))
try:
codec = extensions_dict[ext[1:]]["codec"][0]
except KeyError:
raise ValueError(
"MoviePy couldn't find the codec associated "
"with the filename. Provide the 'codec' "
"parameter in write_audiofile."
)
return ffmpeg_audiowrite(
self,
filename,
fps,
nbytes,
buffersize,
codec=codec,
bitrate=bitrate,
write_logfile=write_logfile,
ffmpeg_params=ffmpeg_params,
logger=logger,
)
|
https://github.com/Zulko/moviepy/blob/858bb81fba7e09f1f562283ed6d1394db883b6c7/project46/moviepy/audio/AudioClip.py#L181-L260
| 46 |
[0, 50-52, 54-60, 67-79] | 30 | [53, 61, 62] | 3.75 | false | 94.354839 | 80 | 5 | 96.25 | 37 |
def write_audiofile(
self,
filename,
fps=None,
nbytes=2,
buffersize=2000,
codec=None,
bitrate=None,
ffmpeg_params=None,
write_logfile=False,
logger="bar",
):
if not fps:
if not self.fps:
fps = 44100
else:
fps = self.fps
if codec is None:
name, ext = os.path.splitext(os.path.basename(filename))
try:
codec = extensions_dict[ext[1:]]["codec"][0]
except KeyError:
raise ValueError(
"MoviePy couldn't find the codec associated "
"with the filename. Provide the 'codec' "
"parameter in write_audiofile."
)
return ffmpeg_audiowrite(
self,
filename,
fps,
nbytes,
buffersize,
codec=codec,
bitrate=bitrate,
write_logfile=write_logfile,
ffmpeg_params=ffmpeg_params,
logger=logger,
)
| 28,381 |
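A minimal sketch of ``write_audiofile`` using a generated stereo tone; the output name ``tone.wav`` is arbitrary, ``codec="pcm_s16le"`` follows the 16-bit wav hint in the docstring above, and a working FFmpeg binary is assumed.

import numpy as np
from moviepy.audio.AudioClip import AudioClip

# two identical channels -> nchannels is inferred as 2 from the frame at t=0
make_frame = lambda t: np.array([np.sin(2 * np.pi * 440 * t),
                                 np.sin(2 * np.pi * 440 * t)]).T
tone = AudioClip(make_frame, duration=2, fps=44100)
tone.write_audiofile("tone.wav", fps=44100, codec="pcm_s16le", logger=None)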
Zulko/moviepy
|
858bb81fba7e09f1f562283ed6d1394db883b6c7
|
moviepy/audio/AudioClip.py
|
AudioArrayClip.__init__
|
(self, array, fps)
| 281 | 306 |
def __init__(self, array, fps):
Clip.__init__(self)
self.array = array
self.fps = fps
self.duration = 1.0 * len(array) / fps
def make_frame(t):
"""Complicated, but must be able to handle the case where t
is a list of the form sin(t).
"""
if isinstance(t, np.ndarray):
array_inds = np.round(self.fps * t).astype(int)
in_array = (array_inds >= 0) & (array_inds < len(self.array))
result = np.zeros((len(t), 2))
result[in_array] = self.array[array_inds[in_array]]
return result
else:
i = int(self.fps * t)
if i < 0 or i >= len(self.array):
return 0 * self.array[0]
else:
return self.array[i]
self.make_frame = make_frame
self.nchannels = len(list(self.get_frame(0)))
|
https://github.com/Zulko/moviepy/blob/858bb81fba7e09f1f562283ed6d1394db883b6c7/project46/moviepy/audio/AudioClip.py#L281-L306
| 46 |
[0-19, 21-25] | 96.153846 | [20] | 3.846154 | false | 94.354839 | 26 | 5 | 96.153846 | 0 |
def __init__(self, array, fps):
Clip.__init__(self)
self.array = array
self.fps = fps
self.duration = 1.0 * len(array) / fps
def make_frame(t):
if isinstance(t, np.ndarray):
array_inds = np.round(self.fps * t).astype(int)
in_array = (array_inds >= 0) & (array_inds < len(self.array))
result = np.zeros((len(t), 2))
result[in_array] = self.array[array_inds[in_array]]
return result
else:
i = int(self.fps * t)
if i < 0 or i >= len(self.array):
return 0 * self.array[0]
else:
return self.array[i]
self.make_frame = make_frame
self.nchannels = len(list(self.get_frame(0)))
| 28,382 |
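A minimal sketch of ``AudioArrayClip``: the waveform is precomputed as a numpy array with one column per channel, rather than described by a ``make_frame`` function; the 440 Hz tone is arbitrary.

import numpy as np
from moviepy.audio.AudioClip import AudioArrayClip

fps = 44100
t = np.arange(0, 1, 1.0 / fps)           # one second of sample times
wave = np.sin(2 * np.pi * 440 * t)
stereo = np.column_stack([wave, wave])   # shape (44100, 2)

clip = AudioArrayClip(stereo, fps=fps)
print(clip.duration, clip.nchannels)     # 1.0 2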
|||
Zulko/moviepy
|
858bb81fba7e09f1f562283ed6d1394db883b6c7
|
moviepy/audio/AudioClip.py
|
CompositeAudioClip.__init__
|
(self, clips)
| 324 | 341 |
def __init__(self, clips):
self.clips = clips
self.nchannels = max(clip.nchannels for clip in self.clips)
# self.duration is set at AudioClip
duration = None
for end in self.ends:
if end is None:
break
duration = max(end, duration or 0)
# self.fps is set at AudioClip
fps = None
for clip in self.clips:
if hasattr(clip, "fps") and isinstance(clip.fps, numbers.Number):
fps = max(clip.fps, fps or 0)
super().__init__(duration=duration, fps=fps)
|
https://github.com/Zulko/moviepy/blob/858bb81fba7e09f1f562283ed6d1394db883b6c7/project46/moviepy/audio/AudioClip.py#L324-L341
| 46 |
[0-7, 9-17] | 94.444444 | [8] | 5.555556 | false | 94.354839 | 18 | 8 | 94.444444 | 0 |
def __init__(self, clips):
self.clips = clips
self.nchannels = max(clip.nchannels for clip in self.clips)
# self.duration is set at AudioClip
duration = None
for end in self.ends:
if end is None:
break
duration = max(end, duration or 0)
# self.fps is set at AudioClip
fps = None
for clip in self.clips:
if hasattr(clip, "fps") and isinstance(clip.fps, numbers.Number):
fps = max(clip.fps, fps or 0)
super().__init__(duration=duration, fps=fps)
| 28,383 |
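A minimal sketch of ``CompositeAudioClip``: two generated tones are mixed, the second delayed by one second via ``with_start`` (the same timing setter that ``concatenate_audioclips`` above relies on).

import numpy as np
from moviepy.audio.AudioClip import AudioClip, CompositeAudioClip

low = AudioClip(lambda t: np.sin(2 * np.pi * 220 * t), duration=3, fps=44100)
high = AudioClip(lambda t: np.sin(2 * np.pi * 880 * t), duration=2, fps=44100).with_start(1)

mix = CompositeAudioClip([low, high])   # duration is the latest clip end
print(mix.duration, mix.fps)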
|||
Zulko/moviepy
|
858bb81fba7e09f1f562283ed6d1394db883b6c7
|
moviepy/audio/AudioClip.py
|
CompositeAudioClip.starts
|
(self)
|
return (clip.start for clip in self.clips)
|
Returns starting times for all clips in the composition.
|
Returns starting times for all clips in the composition.
| 344 | 346 |
def starts(self):
"""Returns starting times for all clips in the composition."""
return (clip.start for clip in self.clips)
|
https://github.com/Zulko/moviepy/blob/858bb81fba7e09f1f562283ed6d1394db883b6c7/project46/moviepy/audio/AudioClip.py#L344-L346
| 46 |
[0-2] | 100 | [] | 0 | true | 94.354839 | 3 | 1 | 100 | 1 |
def starts(self):
return (clip.start for clip in self.clips)
| 28,384 |
Zulko/moviepy
|
858bb81fba7e09f1f562283ed6d1394db883b6c7
|
moviepy/audio/AudioClip.py
|
CompositeAudioClip.ends
|
(self)
|
return (clip.end for clip in self.clips)
|
Returns ending times for all clips in the composition.
|
Returns ending times for all clips in the composition.
| 349 | 351 |
def ends(self):
"""Returns ending times for all clips in the composition."""
return (clip.end for clip in self.clips)
|
https://github.com/Zulko/moviepy/blob/858bb81fba7e09f1f562283ed6d1394db883b6c7/project46/moviepy/audio/AudioClip.py#L349-L351
| 46 |
[0-2] | 100 | [] | 0 | true | 94.354839 | 3 | 1 | 100 | 1 |
def ends(self):
return (clip.end for clip in self.clips)
| 28,385 |
Zulko/moviepy
|
858bb81fba7e09f1f562283ed6d1394db883b6c7
|
moviepy/audio/AudioClip.py
|
CompositeAudioClip.make_frame
|
(self, t)
|
return zero + sum(sounds)
|
Renders a frame for the composition for the time ``t``.
|
Renders a frame for the composition for the time ``t``.
| 353 | 368 |
def make_frame(self, t):
"""Renders a frame for the composition for the time ``t``."""
played_parts = [clip.is_playing(t) for clip in self.clips]
sounds = [
clip.get_frame(t - clip.start) * np.array([part]).T
for clip, part in zip(self.clips, played_parts)
if (part is not False)
]
if isinstance(t, np.ndarray):
zero = np.zeros((len(t), self.nchannels))
else:
zero = np.zeros(self.nchannels)
return zero + sum(sounds)
|
https://github.com/Zulko/moviepy/blob/858bb81fba7e09f1f562283ed6d1394db883b6c7/project46/moviepy/audio/AudioClip.py#L353-L368
| 46 |
[0-12, 14, 15] | 93.75 | [13] | 6.25 | false | 94.354839 | 16 | 4 | 93.75 | 1 |
def make_frame(self, t):
played_parts = [clip.is_playing(t) for clip in self.clips]
sounds = [
clip.get_frame(t - clip.start) * np.array([part]).T
for clip, part in zip(self.clips, played_parts)
if (part is not False)
]
if isinstance(t, np.ndarray):
zero = np.zeros((len(t), self.nchannels))
else:
zero = np.zeros(self.nchannels)
return zero + sum(sounds)
| 28,386 |
Zulko/moviepy
|
858bb81fba7e09f1f562283ed6d1394db883b6c7
|
moviepy/audio/tools/cuts.py
|
find_audio_period
|
(clip, min_time=0.1, max_time=2, time_resolution=0.01)
|
return chunk_duration * np.argmax(corrs)
|
Finds the period, in seconds, of an audio clip.
Parameters
----------
min_time : float, optional
Minimum bound for the returned value.
max_time : float, optional
Maximum bound for the returned value.
time_resolution : float, optional
Numerical precision.
|
Finds the period, in seconds, of an audio clip.
| 6 | 29 |
def find_audio_period(clip, min_time=0.1, max_time=2, time_resolution=0.01):
"""Finds the period, in seconds of an audioclip.
Parameters
----------
min_time : float, optional
Minimum bound for the returned value.
max_time : float, optional
Maximum bound for the returned value.
time_resolution : float, optional
Numerical precision.
"""
chunksize = int(time_resolution * clip.fps)
chunk_duration = 1.0 * chunksize / clip.fps
# v denotes the list of volumes
v = np.array([(chunk**2).sum() for chunk in clip.iter_chunks(chunksize)])
v = v - v.mean()
corrs = np.correlate(v, v, mode="full")[-len(v) :]
corrs[: int(min_time / chunk_duration)] = 0
corrs[int(max_time / chunk_duration) :] = 0
return chunk_duration * np.argmax(corrs)
|
https://github.com/Zulko/moviepy/blob/858bb81fba7e09f1f562283ed6d1394db883b6c7/project46/moviepy/audio/tools/cuts.py#L6-L29
| 46 |
[0-23] | 100 | [] | 0 | true | 100 | 24 | 2 | 100 | 13 |
def find_audio_period(clip, min_time=0.1, max_time=2, time_resolution=0.01):
chunksize = int(time_resolution * clip.fps)
chunk_duration = 1.0 * chunksize / clip.fps
# v denotes the list of volumes
v = np.array([(chunk**2).sum() for chunk in clip.iter_chunks(chunksize)])
v = v - v.mean()
corrs = np.correlate(v, v, mode="full")[-len(v) :]
corrs[: int(min_time / chunk_duration)] = 0
corrs[int(max_time / chunk_duration) :] = 0
return chunk_duration * np.argmax(corrs)
| 28,387 |
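A minimal sketch of ``find_audio_period`` on a clip whose volume pulses every 0.5 seconds, so the detected period should come out close to 0.5; the carrier and envelope are made up for the example.

import numpy as np
from moviepy.audio.AudioClip import AudioClip
from moviepy.audio.tools.cuts import find_audio_period

# 440 Hz carrier whose amplitude pulses with a 0.5 s period
make_frame = lambda t: np.sin(2 * np.pi * 440 * t) * (0.5 + 0.5 * np.cos(2 * np.pi * t / 0.5))
clip = AudioClip(make_frame, duration=4, fps=44100)

print(find_audio_period(clip, min_time=0.1, max_time=2))   # expected to be near 0.5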
Zulko/moviepy
|
858bb81fba7e09f1f562283ed6d1394db883b6c7
|
moviepy/audio/io/ffmpeg_audiowriter.py
|
ffmpeg_audiowrite
|
(
clip,
filename,
fps,
nbytes,
buffersize,
codec="libvorbis",
bitrate=None,
write_logfile=False,
ffmpeg_params=None,
logger="bar",
)
|
A function that wraps the FFMPEG_AudioWriter to write an AudioClip
to a file.
|
A function that wraps the FFMPEG_AudioWriter to write an AudioClip
to a file.
| 172 | 214 |
def ffmpeg_audiowrite(
clip,
filename,
fps,
nbytes,
buffersize,
codec="libvorbis",
bitrate=None,
write_logfile=False,
ffmpeg_params=None,
logger="bar",
):
"""
A function that wraps the FFMPEG_AudioWriter to write an AudioClip
to a file.
"""
if write_logfile:
logfile = open(filename + ".log", "w+")
else:
logfile = None
logger = proglog.default_bar_logger(logger)
logger(message="MoviePy - Writing audio in %s" % filename)
writer = FFMPEG_AudioWriter(
filename,
fps,
nbytes,
clip.nchannels,
codec=codec,
bitrate=bitrate,
logfile=logfile,
ffmpeg_params=ffmpeg_params,
)
for chunk in clip.iter_chunks(
chunksize=buffersize, quantize=True, nbytes=nbytes, fps=fps, logger=logger
):
writer.write_frames(chunk)
writer.close()
if write_logfile:
logfile.close()
logger(message="MoviePy - Done.")
|
https://github.com/Zulko/moviepy/blob/858bb81fba7e09f1f562283ed6d1394db883b6c7/project46/moviepy/audio/io/ffmpeg_audiowriter.py#L172-L214
| 46 |
[0, 15-42] | 67.44186 | [] | 0 | false | 72.972973 | 43 | 4 | 100 | 2 |
def ffmpeg_audiowrite(
clip,
filename,
fps,
nbytes,
buffersize,
codec="libvorbis",
bitrate=None,
write_logfile=False,
ffmpeg_params=None,
logger="bar",
):
if write_logfile:
logfile = open(filename + ".log", "w+")
else:
logfile = None
logger = proglog.default_bar_logger(logger)
logger(message="MoviePy - Writing audio in %s" % filename)
writer = FFMPEG_AudioWriter(
filename,
fps,
nbytes,
clip.nchannels,
codec=codec,
bitrate=bitrate,
logfile=logfile,
ffmpeg_params=ffmpeg_params,
)
for chunk in clip.iter_chunks(
chunksize=buffersize, quantize=True, nbytes=nbytes, fps=fps, logger=logger
):
writer.write_frames(chunk)
writer.close()
if write_logfile:
logfile.close()
logger(message="MoviePy - Done.")
| 28,388 |
|
Zulko/moviepy
|
858bb81fba7e09f1f562283ed6d1394db883b6c7
|
moviepy/audio/io/ffmpeg_audiowriter.py
|
FFMPEG_AudioWriter.__init__
|
(
self,
filename,
fps_input,
nbytes=2,
nchannels=2,
codec="libfdk_aac",
bitrate=None,
input_video=None,
logfile=None,
ffmpeg_params=None,
)
| 38 | 91 |
def __init__(
self,
filename,
fps_input,
nbytes=2,
nchannels=2,
codec="libfdk_aac",
bitrate=None,
input_video=None,
logfile=None,
ffmpeg_params=None,
):
if logfile is None:
logfile = sp.PIPE
self.logfile = logfile
self.filename = filename
self.codec = codec
self.ext = self.filename.split(".")[-1]
# order is important
cmd = [
FFMPEG_BINARY,
"-y",
"-loglevel",
"error" if logfile == sp.PIPE else "info",
"-f",
"s%dle" % (8 * nbytes),
"-acodec",
"pcm_s%dle" % (8 * nbytes),
"-ar",
"%d" % fps_input,
"-ac",
"%d" % nchannels,
"-i",
"-",
]
if input_video is None:
cmd.extend(["-vn"])
else:
cmd.extend(["-i", input_video, "-vcodec", "copy"])
cmd.extend(["-acodec", codec] + ["-ar", "%d" % fps_input])
cmd.extend(["-strict", "-2"]) # needed to support codec 'aac'
if bitrate is not None:
cmd.extend(["-ab", bitrate])
if ffmpeg_params is not None:
cmd.extend(ffmpeg_params)
cmd.extend([filename])
popen_params = cross_platform_popen_params(
{"stdout": sp.DEVNULL, "stderr": logfile, "stdin": sp.PIPE}
)
self.proc = sp.Popen(cmd, **popen_params)
|
https://github.com/Zulko/moviepy/blob/858bb81fba7e09f1f562283ed6d1394db883b6c7/project46/moviepy/audio/io/ffmpeg_audiowriter.py#L38-L91
| 46 |
[0, 12-20, 36, 37, 40-45, 47-49, 52, 53] | 42.592593 | [39, 46] | 3.703704 | false | 72.972973 | 54 | 5 | 96.296296 | 0 |
def __init__(
self,
filename,
fps_input,
nbytes=2,
nchannels=2,
codec="libfdk_aac",
bitrate=None,
input_video=None,
logfile=None,
ffmpeg_params=None,
):
if logfile is None:
logfile = sp.PIPE
self.logfile = logfile
self.filename = filename
self.codec = codec
self.ext = self.filename.split(".")[-1]
# order is important
cmd = [
FFMPEG_BINARY,
"-y",
"-loglevel",
"error" if logfile == sp.PIPE else "info",
"-f",
"s%dle" % (8 * nbytes),
"-acodec",
"pcm_s%dle" % (8 * nbytes),
"-ar",
"%d" % fps_input,
"-ac",
"%d" % nchannels,
"-i",
"-",
]
if input_video is None:
cmd.extend(["-vn"])
else:
cmd.extend(["-i", input_video, "-vcodec", "copy"])
cmd.extend(["-acodec", codec] + ["-ar", "%d" % fps_input])
cmd.extend(["-strict", "-2"]) # needed to support codec 'aac'
if bitrate is not None:
cmd.extend(["-ab", bitrate])
if ffmpeg_params is not None:
cmd.extend(ffmpeg_params)
cmd.extend([filename])
popen_params = cross_platform_popen_params(
{"stdout": sp.DEVNULL, "stderr": logfile, "stdin": sp.PIPE}
)
self.proc = sp.Popen(cmd, **popen_params)
| 28,389 |
|||
Zulko/moviepy
|
858bb81fba7e09f1f562283ed6d1394db883b6c7
|
moviepy/audio/io/ffmpeg_audiowriter.py
|
FFMPEG_AudioWriter.write_frames
|
(self, frames_array)
|
TODO: add documentation
|
TODO: add documentation
| 93 | 144 |
def write_frames(self, frames_array):
"""TODO: add documentation"""
try:
self.proc.stdin.write(frames_array.tobytes())
except IOError as err:
_, ffmpeg_error = self.proc.communicate()
if ffmpeg_error is not None:
ffmpeg_error = ffmpeg_error.decode()
else:
# The error was redirected to a logfile with `write_logfile=True`,
# so read the error from that file instead
self.logfile.seek(0)
ffmpeg_error = self.logfile.read()
error = (
f"{err}\n\nMoviePy error: FFMPEG encountered the following error while "
f"writing file {self.filename}:\n\n {ffmpeg_error}"
)
if "Unknown encoder" in ffmpeg_error:
error += (
"\n\nThe audio export failed because FFMPEG didn't find the "
f"specified codec for audio encoding {self.codec}. "
"Please install this codec or change the codec when calling "
"write_videofile or write_audiofile.\nFor instance for mp3:\n"
" >>> write_videofile('myvid.mp4', audio_codec='libmp3lame')"
)
elif "incorrect codec parameters ?" in ffmpeg_error:
error += (
"\n\nThe audio export failed, possibly because the "
f"codec specified for the video {self.codec} is not compatible"
f" with the given extension {self.ext}. Please specify a "
"valid 'codec' argument in write_audiofile or 'audio_codoc'"
"argument in write_videofile. This would be "
"'libmp3lame' for mp3, 'libvorbis' for ogg..."
)
elif "bitrate not specified" in ffmpeg_error:
error += (
"\n\nThe audio export failed, possibly because the "
"bitrate you specified was too high or too low for "
"the audio codec."
)
elif "Invalid encoder type" in ffmpeg_error:
error += (
"\n\nThe audio export failed because the codec "
"or file extension you provided is not suitable for audio"
)
raise IOError(error)
|
https://github.com/Zulko/moviepy/blob/858bb81fba7e09f1f562283ed6d1394db883b6c7/project46/moviepy/audio/io/ffmpeg_audiowriter.py#L93-L144
| 46 |
[
0,
1,
2,
3
] | 7.692308 |
[
4,
5,
6,
7,
11,
12,
14,
19,
20,
28,
29,
38,
39,
45,
46,
51
] | 30.769231 | false | 72.972973 | 52 | 7 | 69.230769 | 1 |
def write_frames(self, frames_array):
try:
self.proc.stdin.write(frames_array.tobytes())
except IOError as err:
_, ffmpeg_error = self.proc.communicate()
if ffmpeg_error is not None:
ffmpeg_error = ffmpeg_error.decode()
else:
# The error was redirected to a logfile with `write_logfile=True`,
# so read the error from that file instead
self.logfile.seek(0)
ffmpeg_error = self.logfile.read()
error = (
f"{err}\n\nMoviePy error: FFMPEG encountered the following error while "
f"writing file {self.filename}:\n\n {ffmpeg_error}"
)
if "Unknown encoder" in ffmpeg_error:
error += (
"\n\nThe audio export failed because FFMPEG didn't find the "
f"specified codec for audio encoding {self.codec}. "
"Please install this codec or change the codec when calling "
"write_videofile or write_audiofile.\nFor instance for mp3:\n"
" >>> write_videofile('myvid.mp4', audio_codec='libmp3lame')"
)
elif "incorrect codec parameters ?" in ffmpeg_error:
error += (
"\n\nThe audio export failed, possibly because the "
f"codec specified for the video {self.codec} is not compatible"
f" with the given extension {self.ext}. Please specify a "
"valid 'codec' argument in write_audiofile or 'audio_codoc'"
"argument in write_videofile. This would be "
"'libmp3lame' for mp3, 'libvorbis' for ogg..."
)
elif "bitrate not specified" in ffmpeg_error:
error += (
"\n\nThe audio export failed, possibly because the "
"bitrate you specified was too high or too low for "
"the audio codec."
)
elif "Invalid encoder type" in ffmpeg_error:
error += (
"\n\nThe audio export failed because the codec "
"or file extension you provided is not suitable for audio"
)
raise IOError(error)
| 28,390 |
|
Zulko/moviepy
|
858bb81fba7e09f1f562283ed6d1394db883b6c7
|
moviepy/audio/io/ffmpeg_audiowriter.py
|
FFMPEG_AudioWriter.close
|
(self)
|
Closes the writer, terminating the subprocess if it is still alive.
|
Closes the writer, terminating the subprocess if it is still alive.
| 146 | 156 |
def close(self):
"""Closes the writer, terminating the subprocess if is still alive."""
if hasattr(self, "proc") and self.proc:
self.proc.stdin.close()
self.proc.stdin = None
if self.proc.stderr is not None:
self.proc.stderr.close()
self.proc.stderr = None
# If this causes deadlocks, consider terminating instead.
self.proc.wait()
self.proc = None
|
https://github.com/Zulko/moviepy/blob/858bb81fba7e09f1f562283ed6d1394db883b6c7/project46/moviepy/audio/io/ffmpeg_audiowriter.py#L146-L156
| 46 |
[0-10] | 100 | [] | 0 | true | 72.972973 | 11 | 4 | 100 | 1 |
def close(self):
if hasattr(self, "proc") and self.proc:
self.proc.stdin.close()
self.proc.stdin = None
if self.proc.stderr is not None:
self.proc.stderr.close()
self.proc.stderr = None
# If this causes deadlocks, consider terminating instead.
self.proc.wait()
self.proc = None
| 28,391 |
|
Zulko/moviepy
|
858bb81fba7e09f1f562283ed6d1394db883b6c7
|
moviepy/audio/io/ffmpeg_audiowriter.py
|
FFMPEG_AudioWriter.__del__
|
(self)
| 158 | 160 |
def __del__(self):
# If the garbage collector comes, make sure the subprocess is terminated.
self.close()
|
https://github.com/Zulko/moviepy/blob/858bb81fba7e09f1f562283ed6d1394db883b6c7/project46/moviepy/audio/io/ffmpeg_audiowriter.py#L158-L160
| 46 |
[0-2] | 100 | [] | 0 | true | 72.972973 | 3 | 1 | 100 | 0 |
def __del__(self):
# If the garbage collector comes, make sure the subprocess is terminated.
self.close()
| 28,392 |
|||
Zulko/moviepy
|
858bb81fba7e09f1f562283ed6d1394db883b6c7
|
moviepy/audio/io/ffmpeg_audiowriter.py
|
FFMPEG_AudioWriter.__enter__
|
(self)
|
return self
| 164 | 165 |
def __enter__(self):
return self
|
https://github.com/Zulko/moviepy/blob/858bb81fba7e09f1f562283ed6d1394db883b6c7/project46/moviepy/audio/io/ffmpeg_audiowriter.py#L164-L165
| 46 |
[0] | 50 | [1] | 50 | false | 72.972973 | 2 | 1 | 50 | 0 |
def __enter__(self):
return self
| 28,393 |
||
Zulko/moviepy
|
858bb81fba7e09f1f562283ed6d1394db883b6c7
|
moviepy/audio/io/ffmpeg_audiowriter.py
|
FFMPEG_AudioWriter.__exit__
|
(self, exc_type, exc_value, traceback)
| 167 | 168 |
def __exit__(self, exc_type, exc_value, traceback):
self.close()
|
https://github.com/Zulko/moviepy/blob/858bb81fba7e09f1f562283ed6d1394db883b6c7/project46/moviepy/audio/io/ffmpeg_audiowriter.py#L167-L168
| 46 |
[0] | 50 | [1] | 50 | false | 72.972973 | 2 | 1 | 50 | 0 |
def __exit__(self, exc_type, exc_value, traceback):
self.close()
| 28,394 |
|||
Zulko/moviepy
|
858bb81fba7e09f1f562283ed6d1394db883b6c7
|
moviepy/audio/io/readers.py
|
FFMPEG_AudioReader.__init__
|
(
self,
filename,
buffersize,
decode_file=False,
print_infos=False,
fps=44100,
nbytes=2,
nchannels=2,
)
| 43 | 73 |
def __init__(
self,
filename,
buffersize,
decode_file=False,
print_infos=False,
fps=44100,
nbytes=2,
nchannels=2,
):
# TODO bring FFMPEG_AudioReader more in line with FFMPEG_VideoReader
# E.g. here self.pos is still 1-indexed.
# (or have them inherit from a shared parent class)
self.filename = filename
self.nbytes = nbytes
self.fps = fps
self.format = "s%dle" % (8 * nbytes)
self.codec = "pcm_s%dle" % (8 * nbytes)
self.nchannels = nchannels
infos = ffmpeg_parse_infos(filename, decode_file=decode_file)
self.duration = infos["duration"]
self.bitrate = infos["audio_bitrate"]
self.infos = infos
self.proc = None
self.n_frames = int(self.fps * self.duration)
self.buffersize = min(self.n_frames + 1, buffersize)
self.buffer = None
self.buffer_startframe = 1
self.initialize()
self.buffer_around(1)
|
https://github.com/Zulko/moviepy/blob/858bb81fba7e09f1f562283ed6d1394db883b6c7/project46/moviepy/audio/io/readers.py#L43-L73
| 46 |
[0, 12-30] | 64.516129 | [] | 0 | false | 85.321101 | 31 | 1 | 100 | 0 |
def __init__(
self,
filename,
buffersize,
decode_file=False,
print_infos=False,
fps=44100,
nbytes=2,
nchannels=2,
):
# TODO bring FFMPEG_AudioReader more in line with FFMPEG_VideoReader
# E.g. here self.pos is still 1-indexed.
# (or have them inherit from a shared parent class)
self.filename = filename
self.nbytes = nbytes
self.fps = fps
self.format = "s%dle" % (8 * nbytes)
self.codec = "pcm_s%dle" % (8 * nbytes)
self.nchannels = nchannels
infos = ffmpeg_parse_infos(filename, decode_file=decode_file)
self.duration = infos["duration"]
self.bitrate = infos["audio_bitrate"]
self.infos = infos
self.proc = None
self.n_frames = int(self.fps * self.duration)
self.buffersize = min(self.n_frames + 1, buffersize)
self.buffer = None
self.buffer_startframe = 1
self.initialize()
self.buffer_around(1)
| 28,395 |
|||
Zulko/moviepy
|
858bb81fba7e09f1f562283ed6d1394db883b6c7
|
moviepy/audio/io/readers.py
|
FFMPEG_AudioReader.initialize
|
(self, start_time=0)
|
Opens the file, creates the pipe.
|
Opens the file, creates the pipe.
| 75 | 122 |
def initialize(self, start_time=0):
"""Opens the file, creates the pipe."""
self.close() # if any
if start_time != 0:
offset = min(1, start_time)
i_arg = [
"-ss",
"%.05f" % (start_time - offset),
"-i",
self.filename,
"-vn",
"-ss",
"%.05f" % offset,
]
else:
i_arg = ["-i", self.filename, "-vn"]
cmd = (
[FFMPEG_BINARY]
+ i_arg
+ [
"-loglevel",
"error",
"-f",
self.format,
"-acodec",
self.codec,
"-ar",
"%d" % self.fps,
"-ac",
"%d" % self.nchannels,
"-",
]
)
popen_params = cross_platform_popen_params(
{
"bufsize": self.buffersize,
"stdout": sp.PIPE,
"stderr": sp.PIPE,
"stdin": sp.DEVNULL,
}
)
self.proc = sp.Popen(cmd, **popen_params)
self.pos = np.round(self.fps * start_time)
|
https://github.com/Zulko/moviepy/blob/858bb81fba7e09f1f562283ed6d1394db883b6c7/project46/moviepy/audio/io/readers.py#L75-L122
| 46 |
[0-47] | 100 | [] | 0 | true | 85.321101 | 48 | 2 | 100 | 1 |
def initialize(self, start_time=0):
self.close() # if any
if start_time != 0:
offset = min(1, start_time)
i_arg = [
"-ss",
"%.05f" % (start_time - offset),
"-i",
self.filename,
"-vn",
"-ss",
"%.05f" % offset,
]
else:
i_arg = ["-i", self.filename, "-vn"]
cmd = (
[FFMPEG_BINARY]
+ i_arg
+ [
"-loglevel",
"error",
"-f",
self.format,
"-acodec",
self.codec,
"-ar",
"%d" % self.fps,
"-ac",
"%d" % self.nchannels,
"-",
]
)
popen_params = cross_platform_popen_params(
{
"bufsize": self.buffersize,
"stdout": sp.PIPE,
"stderr": sp.PIPE,
"stdin": sp.DEVNULL,
}
)
self.proc = sp.Popen(cmd, **popen_params)
self.pos = np.round(self.fps * start_time)
| 28,396 |
|
Zulko/moviepy
|
858bb81fba7e09f1f562283ed6d1394db883b6c7
|
moviepy/audio/io/readers.py
|
FFMPEG_AudioReader.skip_chunk
|
(self, chunksize)
|
TODO: add documentation
|
TODO: add documentation
| 124 | 128 |
def skip_chunk(self, chunksize):
"""TODO: add documentation"""
_ = self.proc.stdout.read(self.nchannels * chunksize * self.nbytes)
self.proc.stdout.flush()
self.pos = self.pos + chunksize
|
https://github.com/Zulko/moviepy/blob/858bb81fba7e09f1f562283ed6d1394db883b6c7/project46/moviepy/audio/io/readers.py#L124-L128
| 46 |
[0, 1] | 40 | [2, 3, 4] | 60 | false | 85.321101 | 5 | 1 | 40 | 1 |
def skip_chunk(self, chunksize):
_ = self.proc.stdout.read(self.nchannels * chunksize * self.nbytes)
self.proc.stdout.flush()
self.pos = self.pos + chunksize
| 28,397 |
|
Zulko/moviepy
|
858bb81fba7e09f1f562283ed6d1394db883b6c7
|
moviepy/audio/io/readers.py
|
FFMPEG_AudioReader.read_chunk
|
(self, chunksize)
|
return result
|
TODO: add documentation
|
TODO: add documentation
| 130 | 150 |
def read_chunk(self, chunksize):
"""TODO: add documentation"""
# chunksize is not being autoconverted from float to int
chunksize = int(round(chunksize))
s = self.proc.stdout.read(self.nchannels * chunksize * self.nbytes)
data_type = {1: "int8", 2: "int16", 4: "int32"}[self.nbytes]
if hasattr(np, "frombuffer"):
result = np.frombuffer(s, dtype=data_type)
else:
result = np.fromstring(s, dtype=data_type)
result = (1.0 * result / 2 ** (8 * self.nbytes - 1)).reshape(
(int(len(result) / self.nchannels), self.nchannels)
)
# Pad the read chunk with zeros when there isn't enough audio
# left to read, so the buffer is always at full length.
pad = np.zeros((chunksize - len(result), self.nchannels), dtype=result.dtype)
result = np.concatenate([result, pad])
# self.proc.stdout.flush()
self.pos = self.pos + chunksize
return result
|
https://github.com/Zulko/moviepy/blob/858bb81fba7e09f1f562283ed6d1394db883b6c7/project46/moviepy/audio/io/readers.py#L130-L150
| 46 |
[0-8, 10-20] | 95.238095 | [9] | 4.761905 | false | 85.321101 | 21 | 2 | 95.238095 | 1 |
def read_chunk(self, chunksize):
# chunksize is not being autoconverted from float to int
chunksize = int(round(chunksize))
s = self.proc.stdout.read(self.nchannels * chunksize * self.nbytes)
data_type = {1: "int8", 2: "int16", 4: "int32"}[self.nbytes]
if hasattr(np, "frombuffer"):
result = np.frombuffer(s, dtype=data_type)
else:
result = np.fromstring(s, dtype=data_type)
result = (1.0 * result / 2 ** (8 * self.nbytes - 1)).reshape(
(int(len(result) / self.nchannels), self.nchannels)
)
# Pad the read chunk with zeros when there isn't enough audio
# left to read, so the buffer is always at full length.
pad = np.zeros((chunksize - len(result), self.nchannels), dtype=result.dtype)
result = np.concatenate([result, pad])
# self.proc.stdout.flush()
self.pos = self.pos + chunksize
return result
| 28,398 |
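
A worked sketch of the read size used by read_chunk above, with illustrative values (stereo, 16-bit samples, the default buffer size); none of these numbers come from the dataset record itself:

nchannels, nbytes, chunksize = 2, 2, 200000        # illustrative: stereo, 16-bit samples
bytes_to_read = nchannels * chunksize * nbytes     # 800000 bytes pulled from the ffmpeg pipe
full_scale = 2 ** (8 * nbytes - 1)                 # 32768: divisor mapping int16 to [-1, 1)
assert bytes_to_read == 800000 and full_scale == 32768
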
Zulko/moviepy
|
858bb81fba7e09f1f562283ed6d1394db883b6c7
|
moviepy/audio/io/readers.py
|
FFMPEG_AudioReader.seek
|
(self, pos)
|
Reads a frame at time t. Note for coders: getting an arbitrary
frame in the video with ffmpeg can be painfully slow if some
decoding has to be done. This function tries to avoid fetching
arbitrary frames whenever possible, by moving between adjacent
frames.
|
Reads a frame at time t. Note for coders: getting an arbitrary
frame in the video with ffmpeg can be painfully slow if some
decoding has to be done. This function tries to avoid fetching
arbitrary frames whenever possible, by moving between adjacent
frames.
| 152 | 167 |
def seek(self, pos):
"""
Reads a frame at time t. Note for coders: getting an arbitrary
frame in the video with ffmpeg can be painfully slow if some
decoding has to be done. This function tries to avoid fetching
arbitrary frames whenever possible, by moving between adjacent
frames.
"""
if (pos < self.pos) or (pos > (self.pos + 1000000)):
t = 1.0 * pos / self.fps
self.initialize(t)
elif pos > self.pos:
# print pos
self.skip_chunk(pos - self.pos)
# last case standing: pos = current pos
self.pos = pos
|
https://github.com/Zulko/moviepy/blob/858bb81fba7e09f1f562283ed6d1394db883b6c7/project46/moviepy/audio/io/readers.py#L152-L167
| 46 |
[
0,
1,
2,
3,
4,
5,
6,
7,
8,
9,
10,
11,
12,
14,
15
] | 93.75 |
[
13
] | 6.25 | false | 85.321101 | 16 | 4 | 93.75 | 5 |
def seek(self, pos):
if (pos < self.pos) or (pos > (self.pos + 1000000)):
t = 1.0 * pos / self.fps
self.initialize(t)
elif pos > self.pos:
# print pos
self.skip_chunk(pos - self.pos)
# last case standing: pos = current pos
self.pos = pos
| 28,399 |
|
Zulko/moviepy
|
858bb81fba7e09f1f562283ed6d1394db883b6c7
|
moviepy/audio/io/readers.py
|
FFMPEG_AudioReader.get_frame
|
(self, tt)
|
TODO: add documentation
|
TODO: add documentation
| 169 | 229 |
def get_frame(self, tt):
"""TODO: add documentation"""
if isinstance(tt, np.ndarray):
# lazy implementation, but should not cause problems in
# 99.99 % of the cases
# elements of t that are actually in the range of the
# audio file.
in_time = (tt >= 0) & (tt < self.duration)
# Check that the requested time is in the valid range
if not in_time.any():
raise IOError(
"Error in file %s, " % (self.filename)
+ "Accessing time t=%.02f-%.02f seconds, " % (tt[0], tt[-1])
+ "with clip duration=%f seconds, " % self.duration
)
# The np.round in the next line is super-important.
# Removing it results in artifacts in the noise.
frames = np.round((self.fps * tt)).astype(int)[in_time]
fr_min, fr_max = frames.min(), frames.max()
if not (0 <= (fr_min - self.buffer_startframe) < len(self.buffer)):
self.buffer_around(fr_min)
elif not (0 <= (fr_max - self.buffer_startframe) < len(self.buffer)):
self.buffer_around(fr_max)
try:
result = np.zeros((len(tt), self.nchannels))
indices = frames - self.buffer_startframe
result[in_time] = self.buffer[indices]
return result
except IndexError as error:
warnings.warn(
"Error in file %s, " % (self.filename)
+ "At time t=%.02f-%.02f seconds, " % (tt[0], tt[-1])
+ "indices wanted: %d-%d, " % (indices.min(), indices.max())
+ "but len(buffer)=%d\n" % (len(self.buffer))
+ str(error),
UserWarning,
)
# repeat the last frame instead
indices[indices >= len(self.buffer)] = len(self.buffer) - 1
result[in_time] = self.buffer[indices]
return result
else:
ind = int(self.fps * tt)
if ind < 0 or ind > self.n_frames: # out of time: return 0
return np.zeros(self.nchannels)
if not (0 <= (ind - self.buffer_startframe) < len(self.buffer)):
# out of the buffer: recenter the buffer
self.buffer_around(ind)
# read the frame in the buffer
return self.buffer[ind - self.buffer_startframe]
|
https://github.com/Zulko/moviepy/blob/858bb81fba7e09f1f562283ed6d1394db883b6c7/project46/moviepy/audio/io/readers.py#L169-L229
| 46 |
[
0,
1,
2,
3,
4,
5,
6,
7,
8,
9,
10,
11,
12,
13,
14,
15,
16,
17,
18,
19,
20,
21,
22,
23,
24,
25,
26,
27,
28,
29,
30,
31,
32,
33
] | 55.737705 |
[
34,
35,
45,
46,
47,
51,
52,
53,
55,
57,
60
] | 18.032787 | false | 85.321101 | 61 | 9 | 81.967213 | 1 |
def get_frame(self, tt):
if isinstance(tt, np.ndarray):
# lazy implementation, but should not cause problems in
# 99.99 % of the cases
# elements of t that are actually in the range of the
# audio file.
in_time = (tt >= 0) & (tt < self.duration)
# Check that the requested time is in the valid range
if not in_time.any():
raise IOError(
"Error in file %s, " % (self.filename)
+ "Accessing time t=%.02f-%.02f seconds, " % (tt[0], tt[-1])
+ "with clip duration=%f seconds, " % self.duration
)
# The np.round in the next line is super-important.
# Removing it results in artifacts in the noise.
frames = np.round((self.fps * tt)).astype(int)[in_time]
fr_min, fr_max = frames.min(), frames.max()
if not (0 <= (fr_min - self.buffer_startframe) < len(self.buffer)):
self.buffer_around(fr_min)
elif not (0 <= (fr_max - self.buffer_startframe) < len(self.buffer)):
self.buffer_around(fr_max)
try:
result = np.zeros((len(tt), self.nchannels))
indices = frames - self.buffer_startframe
result[in_time] = self.buffer[indices]
return result
except IndexError as error:
warnings.warn(
"Error in file %s, " % (self.filename)
+ "At time t=%.02f-%.02f seconds, " % (tt[0], tt[-1])
+ "indices wanted: %d-%d, " % (indices.min(), indices.max())
+ "but len(buffer)=%d\n" % (len(self.buffer))
+ str(error),
UserWarning,
)
# repeat the last frame instead
indices[indices >= len(self.buffer)] = len(self.buffer) - 1
result[in_time] = self.buffer[indices]
return result
else:
ind = int(self.fps * tt)
if ind < 0 or ind > self.n_frames: # out of time: return 0
return np.zeros(self.nchannels)
if not (0 <= (ind - self.buffer_startframe) < len(self.buffer)):
# out of the buffer: recenter the buffer
self.buffer_around(ind)
# read the frame in the buffer
return self.buffer[ind - self.buffer_startframe]
| 28,400 |
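
A small sketch of the time-to-frame arithmetic used by get_frame, with illustrative numbers (44.1 kHz audio and a 200000-frame buffer starting at frame 0):

fps, buffer_startframe, buffer_len = 44100, 0, 200000   # illustrative values
t = 1.5                                                  # requested time in seconds
ind = int(fps * t)                                       # frame index 66150
served_from_buffer = 0 <= (ind - buffer_startframe) < buffer_len
# True here: the sample is returned directly from the buffer; otherwise
# buffer_around(ind) would first recenter the buffer on that frame.
assert ind == 66150 and served_from_buffer
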
|
Zulko/moviepy
|
858bb81fba7e09f1f562283ed6d1394db883b6c7
|
moviepy/audio/io/readers.py
|
FFMPEG_AudioReader.buffer_around
|
(self, frame_number)
|
Fills the buffer with frames, centered on ``frame_number``
if possible
|
Fills the buffer with frames, centered on ``frame_number``
if possible
| 231 | 254 |
def buffer_around(self, frame_number):
"""
Fills the buffer with frames, centered on ``frame_number``
if possible
"""
# start-frame for the buffer
new_bufferstart = max(0, frame_number - self.buffersize // 2)
if self.buffer is not None:
current_f_end = self.buffer_startframe + self.buffersize
if new_bufferstart < current_f_end < new_bufferstart + self.buffersize:
# We already have part of what must be read
conserved = current_f_end - new_bufferstart
chunksize = self.buffersize - conserved
array = self.read_chunk(chunksize)
self.buffer = np.vstack([self.buffer[-conserved:], array])
else:
self.seek(new_bufferstart)
self.buffer = self.read_chunk(self.buffersize)
else:
self.seek(new_bufferstart)
self.buffer = self.read_chunk(self.buffersize)
self.buffer_startframe = new_bufferstart
|
https://github.com/Zulko/moviepy/blob/858bb81fba7e09f1f562283ed6d1394db883b6c7/project46/moviepy/audio/io/readers.py#L231-L254
| 46 |
[
0,
1,
2,
3,
4,
5,
6,
7,
8,
9,
10,
11,
12,
13,
14,
15,
16,
17,
18,
19,
20,
21,
22,
23
] | 100 |
[] | 0 | true | 85.321101 | 24 | 3 | 100 | 2 |
def buffer_around(self, frame_number):
# start-frame for the buffer
new_bufferstart = max(0, frame_number - self.buffersize // 2)
if self.buffer is not None:
current_f_end = self.buffer_startframe + self.buffersize
if new_bufferstart < current_f_end < new_bufferstart + self.buffersize:
# We already have part of what must be read
conserved = current_f_end - new_bufferstart
chunksize = self.buffersize - conserved
array = self.read_chunk(chunksize)
self.buffer = np.vstack([self.buffer[-conserved:], array])
else:
self.seek(new_bufferstart)
self.buffer = self.read_chunk(self.buffersize)
else:
self.seek(new_bufferstart)
self.buffer = self.read_chunk(self.buffersize)
self.buffer_startframe = new_bufferstart
| 28,401 |
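
A sketch of the reuse arithmetic in buffer_around when the new window overlaps the old one; the numbers are illustrative only:

buffersize = 200000
buffer_startframe = 0                                     # old window: frames 0..199999
frame_number = 250000                                     # requested frame outside that window
new_bufferstart = max(0, frame_number - buffersize // 2)  # 150000
current_f_end = buffer_startframe + buffersize            # 200000 -> the windows overlap
conserved = current_f_end - new_bufferstart               # 50000 frames kept from the old buffer
chunksize = buffersize - conserved                        # 150000 new frames read from ffmpeg
assert (conserved, chunksize) == (50000, 150000)
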
|
Zulko/moviepy
|
858bb81fba7e09f1f562283ed6d1394db883b6c7
|
moviepy/audio/io/readers.py
|
FFMPEG_AudioReader.close
|
(self)
|
Closes the reader, terminating the subprocess if it is still alive.
|
Closes the reader, terminating the subprocess if it is still alive.
| 256 | 264 |
def close(self):
"""Closes the reader, terminating the subprocess if is still alive."""
if self.proc:
if self.proc.poll() is None:
self.proc.terminate()
self.proc.stdout.close()
self.proc.stderr.close()
self.proc.wait()
self.proc = None
|
https://github.com/Zulko/moviepy/blob/858bb81fba7e09f1f562283ed6d1394db883b6c7/project46/moviepy/audio/io/readers.py#L256-L264
| 46 |
[
0,
1,
2,
3,
4,
5,
6,
7,
8
] | 100 |
[] | 0 | true | 85.321101 | 9 | 3 | 100 | 1 |
def close(self):
if self.proc:
if self.proc.poll() is None:
self.proc.terminate()
self.proc.stdout.close()
self.proc.stderr.close()
self.proc.wait()
self.proc = None
| 28,402 |
|
Zulko/moviepy
|
858bb81fba7e09f1f562283ed6d1394db883b6c7
|
moviepy/audio/io/readers.py
|
FFMPEG_AudioReader.__del__
|
(self)
| 266 | 268 |
def __del__(self):
# If the garbage collector comes, make sure the subprocess is terminated.
self.close()
|
https://github.com/Zulko/moviepy/blob/858bb81fba7e09f1f562283ed6d1394db883b6c7/project46/moviepy/audio/io/readers.py#L266-L268
| 46 |
[
0,
1,
2
] | 100 |
[] | 0 | true | 85.321101 | 3 | 1 | 100 | 0 |
def __del__(self):
# If the garbage collector comes, make sure the subprocess is terminated.
self.close()
| 28,403 |
|||
Zulko/moviepy
|
858bb81fba7e09f1f562283ed6d1394db883b6c7
|
moviepy/audio/io/preview.py
|
preview
|
(
clip, fps=22050, buffersize=4000, nbytes=2, audio_flag=None, video_flag=None
)
|
Plays the sound clip with pygame.
Parameters
----------
fps
Frame rate of the sound. 44100 gives top quality, but may cause
problems if your computer is not fast enough and your clip is
complicated. If the sound jumps during the preview, lower it
(11025 is still fine, 5000 is tolerable).
buffersize
The sound is not generated all at once, but rather made by bunches
of frames (chunks). ``buffersize`` is the size of such a chunk.
Try varying it if you meet audio problems (but you shouldn't
have to).
nbytes:
Number of bytes to encode the sound: 1 for 8bit sound, 2 for
16bit, 4 for 32bit sound. 2 bytes is fine.
audio_flag, video_flag:
Instances of class threading events that are used to synchronize
video and audio during ``VideoClip.preview()``.
|
Plays the sound clip with pygame.
| 16 | 71 |
def preview(
clip, fps=22050, buffersize=4000, nbytes=2, audio_flag=None, video_flag=None
):
"""
Plays the sound clip with pygame.
Parameters
----------
fps
Frame rate of the sound. 44100 gives top quality, but may cause
problems if your computer is not fast enough and your clip is
complicated. If the sound jumps during the preview, lower it
(11025 is still fine, 5000 is tolerable).
buffersize
The sound is not generated all at once, but rather made by bunches
of frames (chunks). ``buffersize`` is the size of such a chunk.
Try varying it if you meet audio problems (but you shouldn't
have to).
nbytes:
Number of bytes to encode the sound: 1 for 8bit sound, 2 for
16bit, 4 for 32bit sound. 2 bytes is fine.
audio_flag, video_flag:
Instances of class threading events that are used to synchronize
video and audio during ``VideoClip.preview()``.
"""
pg.mixer.quit()
pg.mixer.init(fps, -8 * nbytes, clip.nchannels, 1024)
totalsize = int(fps * clip.duration)
pospos = np.array(list(range(0, totalsize, buffersize)) + [totalsize])
timings = (1.0 / fps) * np.arange(pospos[0], pospos[1])
sndarray = clip.to_soundarray(timings, nbytes=nbytes, quantize=True)
chunk = pg.sndarray.make_sound(sndarray)
if (audio_flag is not None) and (video_flag is not None):
audio_flag.set()
video_flag.wait()
channel = chunk.play()
for i in range(1, len(pospos) - 1):
timings = (1.0 / fps) * np.arange(pospos[i], pospos[i + 1])
sndarray = clip.to_soundarray(timings, nbytes=nbytes, quantize=True)
chunk = pg.sndarray.make_sound(sndarray)
while channel.get_queue():
time.sleep(0.003)
if video_flag is not None:
if not video_flag.is_set():
channel.stop()
del channel
return
channel.queue(chunk)
|
https://github.com/Zulko/moviepy/blob/858bb81fba7e09f1f562283ed6d1394db883b6c7/project46/moviepy/audio/io/preview.py#L16-L71
| 46 |
[] | 0 |
[
0,
30,
32,
33,
34,
35,
36,
37,
39,
40,
41,
43,
44,
45,
46,
47,
48,
49,
50,
51,
52,
53,
54,
55
] | 42.857143 | false | 9.677419 | 56 | 7 | 57.142857 | 24 |
def preview(
clip, fps=22050, buffersize=4000, nbytes=2, audio_flag=None, video_flag=None
):
pg.mixer.quit()
pg.mixer.init(fps, -8 * nbytes, clip.nchannels, 1024)
totalsize = int(fps * clip.duration)
pospos = np.array(list(range(0, totalsize, buffersize)) + [totalsize])
timings = (1.0 / fps) * np.arange(pospos[0], pospos[1])
sndarray = clip.to_soundarray(timings, nbytes=nbytes, quantize=True)
chunk = pg.sndarray.make_sound(sndarray)
if (audio_flag is not None) and (video_flag is not None):
audio_flag.set()
video_flag.wait()
channel = chunk.play()
for i in range(1, len(pospos) - 1):
timings = (1.0 / fps) * np.arange(pospos[i], pospos[i + 1])
sndarray = clip.to_soundarray(timings, nbytes=nbytes, quantize=True)
chunk = pg.sndarray.make_sound(sndarray)
while channel.get_queue():
time.sleep(0.003)
if video_flag is not None:
if not video_flag.is_set():
channel.stop()
del channel
return
channel.queue(chunk)
| 28,404 |
|
Zulko/moviepy
|
858bb81fba7e09f1f562283ed6d1394db883b6c7
|
moviepy/audio/io/AudioFileClip.py
|
AudioFileClip.__init__
|
(
self, filename, decode_file=False, buffersize=200000, nbytes=2, fps=44100
)
| 57 | 78 |
def __init__(
self, filename, decode_file=False, buffersize=200000, nbytes=2, fps=44100
):
AudioClip.__init__(self)
self.filename = filename
self.reader = FFMPEG_AudioReader(
filename,
decode_file=decode_file,
fps=fps,
nbytes=nbytes,
buffersize=buffersize,
)
self.fps = fps
self.duration = self.reader.duration
self.end = self.reader.duration
self.buffersize = self.reader.buffersize
self.filename = filename
self.make_frame = lambda t: self.reader.get_frame(t)
self.nchannels = self.reader.nchannels
|
https://github.com/Zulko/moviepy/blob/858bb81fba7e09f1f562283ed6d1394db883b6c7/project46/moviepy/audio/io/AudioFileClip.py#L57-L78
| 46 |
[
0,
3,
4,
5,
6,
7,
14,
15,
16,
17,
18,
19,
20,
21
] | 63.636364 |
[] | 0 | false | 100 | 22 | 1 | 100 | 0 |
def __init__(
self, filename, decode_file=False, buffersize=200000, nbytes=2, fps=44100
):
AudioClip.__init__(self)
self.filename = filename
self.reader = FFMPEG_AudioReader(
filename,
decode_file=decode_file,
fps=fps,
nbytes=nbytes,
buffersize=buffersize,
)
self.fps = fps
self.duration = self.reader.duration
self.end = self.reader.duration
self.buffersize = self.reader.buffersize
self.filename = filename
self.make_frame = lambda t: self.reader.get_frame(t)
self.nchannels = self.reader.nchannels
| 28,405 |
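
A minimal usage sketch for AudioFileClip; 'music.ogg' is a placeholder file name, not part of the record:

from moviepy import AudioFileClip

clip = AudioFileClip("music.ogg", fps=44100)   # decoding is delegated to FFMPEG_AudioReader
print(clip.duration, clip.nchannels)           # both values are taken from the reader
frame = clip.get_frame(1.0)                    # audio sample(s) at t = 1 s
clip.close()                                   # releases the underlying ffmpeg subprocess
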
|||
Zulko/moviepy
|
858bb81fba7e09f1f562283ed6d1394db883b6c7
|
moviepy/audio/io/AudioFileClip.py
|
AudioFileClip.close
|
(self)
|
Close the internal reader.
|
Close the internal reader.
| 80 | 84 |
def close(self):
"""Close the internal reader."""
if self.reader:
self.reader.close()
self.reader = None
|
https://github.com/Zulko/moviepy/blob/858bb81fba7e09f1f562283ed6d1394db883b6c7/project46/moviepy/audio/io/AudioFileClip.py#L80-L84
| 46 |
[
0,
1,
2,
3,
4
] | 100 |
[] | 0 | true | 100 | 5 | 2 | 100 | 1 |
def close(self):
if self.reader:
self.reader.close()
self.reader = None
| 28,406 |
|
Zulko/moviepy
|
858bb81fba7e09f1f562283ed6d1394db883b6c7
|
moviepy/audio/fx/multiply_volume.py
|
_multiply_volume_in_range
|
(factor, start_time, end_time, nchannels)
|
return multiply_mono_volume if nchannels == 1 else multiply_stereo_volume
| 6 | 19 |
def _multiply_volume_in_range(factor, start_time, end_time, nchannels):
def factors_filter(factor, t):
return np.array([factor if start_time <= t_ <= end_time else 1 for t_ in t])
def multiply_stereo_volume(get_frame, t):
return np.multiply(
get_frame(t),
np.array([factors_filter(factor, t) for _ in range(nchannels)]).T,
)
def multiply_mono_volume(get_frame, t):
return np.multiply(get_frame(t), factors_filter(factor, t))
return multiply_mono_volume if nchannels == 1 else multiply_stereo_volume
|
https://github.com/Zulko/moviepy/blob/858bb81fba7e09f1f562283ed6d1394db883b6c7/project46/moviepy/audio/fx/multiply_volume.py#L6-L19
| 46 |
[
0,
1,
2,
3,
4,
5,
9,
10,
11,
12,
13
] | 78.571429 |
[] | 0 | false | 100 | 14 | 6 | 100 | 0 |
def _multiply_volume_in_range(factor, start_time, end_time, nchannels):
def factors_filter(factor, t):
return np.array([factor if start_time <= t_ <= end_time else 1 for t_ in t])
def multiply_stereo_volume(get_frame, t):
return np.multiply(
get_frame(t),
np.array([factors_filter(factor, t) for _ in range(nchannels)]).T,
)
def multiply_mono_volume(get_frame, t):
return np.multiply(get_frame(t), factors_filter(factor, t))
return multiply_mono_volume if nchannels == 1 else multiply_stereo_volume
| 28,407 |
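
What the per-sample factor built by factors_filter looks like for a mono clip, using the same silencing scenario as the multiply_volume example that follows; the values are illustrative:

import numpy as np

factor, start_time, end_time = 0.0, 2.0, 3.0       # silence between t=2s and t=3s
t = np.array([1.9, 2.0, 2.5, 3.0, 3.1])            # sample times handed in by transform()
factors = np.array([factor if start_time <= t_ <= end_time else 1 for t_ in t])
# -> array([1., 0., 0., 0., 1.]): only samples inside the range are scaled.
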
||
Zulko/moviepy
|
858bb81fba7e09f1f562283ed6d1394db883b6c7
|
moviepy/audio/fx/multiply_volume.py
|
multiply_volume
|
(clip, factor, start_time=None, end_time=None)
|
return clip.transform(
_multiply_volume_in_range(
factor,
clip.start if start_time is None else start_time,
clip.end if end_time is None else end_time,
clip.nchannels,
),
keep_duration=True,
)
|
Returns a clip with audio volume multiplied by the
value `factor`. Can be applied to both audio and video clips.
Parameters
----------
factor : float
Volume multiplication factor.
start_time : float, optional
Time from the beginning of the clip until the volume transformation
begins to take effect, in seconds. By default at the beginning.
end_time : float, optional
Time from the beginning of the clip until the volume transformation
ends to take effect, in seconds. By default at the end.
Examples
--------
>>> from moviepy import AudioFileClip
>>>
>>> music = AudioFileClip('music.ogg')
>>> doubled_audio_clip = clip.multiply_volume(2) # doubles audio volume
>>> half_audio_clip = clip.multiply_volume(0.5) # half audio
>>>
>>> # silence the clip for one second, from t=2 to t=3
>>> silenced_clip = clip.multiply_volume(0, start_time=2, end_time=3)
|
Returns a clip with audio volume multiplied by the
value `factor`. Can be applied to both audio and video clips.
| 24 | 68 |
def multiply_volume(clip, factor, start_time=None, end_time=None):
"""Returns a clip with audio volume multiplied by the
value `factor`. Can be applied to both audio and video clips.
Parameters
----------
factor : float
Volume multiplication factor.
start_time : float, optional
Time from the beginning of the clip until the volume transformation
begins to take effect, in seconds. By default at the beginning.
end_time : float, optional
Time from the beginning of the clip until the volume transformation
ends to take effect, in seconds. By default at the end.
Examples
--------
>>> from moviepy import AudioFileClip
>>>
>>> music = AudioFileClip('music.ogg')
>>> doubled_audio_clip = clip.multiply_volume(2) # doubles audio volume
>>> half_audio_clip = clip.multiply_volume(0.5) # half audio
>>>
>>> # silence the clip for one second, from t=2 to t=3
>>> silenced_clip = clip.multiply_volume(0, start_time=2, end_time=3)
"""
if start_time is None and end_time is None:
return clip.transform(
lambda get_frame, t: factor * get_frame(t),
keep_duration=True,
)
return clip.transform(
_multiply_volume_in_range(
factor,
clip.start if start_time is None else start_time,
clip.end if end_time is None else end_time,
clip.nchannels,
),
keep_duration=True,
)
|
https://github.com/Zulko/moviepy/blob/858bb81fba7e09f1f562283ed6d1394db883b6c7/project46/moviepy/audio/fx/multiply_volume.py#L24-L68
| 46 |
[
0,
1,
2,
3,
4,
5,
6,
7,
8,
9,
10,
11,
12,
13,
14,
15,
16,
17,
18,
19,
20,
21,
22,
23,
24,
25,
26,
27,
28,
29,
30,
31,
32,
33,
34,
35,
36,
37,
38,
39,
40,
41,
42,
43,
44
] | 100 |
[] | 0 | true | 100 | 45 | 3 | 100 | 28 |
def multiply_volume(clip, factor, start_time=None, end_time=None):
if start_time is None and end_time is None:
return clip.transform(
lambda get_frame, t: factor * get_frame(t),
keep_duration=True,
)
return clip.transform(
_multiply_volume_in_range(
factor,
clip.start if start_time is None else start_time,
clip.end if end_time is None else end_time,
clip.nchannels,
),
keep_duration=True,
)
| 28,408 |
Zulko/moviepy
|
858bb81fba7e09f1f562283ed6d1394db883b6c7
|
moviepy/audio/fx/audio_normalize.py
|
audio_normalize
|
(clip)
|
Return a clip whose volume is normalized to 0db.
Return an audio (or video) clip whose audio volume is normalized
so that the maximum volume is at 0db, the maximum achievable volume.
Examples
--------
>>> from moviepy import *
>>> videoclip = VideoFileClip('myvideo.mp4').fx(afx.audio_normalize)
|
Return a clip whose volume is normalized to 0db.
| 6 | 25 |
def audio_normalize(clip):
"""Return a clip whose volume is normalized to 0db.
Return an audio (or video) clip whose audio volume is normalized
so that the maximum volume is at 0db, the maximum achievable volume.
Examples
--------
>>> from moviepy import *
>>> videoclip = VideoFileClip('myvideo.mp4').fx(afx.audio_normalize)
"""
max_volume = clip.max_volume()
if max_volume == 0:
# Nothing to normalize.
# Avoids a divide by zero error.
return clip.copy()
else:
return multiply_volume(clip, 1 / max_volume)
|
https://github.com/Zulko/moviepy/blob/858bb81fba7e09f1f562283ed6d1394db883b6c7/project46/moviepy/audio/fx/audio_normalize.py#L6-L25
| 46 |
[
0,
1,
2,
3,
4,
5,
6,
7,
8,
9,
10,
11,
12,
13,
14,
15,
16,
17,
18,
19
] | 100 |
[] | 0 | true | 100 | 20 | 2 | 100 | 10 |
def audio_normalize(clip):
max_volume = clip.max_volume()
if max_volume == 0:
# Nothing to normalize.
# Avoids a divide by zero error.
return clip.copy()
else:
return multiply_volume(clip, 1 / max_volume)
| 28,409 |
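
A short sketch of the effect; the file name is a placeholder and the peak value is assumed:

from moviepy import AudioFileClip
from moviepy.audio.fx.audio_normalize import audio_normalize

music = AudioFileClip("music.ogg")
# If music.max_volume() were 0.5, this is equivalent to multiply_volume(music, 2.0),
# so the loudest sample reaches 0 dB; an all-silent clip is returned unchanged (as a copy).
normalized = audio_normalize(music)
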
|
Zulko/moviepy
|
858bb81fba7e09f1f562283ed6d1394db883b6c7
|
moviepy/audio/fx/audio_loop.py
|
audio_loop
|
(clip, n_loops=None, duration=None)
|
return concatenate_audioclips(n_loops * [clip])
|
Loops over an audio clip.
Returns an audio clip that plays the given clip either
`n_loops` times, or during `duration` seconds.
Examples
--------
>>> from moviepy import *
>>> videoclip = VideoFileClip('myvideo.mp4')
>>> music = AudioFileClip('music.ogg')
>>> audio = afx.audio_loop( music, duration=videoclip.duration)
>>> videoclip.with_audio(audio)
|
Loops over an audio clip.
| 6 | 26 |
def audio_loop(clip, n_loops=None, duration=None):
"""Loops over an audio clip.
Returns an audio clip that plays the given clip either
`n_loops` times, or during `duration` seconds.
Examples
--------
>>> from moviepy import *
>>> videoclip = VideoFileClip('myvideo.mp4')
>>> music = AudioFileClip('music.ogg')
>>> audio = afx.audio_loop( music, duration=videoclip.duration)
>>> videoclip.with_audio(audio)
"""
if duration is not None:
n_loops = int(duration / clip.duration) + 1
return concatenate_audioclips(n_loops * [clip]).with_duration(duration)
return concatenate_audioclips(n_loops * [clip])
|
https://github.com/Zulko/moviepy/blob/858bb81fba7e09f1f562283ed6d1394db883b6c7/project46/moviepy/audio/fx/audio_loop.py#L6-L26
| 46 |
[
0,
1,
2,
3,
4,
5,
6,
7,
8,
9,
10,
11,
12,
13,
14,
15
] | 76.190476 |
[
16,
17,
18,
20
] | 19.047619 | false | 50 | 21 | 2 | 80.952381 | 13 |
def audio_loop(clip, n_loops=None, duration=None):
if duration is not None:
n_loops = int(duration / clip.duration) + 1
return concatenate_audioclips(n_loops * [clip]).with_duration(duration)
return concatenate_audioclips(n_loops * [clip])
| 28,410 |
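
A usage sketch following the docstring's own pattern; both media files are placeholders:

from moviepy import *

videoclip = VideoFileClip("myvideo.mp4")
music = AudioFileClip("music.ogg")
# With duration given, n_loops becomes int(duration / music.duration) + 1 and the
# concatenated clip is trimmed back to exactly `duration` seconds.
audio = afx.audio_loop(music, duration=videoclip.duration)
videoclip = videoclip.with_audio(audio)
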
Zulko/moviepy
|
858bb81fba7e09f1f562283ed6d1394db883b6c7
|
moviepy/audio/fx/multiply_stereo_volume.py
|
multiply_stereo_volume
|
(clip, left=1, right=1)
|
return clip.transform(stereo_volume, keep_duration=True)
|
For a stereo audioclip, this function makes it possible to change the volume
of the left and right channel separately (with the factors `left`
and `right`). Makes a stereo audio clip in which the volume of left
and right is controllable.
Examples
--------
>>> from moviepy import AudioFileClip
>>> music = AudioFileClip('music.ogg')
>>> audio_r = music.multiply_stereo_volume(left=0, right=1) # mute left channel/s
>>> audio_h = music.multiply_stereo_volume(left=0.5, right=0.5) # half audio
|
For a stereo audioclip, this function makes it possible to change the volume
of the left and right channel separately (with the factors `left`
and `right`). Makes a stereo audio clip in which the volume of left
and right is controllable.
| 5 | 29 |
def multiply_stereo_volume(clip, left=1, right=1):
"""For a stereo audioclip, this function enables to change the volume
of the left and right channel separately (with the factors `left`
and `right`). Makes a stereo audio clip in which the volume of left
and right is controllable.
Examples
--------
>>> from moviepy import AudioFileClip
>>> music = AudioFileClip('music.ogg')
>>> audio_r = music.multiply_stereo_volume(left=0, right=1) # mute left channel/s
>>> audio_h = music.multiply_stereo_volume(left=0.5, right=0.5) # half audio
"""
def stereo_volume(get_frame, t):
frame = get_frame(t)
if len(frame) == 1: # mono
frame *= left if left is not None else right
else: # stereo, stereo surround...
for i in range(len(frame[0])): # odd channels are left
frame[:, i] *= left if i % 2 == 0 else right
return frame
return clip.transform(stereo_volume, keep_duration=True)
|
https://github.com/Zulko/moviepy/blob/858bb81fba7e09f1f562283ed6d1394db883b6c7/project46/moviepy/audio/fx/multiply_stereo_volume.py#L5-L29
| 46 |
[
0,
1,
2,
3,
4,
5,
6,
7,
8,
9,
10,
11,
12,
13,
14,
15,
16,
17,
18,
19,
20,
21,
22,
23,
24
] | 100 |
[] | 0 | true | 100 | 25 | 4 | 100 | 12 |
def multiply_stereo_volume(clip, left=1, right=1):
def stereo_volume(get_frame, t):
frame = get_frame(t)
if len(frame) == 1: # mono
frame *= left if left is not None else right
else: # stereo, stereo surround...
for i in range(len(frame[0])): # odd channels are left
frame[:, i] *= left if i % 2 == 0 else right
return frame
return clip.transform(stereo_volume, keep_duration=True)
| 28,411 |
Zulko/moviepy
|
858bb81fba7e09f1f562283ed6d1394db883b6c7
|
moviepy/audio/fx/audio_fadeout.py
|
_mono_factor_getter
|
(clip_duration)
|
return lambda t, duration: np.minimum(1.0 * (clip_duration - t) / duration, 1)
| 10 | 11 |
def _mono_factor_getter(clip_duration):
return lambda t, duration: np.minimum(1.0 * (clip_duration - t) / duration, 1)
|
https://github.com/Zulko/moviepy/blob/858bb81fba7e09f1f562283ed6d1394db883b6c7/project46/moviepy/audio/fx/audio_fadeout.py#L10-L11
| 46 |
[
0,
1
] | 100 |
[] | 0 | true | 100 | 2 | 1 | 100 | 0 |
def _mono_factor_getter(clip_duration):
return lambda t, duration: np.minimum(1.0 * (clip_duration - t) / duration, 1)
| 28,412 |
||
Zulko/moviepy
|
858bb81fba7e09f1f562283ed6d1394db883b6c7
|
moviepy/audio/fx/audio_fadeout.py
|
_stereo_factor_getter
|
(clip_duration, nchannels)
|
return getter
| 14 | 19 |
def _stereo_factor_getter(clip_duration, nchannels):
def getter(t, duration):
factor = np.minimum(1.0 * (clip_duration - t) / duration, 1)
return np.array([factor for _ in range(nchannels)]).T
return getter
|
https://github.com/Zulko/moviepy/blob/858bb81fba7e09f1f562283ed6d1394db883b6c7/project46/moviepy/audio/fx/audio_fadeout.py#L14-L19
| 46 |
[
0,
1,
2,
3,
4,
5
] | 100 |
[] | 0 | true | 100 | 6 | 3 | 100 | 0 |
def _stereo_factor_getter(clip_duration, nchannels):
def getter(t, duration):
factor = np.minimum(1.0 * (clip_duration - t) / duration, 1)
return np.array([factor for _ in range(nchannels)]).T
return getter
| 28,413 |
||
Zulko/moviepy
|
858bb81fba7e09f1f562283ed6d1394db883b6c7
|
moviepy/audio/fx/audio_fadeout.py
|
audio_fadeout
|
(clip, duration)
|
return clip.transform(
lambda get_frame, t: get_factor(t, duration) * get_frame(t),
keep_duration=True,
)
|
Return a sound clip where the sound fades out progressively
over ``duration`` seconds at the end of the clip.
Parameters
----------
duration : float
How long does it take for the sound to reach the zero level at the end
of the clip.
Examples
--------
>>> clip = VideoFileClip("media/chaplin.mp4")
>>> clip.fx(audio_fadeout, "00:00:06")
|
Return a sound clip where the sound fades out progressively
over ``duration`` seconds at the end of the clip.
| 25 | 51 |
def audio_fadeout(clip, duration):
"""Return a sound clip where the sound fades out progressively
over ``duration`` seconds at the end of the clip.
Parameters
----------
duration : float
How long does it take for the sound to reach the zero level at the end
of the clip.
Examples
--------
>>> clip = VideoFileClip("media/chaplin.mp4")
>>> clip.fx(audio_fadeout, "00:00:06")
"""
get_factor = (
_mono_factor_getter(clip.duration)
if clip.nchannels == 1
else _stereo_factor_getter(clip.duration, clip.nchannels)
)
return clip.transform(
lambda get_frame, t: get_factor(t, duration) * get_frame(t),
keep_duration=True,
)
|
https://github.com/Zulko/moviepy/blob/858bb81fba7e09f1f562283ed6d1394db883b6c7/project46/moviepy/audio/fx/audio_fadeout.py#L25-L51
| 46 |
[
0,
1,
2,
3,
4,
5,
6,
7,
8,
9,
10,
11,
12,
13,
14,
15,
16,
17,
18,
19,
20,
21,
22,
23,
24,
25,
26
] | 100 |
[] | 0 | true | 100 | 27 | 1 | 100 | 15 |
def audio_fadeout(clip, duration):
get_factor = (
_mono_factor_getter(clip.duration)
if clip.nchannels == 1
else _stereo_factor_getter(clip.duration, clip.nchannels)
)
return clip.transform(
lambda get_frame, t: get_factor(t, duration) * get_frame(t),
keep_duration=True,
)
| 28,414 |
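
A sketch of the fade-out factor with an assumed 3-second fade; the file name is a placeholder:

from moviepy import AudioFileClip
from moviepy.audio.fx.audio_fadeout import audio_fadeout

music = AudioFileClip("music.ogg")
# The multiplier is min((clip.duration - t) / 3, 1): equal to 1 until the last
# 3 seconds, then falling linearly to 0 at the very end of the clip.
faded = music.fx(audio_fadeout, 3)
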
Zulko/moviepy
|
858bb81fba7e09f1f562283ed6d1394db883b6c7
|
moviepy/audio/fx/audio_fadein.py
|
_mono_factor_getter
|
()
|
return lambda t, duration: np.minimum(t / duration, 1)
| 6 | 7 |
def _mono_factor_getter():
return lambda t, duration: np.minimum(t / duration, 1)
|
https://github.com/Zulko/moviepy/blob/858bb81fba7e09f1f562283ed6d1394db883b6c7/project46/moviepy/audio/fx/audio_fadein.py#L6-L7
| 46 |
[
0,
1
] | 100 |
[] | 0 | true | 100 | 2 | 1 | 100 | 0 |
def _mono_factor_getter():
return lambda t, duration: np.minimum(t / duration, 1)
| 28,415 |
||
Zulko/moviepy
|
858bb81fba7e09f1f562283ed6d1394db883b6c7
|
moviepy/audio/fx/audio_fadein.py
|
_stereo_factor_getter
|
(nchannels)
|
return getter
| 10 | 15 |
def _stereo_factor_getter(nchannels):
def getter(t, duration):
factor = np.minimum(t / duration, 1)
return np.array([factor for _ in range(nchannels)]).T
return getter
|
https://github.com/Zulko/moviepy/blob/858bb81fba7e09f1f562283ed6d1394db883b6c7/project46/moviepy/audio/fx/audio_fadein.py#L10-L15
| 46 |
[
0,
1,
2,
3,
4,
5
] | 100 |
[] | 0 | true | 100 | 6 | 3 | 100 | 0 |
def _stereo_factor_getter(nchannels):
def getter(t, duration):
factor = np.minimum(t / duration, 1)
return np.array([factor for _ in range(nchannels)]).T
return getter
| 28,416 |
||
Zulko/moviepy
|
858bb81fba7e09f1f562283ed6d1394db883b6c7
|
moviepy/audio/fx/audio_fadein.py
|
audio_fadein
|
(clip, duration)
|
return clip.transform(
lambda get_frame, t: get_factor(t, duration) * get_frame(t),
keep_duration=True,
)
|
Return an audio (or video) clip that is first mute, then the
sound arrives progressively over ``duration`` seconds.
Parameters
----------
duration : float
How long does it take for the sound to return to its normal level.
Examples
--------
>>> clip = VideoFileClip("media/chaplin.mp4")
>>> clip.fx(audio_fadein, "00:00:06")
|
Return an audio (or video) clip that is first mute, then the
sound arrives progressively over ``duration`` seconds.
| 20 | 45 |
def audio_fadein(clip, duration):
"""Return an audio (or video) clip that is first mute, then the
sound arrives progressively over ``duration`` seconds.
Parameters
----------
duration : float
How long does it take for the sound to return to its normal level.
Examples
--------
>>> clip = VideoFileClip("media/chaplin.mp4")
>>> clip.fx(audio_fadein, "00:00:06")
"""
get_factor = (
_mono_factor_getter()
if clip.nchannels == 1
else _stereo_factor_getter(clip.nchannels)
)
return clip.transform(
lambda get_frame, t: get_factor(t, duration) * get_frame(t),
keep_duration=True,
)
|
https://github.com/Zulko/moviepy/blob/858bb81fba7e09f1f562283ed6d1394db883b6c7/project46/moviepy/audio/fx/audio_fadein.py#L20-L45
| 46 |
[
0,
1,
2,
3,
4,
5,
6,
7,
8,
9,
10,
11,
12,
13,
14,
15,
16,
17,
18,
19,
20,
21,
22,
23,
24,
25
] | 100 |
[] | 0 | true | 100 | 26 | 1 | 100 | 14 |
def audio_fadein(clip, duration):
get_factor = (
_mono_factor_getter()
if clip.nchannels == 1
else _stereo_factor_getter(clip.nchannels)
)
return clip.transform(
lambda get_frame, t: get_factor(t, duration) * get_frame(t),
keep_duration=True,
)
| 28,417 |
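
A matching sketch for the fade-in, mirroring the docstring example with a numeric duration:

from moviepy import VideoFileClip
from moviepy.audio.fx.audio_fadein import audio_fadein

clip = VideoFileClip("media/chaplin.mp4")     # path taken from the docstring example
# The multiplier is min(t / 6, 1): silent at t=0, full volume from t=6s onwards.
clip = clip.fx(audio_fadein, 6)
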
Zulko/moviepy
|
858bb81fba7e09f1f562283ed6d1394db883b6c7
|
moviepy/audio/fx/audio_delay.py
|
audio_delay
|
(clip, offset=0.2, n_repeats=8, decay=1)
|
return CompositeAudioClip(
[
clip.copy(),
*[
multiply_volume(
clip.with_start((rep + 1) * offset), decayments[rep + 1]
)
for rep in range(n_repeats)
],
]
)
|
Repeats the audio a certain number of times at constant intervals, multiplying
its volume levels using a linear space in the range 1 to the ``decay`` argument
value.
Parameters
----------
offset : float, optional
Gap between repetitions start times, in seconds.
n_repeats : int, optional
Number of repetitions (without including the clip itself).
decay : float, optional
Multiplication factor for the volume level of the last repetition. Each
repetition will have a value in the linear function between 1 and this value,
increasing or decreasing constantly. Keep in mind that the last repetition
will be muted if this is 0, and if is greater than 1, the volume will increase
for each repetition.
Examples
--------
>>> from moviepy import *
>>> videoclip = AudioFileClip('myaudio.wav').fx(
...     audio_delay, offset=.2, n_repeats=10, decay=.2
... )
>>> # stereo A note
>>> make_frame = lambda t: np.array(
... [np.sin(440 * 2 * np.pi * t), np.sin(880 * 2 * np.pi * t)]
... ).T
... clip = AudioClip(make_frame=make_frame, duration=0.1, fps=44100)
... clip = audio_delay(clip, offset=.2, n_repeats=11, decay=0)
|
Repeats the audio a certain number of times at constant intervals, multiplying
its volume levels using a linear space in the range 1 to the ``decay`` argument
value.
| 9 | 56 |
def audio_delay(clip, offset=0.2, n_repeats=8, decay=1):
"""Repeats audio certain number of times at constant intervals multiplying
their volume levels using a linear space in the range 1 to ``decay`` argument
value.
Parameters
----------
offset : float, optional
Gap between repetitions start times, in seconds.
n_repeats : int, optional
Number of repetitions (without including the clip itself).
decay : float, optional
Multiplication factor for the volume level of the last repetition. Each
repetition will have a value in the linear function between 1 and this value,
increasing or decreasing constantly. Keep in mind that the last repetition
will be muted if this is 0, and if is greater than 1, the volume will increase
for each repetition.
Examples
--------
>>> from moviepy import *
>>> videoclip = AudioFileClip('myaudio.wav').fx(
...     audio_delay, offset=.2, n_repeats=10, decay=.2
... )
>>> # stereo A note
>>> make_frame = lambda t: np.array(
... [np.sin(440 * 2 * np.pi * t), np.sin(880 * 2 * np.pi * t)]
... ).T
... clip = AudioClip(make_frame=make_frame, duration=0.1, fps=44100)
... clip = audio_delay(clip, offset=.2, n_repeats=11, decay=0)
"""
decayments = np.linspace(1, max(0, decay), n_repeats + 1)
return CompositeAudioClip(
[
clip.copy(),
*[
multiply_volume(
clip.with_start((rep + 1) * offset), decayments[rep + 1]
)
for rep in range(n_repeats)
],
]
)
|
https://github.com/Zulko/moviepy/blob/858bb81fba7e09f1f562283ed6d1394db883b6c7/project46/moviepy/audio/fx/audio_delay.py#L9-L56
| 46 |
[
0,
1,
2,
3,
4,
5,
6,
7,
8,
9,
10,
11,
12,
13,
14,
15,
16,
17,
18,
19,
20,
21,
22,
23,
24,
25,
26,
27,
28,
29,
30,
31,
32,
33,
34,
35,
36,
37,
38,
39,
40,
41,
42,
43,
44,
45,
46,
47
] | 100 |
[] | 0 | true | 100 | 48 | 2 | 100 | 34 |
def audio_delay(clip, offset=0.2, n_repeats=8, decay=1):
decayments = np.linspace(1, max(0, decay), n_repeats + 1)
return CompositeAudioClip(
[
clip.copy(),
*[
multiply_volume(
clip.with_start((rep + 1) * offset), decayments[rep + 1]
)
for rep in range(n_repeats)
],
]
)
| 28,418 |
Zulko/moviepy
|
858bb81fba7e09f1f562283ed6d1394db883b6c7
|
moviepy/video/VideoClip.py
|
VideoClip.__init__
|
(
self, make_frame=None, is_mask=False, duration=None, has_constant_size=True
)
| 96 | 112 |
def __init__(
self, make_frame=None, is_mask=False, duration=None, has_constant_size=True
):
super().__init__()
self.mask = None
self.audio = None
self.pos = lambda t: (0, 0)
self.relative_pos = False
self.layer = 0
if make_frame:
self.make_frame = make_frame
self.size = self.get_frame(0).shape[:2][::-1]
self.is_mask = is_mask
self.has_constant_size = has_constant_size
if duration is not None:
self.duration = duration
self.end = duration
|
https://github.com/Zulko/moviepy/blob/858bb81fba7e09f1f562283ed6d1394db883b6c7/project46/moviepy/video/VideoClip.py#L96-L112
| 46 |
[
0,
3,
4,
5,
6,
7,
8,
9,
10,
11,
12,
13,
14,
15,
16
] | 88.235294 |
[] | 0 | false | 72.389791 | 17 | 3 | 100 | 0 |
def __init__(
self, make_frame=None, is_mask=False, duration=None, has_constant_size=True
):
super().__init__()
self.mask = None
self.audio = None
self.pos = lambda t: (0, 0)
self.relative_pos = False
self.layer = 0
if make_frame:
self.make_frame = make_frame
self.size = self.get_frame(0).shape[:2][::-1]
self.is_mask = is_mask
self.has_constant_size = has_constant_size
if duration is not None:
self.duration = duration
self.end = duration
| 28,419 |
|||
Zulko/moviepy
|
858bb81fba7e09f1f562283ed6d1394db883b6c7
|
moviepy/video/VideoClip.py
|
VideoClip.w
|
(self)
|
return self.size[0]
|
Returns the width of the video.
|
Returns the width of the video.
| 115 | 117 |
def w(self):
"""Returns the width of the video."""
return self.size[0]
|
https://github.com/Zulko/moviepy/blob/858bb81fba7e09f1f562283ed6d1394db883b6c7/project46/moviepy/video/VideoClip.py#L115-L117
| 46 |
[
0,
1,
2
] | 100 |
[] | 0 | true | 72.389791 | 3 | 1 | 100 | 1 |
def w(self):
return self.size[0]
| 28,420 |
Zulko/moviepy
|
858bb81fba7e09f1f562283ed6d1394db883b6c7
|
moviepy/video/VideoClip.py
|
VideoClip.h
|
(self)
|
return self.size[1]
|
Returns the height of the video.
|
Returns the height of the video.
| 120 | 122 |
def h(self):
"""Returns the height of the video."""
return self.size[1]
|
https://github.com/Zulko/moviepy/blob/858bb81fba7e09f1f562283ed6d1394db883b6c7/project46/moviepy/video/VideoClip.py#L120-L122
| 46 |
[
0,
1,
2
] | 100 |
[] | 0 | true | 72.389791 | 3 | 1 | 100 | 1 |
def h(self):
return self.size[1]
| 28,421 |
Zulko/moviepy
|
858bb81fba7e09f1f562283ed6d1394db883b6c7
|
moviepy/video/VideoClip.py
|
VideoClip.aspect_ratio
|
(self)
|
return self.w / float(self.h)
|
Returns the aspect ratio of the video.
|
Returns the aspect ratio of the video.
| 125 | 127 |
def aspect_ratio(self):
"""Returns the aspect ratio of the video."""
return self.w / float(self.h)
|
https://github.com/Zulko/moviepy/blob/858bb81fba7e09f1f562283ed6d1394db883b6c7/project46/moviepy/video/VideoClip.py#L125-L127
| 46 |
[
0,
1,
2
] | 100 |
[] | 0 | true | 72.389791 | 3 | 1 | 100 | 1 |
def aspect_ratio(self):
return self.w / float(self.h)
| 28,422 |
Zulko/moviepy
|
858bb81fba7e09f1f562283ed6d1394db883b6c7
|
moviepy/video/VideoClip.py
|
VideoClip.n_frames
|
(self)
|
return int(self.duration * self.fps)
|
Returns the number of frames of the video.
|
Returns the number of frames of the video.
| 132 | 134 |
def n_frames(self):
"""Returns the number of frames of the video."""
return int(self.duration * self.fps)
|
https://github.com/Zulko/moviepy/blob/858bb81fba7e09f1f562283ed6d1394db883b6c7/project46/moviepy/video/VideoClip.py#L132-L134
| 46 |
[
0,
1,
2
] | 100 |
[] | 0 | true | 72.389791 | 3 | 1 | 100 | 1 |
def n_frames(self):
return int(self.duration * self.fps)
| 28,423 |
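
A trivial worked example of the frame-count formula, with assumed values:

duration, fps = 10.0, 24        # illustrative: a 10-second clip at 24 fps
n_frames = int(duration * fps)  # -> 240 frames
assert n_frames == 240
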
Zulko/moviepy
|
858bb81fba7e09f1f562283ed6d1394db883b6c7
|
moviepy/video/VideoClip.py
|
VideoClip.__copy__
|
(self)
|
return new_clip
|
Mixed copy of the clip.
Returns a shallow copy of the clip whose mask and audio will
be shallow copies of the clip's mask and audio if they exist.
This method is intensively used to produce new clips every time
there is an outplace transformation of the clip (clip.resize,
clip.subclip, etc.)
Acts like a deepcopy except for the fact that readers and other
possible unpickleable objects are not copied.
|
Mixed copy of the clip.
| 136 | 156 |
def __copy__(self):
"""Mixed copy of the clip.
Returns a shallow copy of the clip whose mask and audio will
be shallow copies of the clip's mask and audio if they exist.
This method is intensively used to produce new clips every time
there is an outplace transformation of the clip (clip.resize,
clip.subclip, etc.)
Acts like a deepcopy except for the fact that readers and other
possible unpickleable objects are not copied.
"""
cls = self.__class__
new_clip = cls.__new__(cls)
for attr in self.__dict__:
value = getattr(self, attr)
if attr in ("mask", "audio"):
value = _copy.copy(value)
setattr(new_clip, attr, value)
return new_clip
|
https://github.com/Zulko/moviepy/blob/858bb81fba7e09f1f562283ed6d1394db883b6c7/project46/moviepy/video/VideoClip.py#L136-L156
| 46 |
[
0,
1,
2,
3,
4,
5,
6,
7,
8,
9,
10,
11,
12,
13,
14,
15,
16,
17,
18,
19,
20
] | 100 |
[] | 0 | true | 72.389791 | 21 | 3 | 100 | 11 |
def __copy__(self):
cls = self.__class__
new_clip = cls.__new__(cls)
for attr in self.__dict__:
value = getattr(self, attr)
if attr in ("mask", "audio"):
value = _copy.copy(value)
setattr(new_clip, attr, value)
return new_clip
| 28,424 |
Zulko/moviepy
|
858bb81fba7e09f1f562283ed6d1394db883b6c7
|
moviepy/video/VideoClip.py
|
VideoClip.save_frame
|
(self, filename, t=0, with_mask=True)
|
Save a clip's frame to an image file.
Saves the frame of clip corresponding to time ``t`` in ``filename``.
``t`` can be expressed in seconds (15.35), in (min, sec),
in (hour, min, sec), or as a string: '01:03:05.35'.
Parameters
----------
filename : str
Name of the file in which the frame will be stored.
t : float or tuple or str, optional
Moment of the frame to be saved. As default, the first frame will be
saved.
with_mask : bool, optional
If ``True``, the mask is saved in the alpha layer of the picture
(only works with PNGs).
|
Save a clip's frame to an image file.
| 165 | 193 |
def save_frame(self, filename, t=0, with_mask=True):
"""Save a clip's frame to an image file.
Saves the frame of clip corresponding to time ``t`` in ``filename``.
``t`` can be expressed in seconds (15.35), in (min, sec),
in (hour, min, sec), or as a string: '01:03:05.35'.
Parameters
----------
filename : str
Name of the file in which the frame will be stored.
t : float or tuple or str, optional
Moment of the frame to be saved. As default, the first frame will be
saved.
with_mask : bool, optional
If ``True``, the mask is saved in the alpha layer of the picture
(only works with PNGs).
"""
im = self.get_frame(t)
if with_mask and self.mask is not None:
mask = 255 * self.mask.get_frame(t)
im = np.dstack([im, mask]).astype("uint8")
else:
im = im.astype("uint8")
imsave(filename, im)
|
https://github.com/Zulko/moviepy/blob/858bb81fba7e09f1f562283ed6d1394db883b6c7/project46/moviepy/video/VideoClip.py#L165-L193
| 46 |
[
0,
1,
2,
3,
4,
5,
6,
7,
8,
9,
10,
11,
12,
13,
14,
15,
16,
17,
18,
19,
20,
21,
22,
23,
24,
25,
26,
27,
28
] | 100 |
[] | 0 | true | 72.389791 | 29 | 3 | 100 | 19 |
def save_frame(self, filename, t=0, with_mask=True):
im = self.get_frame(t)
if with_mask and self.mask is not None:
mask = 255 * self.mask.get_frame(t)
im = np.dstack([im, mask]).astype("uint8")
else:
im = im.astype("uint8")
imsave(filename, im)
| 28,425 |
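
A minimal usage sketch; the file names are placeholders:

from moviepy import VideoFileClip

clip = VideoFileClip("myvideo.mp4")
# t accepts seconds, (min, sec), (hour, min, sec) or a 'HH:MM:SS.xx' string;
# with_mask=True stores the clip's mask (if any) in the PNG alpha layer.
clip.save_frame("frame_at_2s.png", t=2)
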
|
Zulko/moviepy
|
858bb81fba7e09f1f562283ed6d1394db883b6c7
|
moviepy/video/VideoClip.py
|
VideoClip.write_videofile
|
(
self,
filename,
fps=None,
codec=None,
bitrate=None,
audio=True,
audio_fps=44100,
preset="medium",
audio_nbytes=4,
audio_codec=None,
audio_bitrate=None,
audio_bufsize=2000,
temp_audiofile=None,
temp_audiofile_path="",
remove_temp=True,
write_logfile=False,
threads=None,
ffmpeg_params=None,
logger="bar",
pixel_format=None,
)
|
Write the clip to a videofile.
Parameters
----------
filename
Name of the video file to write in, as a string or a path-like object.
The extension must correspond to the "codec" used (see below),
or simply be '.avi' (which will work with any codec).
fps
Number of frames per second in the resulting video file. If None is
provided, and the clip has an fps attribute, this fps will be used.
codec
Codec to use for image encoding. Can be any codec supported
by ffmpeg. If the filename has extension '.mp4', '.ogv', '.webm',
the codec will be set accordingly, but you can still set it if you
don't like the default. For other extensions, the output filename
must be set accordingly.
Some examples of codecs are:
- ``'libx264'`` (default codec for file extension ``.mp4``)
makes well-compressed videos (quality tunable using 'bitrate').
- ``'mpeg4'`` (other codec for extension ``.mp4``) can be an alternative
to ``'libx264'``, and produces higher quality videos by default.
- ``'rawvideo'`` (use file extension ``.avi``) will produce
a video of perfect quality, of possibly very huge size.
- ``png`` (use file extension ``.avi``) will produce a video
of perfect quality, of smaller size than with ``rawvideo``.
- ``'libvorbis'`` (use file extension ``.ogv``) is a nice video
format, which is completely free/ open source. However not
everyone has the codecs installed by default on their machine.
- ``'libvpx'`` (use file extension ``.webm``) is a tiny video
format well suited for web videos (with HTML5). Open source.
audio
Either ``True``, ``False``, or a file name.
If ``True`` and the clip has an audio clip attached, this
audio clip will be incorporated as a soundtrack in the movie.
If ``audio`` is the name of an audio file, this audio file
will be incorporated as a soundtrack in the movie.
audio_fps
frame rate to use when generating the sound.
temp_audiofile
the name of the temporary audiofile, as a string or path-like object,
to be created and then used to write the complete video, if any.
temp_audiofile_path
the location that the temporary audiofile is placed, as a
string or path-like object. Defaults to the current working directory.
audio_codec
Which audio codec should be used. Examples are 'libmp3lame'
for '.mp3', 'libvorbis' for 'ogg', 'libfdk_aac':'m4a',
'pcm_s16le' for 16-bit wav and 'pcm_s32le' for 32-bit wav.
Default is 'libmp3lame', unless the video extension is 'ogv'
or 'webm', in which case the default is 'libvorbis'.
audio_bitrate
Audio bitrate, given as a string like '50k', '500k', '3000k'.
Will determine the size/quality of audio in the output file.
Note that this is mainly an indicative goal; the bitrate won't
necessarily be exactly this in the final file.
preset
Sets the time that FFMPEG will spend optimizing the compression.
Choices are: ultrafast, superfast, veryfast, faster, fast, medium,
slow, slower, veryslow, placebo. Note that this does not impact
the quality of the video, only the size of the video file. So
choose ultrafast when you are in a hurry and file size does not
matter.
threads
Number of threads to use for ffmpeg. Can speed up the writing of
the video on multicore computers.
ffmpeg_params
Any additional ffmpeg parameters you would like to pass, as a list
of terms, like ['-option1', 'value1', '-option2', 'value2'].
write_logfile
If true, will write log files for the audio and the video.
These will be files ending with '.log' with the name of the
output file in them.
logger
Either ``"bar"`` for progress bar or ``None`` or any Proglog logger.
pixel_format
Pixel format for the output video file.
Examples
--------
>>> from moviepy import VideoFileClip
>>> clip = VideoFileClip("myvideo.mp4").subclip(100,120)
>>> clip.write_videofile("my_new_video.mp4")
>>> clip.close()
|
Write the clip to a videofile.
| 199 | 398 |
def write_videofile(
self,
filename,
fps=None,
codec=None,
bitrate=None,
audio=True,
audio_fps=44100,
preset="medium",
audio_nbytes=4,
audio_codec=None,
audio_bitrate=None,
audio_bufsize=2000,
temp_audiofile=None,
temp_audiofile_path="",
remove_temp=True,
write_logfile=False,
threads=None,
ffmpeg_params=None,
logger="bar",
pixel_format=None,
):
"""Write the clip to a videofile.
Parameters
----------
filename
Name of the video file to write in, as a string or a path-like object.
The extension must correspond to the "codec" used (see below),
or simply be '.avi' (which will work with any codec).
fps
Number of frames per second in the resulting video file. If None is
provided, and the clip has an fps attribute, this fps will be used.
codec
Codec to use for image encoding. Can be any codec supported
by ffmpeg. If the filename has extension '.mp4', '.ogv', '.webm',
the codec will be set accordingly, but you can still set it if you
don't like the default. For other extensions, the output filename
must be set accordingly.
Some examples of codecs are:
- ``'libx264'`` (default codec for file extension ``.mp4``)
makes well-compressed videos (quality tunable using 'bitrate').
- ``'mpeg4'`` (other codec for extension ``.mp4``) can be an alternative
to ``'libx264'``, and produces higher quality videos by default.
- ``'rawvideo'`` (use file extension ``.avi``) will produce
a video of perfect quality, of possibly very huge size.
- ``png`` (use file extension ``.avi``) will produce a video
of perfect quality, of smaller size than with ``rawvideo``.
- ``'libvorbis'`` (use file extension ``.ogv``) is a nice video
format, which is completely free/ open source. However not
everyone has the codecs installed by default on their machine.
- ``'libvpx'`` (use file extension ``.webm``) is a tiny video
format well suited for web videos (with HTML5). Open source.
audio
Either ``True``, ``False``, or a file name.
If ``True`` and the clip has an audio clip attached, this
audio clip will be incorporated as a soundtrack in the movie.
If ``audio`` is the name of an audio file, this audio file
will be incorporated as a soundtrack in the movie.
audio_fps
frame rate to use when generating the sound.
temp_audiofile
the name of the temporary audiofile, as a string or path-like object,
to be created and then used to write the complete video, if any.
temp_audiofile_path
the location that the temporary audiofile is placed, as a
string or path-like object. Defaults to the current working directory.
audio_codec
Which audio codec should be used. Examples are 'libmp3lame'
for '.mp3', 'libvorbis' for 'ogg', 'libfdk_aac':'m4a',
'pcm_s16le' for 16-bit wav and 'pcm_s32le' for 32-bit wav.
Default is 'libmp3lame', unless the video extension is 'ogv'
or 'webm', in which case the default is 'libvorbis'.
audio_bitrate
Audio bitrate, given as a string like '50k', '500k', '3000k'.
Will determine the size/quality of audio in the output file.
Note that this is mainly an indicative goal; the bitrate won't
necessarily be exactly this in the final file.
preset
Sets the time that FFMPEG will spend optimizing the compression.
Choices are: ultrafast, superfast, veryfast, faster, fast, medium,
slow, slower, veryslow, placebo. Note that this does not impact
the quality of the video, only the size of the video file. So
choose ultrafast when you are in a hurry and file size does not
matter.
threads
Number of threads to use for ffmpeg. Can speed up the writing of
the video on multicore computers.
ffmpeg_params
Any additional ffmpeg parameters you would like to pass, as a list
of terms, like ['-option1', 'value1', '-option2', 'value2'].
write_logfile
If true, will write log files for the audio and the video.
These will be files ending with '.log' with the name of the
output file in them.
logger
Either ``"bar"`` for progress bar or ``None`` or any Proglog logger.
pixel_format
Pixel format for the output video file.
Examples
--------
>>> from moviepy import VideoFileClip
>>> clip = VideoFileClip("myvideo.mp4").subclip(100,120)
>>> clip.write_videofile("my_new_video.mp4")
>>> clip.close()
"""
name, ext = os.path.splitext(os.path.basename(filename))
ext = ext[1:].lower()
logger = proglog.default_bar_logger(logger)
if codec is None:
try:
codec = extensions_dict[ext]["codec"][0]
except KeyError:
raise ValueError(
"MoviePy couldn't find the codec associated "
"with the filename. Provide the 'codec' "
"parameter in write_videofile."
)
if audio_codec is None:
if ext in ["ogv", "webm"]:
audio_codec = "libvorbis"
else:
audio_codec = "libmp3lame"
elif audio_codec == "raw16":
audio_codec = "pcm_s16le"
elif audio_codec == "raw32":
audio_codec = "pcm_s32le"
audiofile = audio if isinstance(audio, str) else None
make_audio = (
(audiofile is None) and (audio is True) and (self.audio is not None)
)
if make_audio and temp_audiofile:
# The audio will be the clip's audio
audiofile = temp_audiofile
elif make_audio:
audio_ext = find_extension(audio_codec)
audiofile = os.path.join(
temp_audiofile_path,
name + Clip._TEMP_FILES_PREFIX + "wvf_snd.%s" % audio_ext,
)
# enough cpu for multiprocessing ? USELESS RIGHT NOW, WILL COME AGAIN
# enough_cpu = (multiprocessing.cpu_count() > 1)
logger(message="Moviepy - Building video %s." % filename)
if make_audio:
self.audio.write_audiofile(
audiofile,
audio_fps,
audio_nbytes,
audio_bufsize,
audio_codec,
bitrate=audio_bitrate,
write_logfile=write_logfile,
logger=logger,
)
ffmpeg_write_video(
self,
filename,
fps,
codec,
bitrate=bitrate,
preset=preset,
write_logfile=write_logfile,
audiofile=audiofile,
threads=threads,
ffmpeg_params=ffmpeg_params,
logger=logger,
pixel_format=pixel_format,
)
if remove_temp and make_audio:
if os.path.exists(audiofile):
os.remove(audiofile)
logger(message="Moviepy - video ready %s" % filename)
|
https://github.com/Zulko/moviepy/blob/858bb81fba7e09f1f562283ed6d1394db883b6c7/project46/moviepy/video/VideoClip.py#L199-L398
| 46 |
[
0,
125,
126,
127,
128,
129,
130,
131,
132,
133,
134,
135,
136,
137,
138,
139,
140,
141,
142,
143,
144,
145,
150,
151,
152,
153,
154,
155,
156,
157,
159,
160,
161,
162,
163,
164,
165,
166,
167,
168,
169,
170,
171,
172,
173,
174,
175,
176,
177,
178,
179,
180,
181,
182,
183,
184,
185,
186,
187,
188,
189,
190,
191,
192,
193,
194,
195,
196,
197,
198,
199
] | 35.5 |
[
146,
147,
148,
149,
158
] | 2.5 | false | 72.389791 | 200 | 16 | 97.5 | 102 |
def write_videofile(
self,
filename,
fps=None,
codec=None,
bitrate=None,
audio=True,
audio_fps=44100,
preset="medium",
audio_nbytes=4,
audio_codec=None,
audio_bitrate=None,
audio_bufsize=2000,
temp_audiofile=None,
temp_audiofile_path="",
remove_temp=True,
write_logfile=False,
threads=None,
ffmpeg_params=None,
logger="bar",
pixel_format=None,
):
name, ext = os.path.splitext(os.path.basename(filename))
ext = ext[1:].lower()
logger = proglog.default_bar_logger(logger)
if codec is None:
try:
codec = extensions_dict[ext]["codec"][0]
except KeyError:
raise ValueError(
"MoviePy couldn't find the codec associated "
"with the filename. Provide the 'codec' "
"parameter in write_videofile."
)
if audio_codec is None:
if ext in ["ogv", "webm"]:
audio_codec = "libvorbis"
else:
audio_codec = "libmp3lame"
elif audio_codec == "raw16":
audio_codec = "pcm_s16le"
elif audio_codec == "raw32":
audio_codec = "pcm_s32le"
audiofile = audio if isinstance(audio, str) else None
make_audio = (
(audiofile is None) and (audio is True) and (self.audio is not None)
)
if make_audio and temp_audiofile:
# The audio will be the clip's audio
audiofile = temp_audiofile
elif make_audio:
audio_ext = find_extension(audio_codec)
audiofile = os.path.join(
temp_audiofile_path,
name + Clip._TEMP_FILES_PREFIX + "wvf_snd.%s" % audio_ext,
)
# enough cpu for multiprocessing ? USELESS RIGHT NOW, WILL COME AGAIN
# enough_cpu = (multiprocessing.cpu_count() > 1)
logger(message="Moviepy - Building video %s." % filename)
if make_audio:
self.audio.write_audiofile(
audiofile,
audio_fps,
audio_nbytes,
audio_bufsize,
audio_codec,
bitrate=audio_bitrate,
write_logfile=write_logfile,
logger=logger,
)
ffmpeg_write_video(
self,
filename,
fps,
codec,
bitrate=bitrate,
preset=preset,
write_logfile=write_logfile,
audiofile=audiofile,
threads=threads,
ffmpeg_params=ffmpeg_params,
logger=logger,
pixel_format=pixel_format,
)
if remove_temp and make_audio:
if os.path.exists(audiofile):
os.remove(audiofile)
logger(message="Moviepy - video ready %s" % filename)
| 28,426 |
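
A slightly fuller variant of the docstring example, showing the speed-related knobs; the input file is a placeholder:

from moviepy import VideoFileClip

clip = VideoFileClip("myvideo.mp4").subclip(100, 120)
# For a '.mp4' target, codec defaults to 'libx264' and audio_codec to 'libmp3lame';
# preset and threads trade encoding time against file size, not visual quality.
clip.write_videofile("my_new_video.mp4", preset="ultrafast", threads=4)
clip.close()
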
|
Zulko/moviepy
|
858bb81fba7e09f1f562283ed6d1394db883b6c7
|
moviepy/video/VideoClip.py
|
VideoClip.write_images_sequence
|
(
self, name_format, fps=None, with_mask=True, logger="bar"
)
|
return filenames
|
Writes the videoclip to a sequence of image files.
Parameters
----------
name_format
  A filename specifying the numbering format and extension
of the pictures. For instance "frame%03d.png" for filenames
indexed with 3 digits and PNG format. Also possible:
"some_folder/frame%04d.jpeg", etc.
fps
Number of frames per second to consider when writing the
clip. If not specified, the clip's ``fps`` attribute will
be used if it has one.
with_mask
  Will save the clip's mask (if any) as an alpha channel (PNGs only).
logger
Either ``"bar"`` for progress bar or ``None`` or any Proglog logger.
Returns
-------
names_list
A list of all the files generated.
Notes
-----
The resulting image sequence can be read using e.g. the class
``ImageSequenceClip``.
|
Writes the videoclip to a sequence of image files.
| 403 | 455 |
def write_images_sequence(
self, name_format, fps=None, with_mask=True, logger="bar"
):
"""Writes the videoclip to a sequence of image files.
Parameters
----------
name_format
      A filename specifying the numbering format and extension
of the pictures. For instance "frame%03d.png" for filenames
indexed with 3 digits and PNG format. Also possible:
"some_folder/frame%04d.jpeg", etc.
fps
Number of frames per second to consider when writing the
clip. If not specified, the clip's ``fps`` attribute will
be used if it has one.
with_mask
      Will save the clip's mask (if any) as an alpha channel (PNGs only).
logger
Either ``"bar"`` for progress bar or ``None`` or any Proglog logger.
Returns
-------
names_list
A list of all the files generated.
Notes
-----
The resulting image sequence can be read using e.g. the class
``ImageSequenceClip``.
"""
logger = proglog.default_bar_logger(logger)
# Fails on GitHub macos CI
# logger(message="Moviepy - Writing frames %s." % name_format)
timings = np.arange(0, self.duration, 1.0 / fps)
filenames = []
for i, t in logger.iter_bar(t=list(enumerate(timings))):
name = name_format % i
filenames.append(name)
self.save_frame(name, t, with_mask=with_mask)
# logger(message="Moviepy - Done writing frames %s." % name_format)
return filenames
|
https://github.com/Zulko/moviepy/blob/858bb81fba7e09f1f562283ed6d1394db883b6c7/project46/moviepy/video/VideoClip.py#L403-L455
| 46 |
[
0,
38,
39,
40,
41,
42,
43,
44,
45,
46,
47,
48,
49,
50,
51,
52
] | 30.188679 |
[] | 0 | false | 72.389791 | 53 | 2 | 100 | 34 |
def write_images_sequence(
self, name_format, fps=None, with_mask=True, logger="bar"
):
logger = proglog.default_bar_logger(logger)
# Fails on GitHub macos CI
# logger(message="Moviepy - Writing frames %s." % name_format)
timings = np.arange(0, self.duration, 1.0 / fps)
filenames = []
for i, t in logger.iter_bar(t=list(enumerate(timings))):
name = name_format % i
filenames.append(name)
self.save_frame(name, t, with_mask=with_mask)
# logger(message="Moviepy - Done writing frames %s." % name_format)
return filenames
| 28,427 |
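A short usage sketch for write_images_sequence, again assuming a hypothetical input file; the %04d placeholder produces zero-padded frame numbers.
# Hedged example: dump one PNG per frame at 10 fps.
from moviepy.video.io.VideoFileClip import VideoFileClip
clip = VideoFileClip("my_video.mp4").subclip(0, 2)  # hypothetical input path
filenames = clip.write_images_sequence("frame%04d.png", fps=10)
print(len(filenames), "frames written")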
Zulko/moviepy
|
858bb81fba7e09f1f562283ed6d1394db883b6c7
|
moviepy/video/VideoClip.py
|
VideoClip.write_gif
|
(
self,
filename,
fps=None,
program="imageio",
opt="nq",
fuzz=1,
loop=0,
dispose=False,
colors=None,
tempfiles=False,
logger="bar",
pixel_format=None,
)
|
Write the VideoClip to a GIF file.
Converts a VideoClip into an animated GIF using ImageMagick
or ffmpeg.
Parameters
----------
filename
Name of the resulting gif file, as a string or a path-like object.
fps
Number of frames per second (see note below). If it
isn't provided, then the function will look for the clip's
``fps`` attribute (VideoFileClip, for instance, have one).
program
Software to use for the conversion, either 'imageio' (this will use
the library FreeImage through ImageIO), or 'ImageMagick', or 'ffmpeg'.
opt
  Optimization to apply. If program='imageio', opt must be either 'wu'
(Wu) or 'nq' (Neuquant). If program='ImageMagick',
either 'optimizeplus' or 'OptimizeTransparency'.
fuzz
(ImageMagick only) Compresses the GIF by considering that
the colors that are less than fuzz% different are in fact
the same.
tempfiles
Writes every frame to a file instead of passing them in the RAM.
Useful on computers with little RAM. Can only be used with
  'ImageMagick' or 'ffmpeg'.
progress_bar
If True, displays a progress bar
pixel_format
  Pixel format for the output gif file. If not specified,
  'rgb24' will be used as the default format unless ``clip.mask``
  exists, in which case 'rgba' will be used. This option is only
  accepted if ``program=ffmpeg`` or when ``tempfiles=True``.
Notes
-----
The gif will play the clip in real time (you can only change the
frame rate). If you want the gif to play slower than the clip, you
can use ::
>>> # slow down clip 50% and make it a gif
>>> myClip.multiply_speed(0.5).to_gif('myClip.gif')
|
Write the VideoClip to a GIF file.
| 460 | 576 |
def write_gif(
self,
filename,
fps=None,
program="imageio",
opt="nq",
fuzz=1,
loop=0,
dispose=False,
colors=None,
tempfiles=False,
logger="bar",
pixel_format=None,
):
"""Write the VideoClip to a GIF file.
Converts a VideoClip into an animated GIF using ImageMagick
or ffmpeg.
Parameters
----------
filename
Name of the resulting gif file, as a string or a path-like object.
fps
Number of frames per second (see note below). If it
isn't provided, then the function will look for the clip's
``fps`` attribute (VideoFileClip, for instance, have one).
program
Software to use for the conversion, either 'imageio' (this will use
the library FreeImage through ImageIO), or 'ImageMagick', or 'ffmpeg'.
opt
      Optimization to apply. If program='imageio', opt must be either 'wu'
(Wu) or 'nq' (Neuquant). If program='ImageMagick',
either 'optimizeplus' or 'OptimizeTransparency'.
fuzz
(ImageMagick only) Compresses the GIF by considering that
the colors that are less than fuzz% different are in fact
the same.
tempfiles
Writes every frame to a file instead of passing them in the RAM.
Useful on computers with little RAM. Can only be used with
      'ImageMagick' or 'ffmpeg'.
progress_bar
If True, displays a progress bar
pixel_format
      Pixel format for the output gif file. If not specified,
      'rgb24' will be used as the default format unless ``clip.mask``
      exists, in which case 'rgba' will be used. This option is only
      accepted if ``program=ffmpeg`` or when ``tempfiles=True``.
Notes
-----
    The gif will play the clip in real time (you can only change the
    frame rate). If you want the gif to play slower than the clip, you
    can use ::
>>> # slow down clip 50% and make it a gif
>>> myClip.multiply_speed(0.5).to_gif('myClip.gif')
"""
# A little sketchy at the moment, maybe move all that in write_gif,
# refactor a little... we will see.
if program == "imageio":
write_gif_with_image_io(
self,
filename,
fps=fps,
opt=opt,
loop=loop,
colors=colors,
logger=logger,
)
elif tempfiles:
# convert imageio opt variable to something that can be used with
# ImageMagick
opt = "optimizeplus" if opt == "nq" else "OptimizeTransparency"
write_gif_with_tempfiles(
self,
filename,
fps=fps,
program=program,
opt=opt,
fuzz=fuzz,
loop=loop,
dispose=dispose,
colors=colors,
logger=logger,
pixel_format=pixel_format,
)
else:
# convert imageio opt variable to something that can be used with
# ImageMagick
opt = "optimizeplus" if opt == "nq" else "OptimizeTransparency"
write_gif(
self,
filename,
fps=fps,
program=program,
opt=opt,
fuzz=fuzz,
loop=loop,
dispose=dispose,
colors=colors,
logger=logger,
pixel_format=pixel_format,
)
|
https://github.com/Zulko/moviepy/blob/858bb81fba7e09f1f562283ed6d1394db883b6c7/project46/moviepy/video/VideoClip.py#L460-L576
| 46 |
[
0,
72,
73,
74,
83,
84,
85,
86,
87,
102,
103,
104
] | 10.25641 |
[] | 0 | false | 72.389791 | 117 | 3 | 100 | 54 |
def write_gif(
self,
filename,
fps=None,
program="imageio",
opt="nq",
fuzz=1,
loop=0,
dispose=False,
colors=None,
tempfiles=False,
logger="bar",
pixel_format=None,
):
# A little sketchy at the moment, maybe move all that in write_gif,
# refactor a little... we will see.
if program == "imageio":
write_gif_with_image_io(
self,
filename,
fps=fps,
opt=opt,
loop=loop,
colors=colors,
logger=logger,
)
elif tempfiles:
# convert imageio opt variable to something that can be used with
# ImageMagick
opt = "optimizeplus" if opt == "nq" else "OptimizeTransparency"
write_gif_with_tempfiles(
self,
filename,
fps=fps,
program=program,
opt=opt,
fuzz=fuzz,
loop=loop,
dispose=dispose,
colors=colors,
logger=logger,
pixel_format=pixel_format,
)
else:
# convert imageio opt variable to something that can be used with
# ImageMagick
opt = "optimizeplus" if opt == "nq" else "OptimizeTransparency"
write_gif(
self,
filename,
fps=fps,
program=program,
opt=opt,
fuzz=fuzz,
loop=loop,
dispose=dispose,
colors=colors,
logger=logger,
pixel_format=pixel_format,
)
| 28,428 |
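A usage sketch for write_gif with the default imageio backend, assuming a hypothetical input file; switching program to "ImageMagick" or "ffmpeg" requires the corresponding binary to be available.
# Hedged example: render a three-second GIF at 10 fps.
from moviepy.video.io.VideoFileClip import VideoFileClip
clip = VideoFileClip("my_video.mp4").subclip(0, 3)  # hypothetical input path
clip.write_gif("out.gif", fps=10)  # program="imageio" and opt="nq" are the defaults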
|
Zulko/moviepy
|
858bb81fba7e09f1f562283ed6d1394db883b6c7
|
moviepy/video/VideoClip.py
|
VideoClip.subfx
|
(self, fx, start_time=0, end_time=None, **kwargs)
|
return concatenate_videoclips(clips).with_start(self.start)
|
Apply a transformation to a part of the clip.
Returns a new clip in which the function ``fx`` (clip->clip)
has been applied to the subclip between times `start_time` and `end_time`
(in seconds).
Examples
--------
>>> # The scene between times t=3s and t=6s in ``clip`` will be
>>> # played at half speed in ``new_clip``
>>> new_clip = clip.subfx(lambda c: c.multiply_speed(0.5), 3, 6)
|
Apply a transformation to a part of the clip.
| 581 | 605 |
def subfx(self, fx, start_time=0, end_time=None, **kwargs):
"""Apply a transformation to a part of the clip.
    Returns a new clip in which the function ``fx`` (clip->clip)
has been applied to the subclip between times `start_time` and `end_time`
(in seconds).
Examples
--------
    >>> # The scene between times t=3s and t=6s in ``clip`` will be
    >>> # played at half speed in ``new_clip``
    >>> new_clip = clip.subfx(lambda c: c.multiply_speed(0.5), 3, 6)
"""
left = None if (start_time == 0) else self.subclip(0, start_time)
center = self.subclip(start_time, end_time).fx(fx, **kwargs)
right = None if (end_time is None) else self.subclip(start_time=end_time)
clips = [clip for clip in [left, center, right] if clip is not None]
# beurk, have to find other solution
from moviepy.video.compositing.concatenate import concatenate_videoclips
return concatenate_videoclips(clips).with_start(self.start)
|
https://github.com/Zulko/moviepy/blob/858bb81fba7e09f1f562283ed6d1394db883b6c7/project46/moviepy/video/VideoClip.py#L581-L605
| 46 |
[
0,
1,
2,
3,
4,
5,
6,
7,
8,
9,
10,
11,
12,
13,
14,
15,
16,
17,
18,
19,
20,
21,
22,
23,
24
] | 100 |
[] | 0 | true | 72.389791 | 25 | 2 | 100 | 12 |
def subfx(self, fx, start_time=0, end_time=None, **kwargs):
left = None if (start_time == 0) else self.subclip(0, start_time)
center = self.subclip(start_time, end_time).fx(fx, **kwargs)
right = None if (end_time is None) else self.subclip(start_time=end_time)
clips = [clip for clip in [left, center, right] if clip is not None]
# beurk, have to find other solution
from moviepy.video.compositing.concatenate import concatenate_videoclips
return concatenate_videoclips(clips).with_start(self.start)
| 28,429 |
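A usage sketch for subfx with a homemade effect function, so it does not depend on any particular fx module being importable; the input path is hypothetical.
# Hedged example: mirror only the 3s-6s segment of the clip.
from moviepy.video.io.VideoFileClip import VideoFileClip
def mirror_fx(clip):
    # Toy effect: flip every frame left-right.
    return clip.image_transform(lambda frame: frame[:, ::-1])
clip = VideoFileClip("my_video.mp4")  # hypothetical input path
new_clip = clip.subfx(mirror_fx, start_time=3, end_time=6)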
Zulko/moviepy
|
858bb81fba7e09f1f562283ed6d1394db883b6c7
|
moviepy/video/VideoClip.py
|
VideoClip.image_transform
|
(self, image_func, apply_to=None)
|
return self.transform(lambda get_frame, t: image_func(get_frame(t)), apply_to)
|
Modifies the images of a clip by replacing the frame `get_frame(t)` by
another frame, `image_func(get_frame(t))`.
|
Modifies the images of a clip by replacing the frame `get_frame(t)` by
another frame, `image_func(get_frame(t))`.
| 609 | 614 |
def image_transform(self, image_func, apply_to=None):
"""Modifies the images of a clip by replacing the frame `get_frame(t)` by
another frame, `image_func(get_frame(t))`.
"""
apply_to = apply_to or []
return self.transform(lambda get_frame, t: image_func(get_frame(t)), apply_to)
|
https://github.com/Zulko/moviepy/blob/858bb81fba7e09f1f562283ed6d1394db883b6c7/project46/moviepy/video/VideoClip.py#L609-L614
| 46 |
[
0,
1,
2,
3,
4,
5
] | 100 |
[] | 0 | true | 72.389791 | 6 | 2 | 100 | 2 |
def image_transform(self, image_func, apply_to=None):
apply_to = apply_to or []
return self.transform(lambda get_frame, t: image_func(get_frame(t)), apply_to)
| 28,430 |
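A usage sketch for image_transform: the callback receives each frame as an HxWx3 uint8 numpy array and must return an array of the same kind.
# Hedged example: invert the colors of every frame.
from moviepy.video.io.VideoFileClip import VideoFileClip
def invert_colors(frame):
    return 255 - frame  # element-wise on the uint8 frame
clip = VideoFileClip("my_video.mp4")  # hypothetical input path
inverted = clip.image_transform(invert_colors)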
Zulko/moviepy
|
858bb81fba7e09f1f562283ed6d1394db883b6c7
|
moviepy/video/VideoClip.py
|
VideoClip.fill_array
|
(self, pre_array, shape=(0, 0))
|
return post_array
|
TODO: needs documentation.
|
TODO: needs documentation.
| 619 | 635 |
def fill_array(self, pre_array, shape=(0, 0)):
"""TODO: needs documentation."""
pre_shape = pre_array.shape
dx = shape[0] - pre_shape[0]
dy = shape[1] - pre_shape[1]
post_array = pre_array
if dx < 0:
post_array = pre_array[: shape[0]]
elif dx > 0:
x_1 = [[[1, 1, 1]] * pre_shape[1]] * dx
post_array = np.vstack((pre_array, x_1))
if dy < 0:
post_array = post_array[:, : shape[1]]
elif dy > 0:
x_1 = [[[1, 1, 1]] * dy] * post_array.shape[0]
post_array = np.hstack((post_array, x_1))
return post_array
|
https://github.com/Zulko/moviepy/blob/858bb81fba7e09f1f562283ed6d1394db883b6c7/project46/moviepy/video/VideoClip.py#L619-L635
| 46 |
[
0,
1
] | 11.764706 |
[
2,
3,
4,
5,
6,
7,
8,
9,
10,
11,
12,
13,
14,
15,
16
] | 88.235294 | false | 72.389791 | 17 | 5 | 11.764706 | 1 |
def fill_array(self, pre_array, shape=(0, 0)):
pre_shape = pre_array.shape
dx = shape[0] - pre_shape[0]
dy = shape[1] - pre_shape[1]
post_array = pre_array
if dx < 0:
post_array = pre_array[: shape[0]]
elif dx > 0:
x_1 = [[[1, 1, 1]] * pre_shape[1]] * dx
post_array = np.vstack((pre_array, x_1))
if dy < 0:
post_array = post_array[:, : shape[1]]
elif dy > 0:
x_1 = [[[1, 1, 1]] * dy] * post_array.shape[0]
post_array = np.hstack((post_array, x_1))
return post_array
| 28,431 |
Zulko/moviepy
|
858bb81fba7e09f1f562283ed6d1394db883b6c7
|
moviepy/video/VideoClip.py
|
VideoClip.blit_on
|
(self, picture, t)
|
return blit(im_img, picture, pos, mask=im_mask)
|
Returns the result of the blit of the clip's frame at time `t`
on the given `picture`, the position of the clip being given
by the clip's ``pos`` attribute. Meant for compositing.
|
Returns the result of the blit of the clip's frame at time `t`
on the given `picture`, the position of the clip being given
by the clip's ``pos`` attribute. Meant for compositing.
| 637 | 702 |
def blit_on(self, picture, t):
"""Returns the result of the blit of the clip's frame at time `t`
on the given `picture`, the position of the clip being given
by the clip's ``pos`` attribute. Meant for compositing.
"""
wf, hf = picture.size
ct = t - self.start # clip time
# GET IMAGE AND MASK IF ANY
img = self.get_frame(ct).astype("uint8")
im_img = Image.fromarray(img)
if self.mask is not None:
mask = (self.mask.get_frame(ct) * 255).astype("uint8")
im_mask = Image.fromarray(mask).convert("L")
if im_img.size != im_mask.size:
bg_size = (
max(im_img.size[0], im_mask.size[0]),
max(im_img.size[1], im_mask.size[1]),
)
im_img_bg = Image.new("RGB", bg_size, "black")
im_img_bg.paste(im_img, (0, 0))
im_mask_bg = Image.new("L", bg_size, 0)
im_mask_bg.paste(im_mask, (0, 0))
im_img, im_mask = im_img_bg, im_mask_bg
else:
im_mask = None
wi, hi = im_img.size
# SET POSITION
pos = self.pos(ct)
# preprocess short writings of the position
if isinstance(pos, str):
pos = {
"center": ["center", "center"],
"left": ["left", "center"],
"right": ["right", "center"],
"top": ["center", "top"],
"bottom": ["center", "bottom"],
}[pos]
else:
pos = list(pos)
# is the position relative (given in % of the clip's size) ?
if self.relative_pos:
for i, dim in enumerate([wf, hf]):
if not isinstance(pos[i], str):
pos[i] = dim * pos[i]
if isinstance(pos[0], str):
D = {"left": 0, "center": (wf - wi) / 2, "right": wf - wi}
pos[0] = D[pos[0]]
if isinstance(pos[1], str):
D = {"top": 0, "center": (hf - hi) / 2, "bottom": hf - hi}
pos[1] = D[pos[1]]
pos = map(int, pos)
return blit(im_img, picture, pos, mask=im_mask)
|
https://github.com/Zulko/moviepy/blob/858bb81fba7e09f1f562283ed6d1394db883b6c7/project46/moviepy/video/VideoClip.py#L637-L702
| 46 |
[
0,
1,
2,
3,
4,
5,
6,
7,
8,
9,
10,
11,
12,
13,
14,
15,
16,
17,
18,
19,
20,
21,
22,
23,
24,
25,
26,
27,
28,
29,
30,
31,
32,
33,
34,
35,
36,
37,
38,
39,
40,
41,
42,
43,
44,
45,
46,
47,
48,
49,
50,
51,
55,
56,
57,
58,
59,
60,
61,
62,
63,
64,
65
] | 95.454545 |
[
52,
53,
54
] | 4.545455 | false | 72.389791 | 66 | 9 | 95.454545 | 3 |
def blit_on(self, picture, t):
wf, hf = picture.size
ct = t - self.start # clip time
# GET IMAGE AND MASK IF ANY
img = self.get_frame(ct).astype("uint8")
im_img = Image.fromarray(img)
if self.mask is not None:
mask = (self.mask.get_frame(ct) * 255).astype("uint8")
im_mask = Image.fromarray(mask).convert("L")
if im_img.size != im_mask.size:
bg_size = (
max(im_img.size[0], im_mask.size[0]),
max(im_img.size[1], im_mask.size[1]),
)
im_img_bg = Image.new("RGB", bg_size, "black")
im_img_bg.paste(im_img, (0, 0))
im_mask_bg = Image.new("L", bg_size, 0)
im_mask_bg.paste(im_mask, (0, 0))
im_img, im_mask = im_img_bg, im_mask_bg
else:
im_mask = None
wi, hi = im_img.size
# SET POSITION
pos = self.pos(ct)
# preprocess short writings of the position
if isinstance(pos, str):
pos = {
"center": ["center", "center"],
"left": ["left", "center"],
"right": ["right", "center"],
"top": ["center", "top"],
"bottom": ["center", "bottom"],
}[pos]
else:
pos = list(pos)
# is the position relative (given in % of the clip's size) ?
if self.relative_pos:
for i, dim in enumerate([wf, hf]):
if not isinstance(pos[i], str):
pos[i] = dim * pos[i]
if isinstance(pos[0], str):
D = {"left": 0, "center": (wf - wi) / 2, "right": wf - wi}
pos[0] = D[pos[0]]
if isinstance(pos[1], str):
D = {"top": 0, "center": (hf - hi) / 2, "bottom": hf - hi}
pos[1] = D[pos[1]]
pos = map(int, pos)
return blit(im_img, picture, pos, mask=im_mask)
| 28,432 |
Zulko/moviepy
|
858bb81fba7e09f1f562283ed6d1394db883b6c7
|
moviepy/video/VideoClip.py
|
VideoClip.add_mask
|
(self)
|
Add a mask VideoClip to the VideoClip.
Returns a copy of the clip with a completely opaque mask
(made of ones). This makes computations slower compared to
having a None mask but can be useful in many cases.
Set ``has_constant_size`` to `False` for clips with moving
image size.
|
Add a mask VideoClip to the VideoClip.
| 704 | 723 |
def add_mask(self):
"""Add a mask VideoClip to the VideoClip.
Returns a copy of the clip with a completely opaque mask
(made of ones). This makes computations slower compared to
    having a None mask but can be useful in many cases.
    Set ``has_constant_size`` to `False` for clips with moving
    image size.
"""
if self.has_constant_size:
mask = ColorClip(self.size, 1.0, is_mask=True)
return self.with_mask(mask.with_duration(self.duration))
else:
def make_frame(t):
return np.ones(self.get_frame(t).shape[:2], dtype=float)
mask = VideoClip(is_mask=True, make_frame=make_frame)
return self.with_mask(mask.with_duration(self.duration))
|
https://github.com/Zulko/moviepy/blob/858bb81fba7e09f1f562283ed6d1394db883b6c7/project46/moviepy/video/VideoClip.py#L704-L723
| 46 |
[
0,
1,
2,
3,
4,
5,
6,
7,
8,
9,
10,
11,
12,
13,
14
] | 75 |
[
15,
16,
18,
19
] | 20 | false | 72.389791 | 20 | 3 | 80 | 8 |
def add_mask(self):
if self.has_constant_size:
mask = ColorClip(self.size, 1.0, is_mask=True)
return self.with_mask(mask.with_duration(self.duration))
else:
def make_frame(t):
return np.ones(self.get_frame(t).shape[:2], dtype=float)
mask = VideoClip(is_mask=True, make_frame=make_frame)
return self.with_mask(mask.with_duration(self.duration))
| 28,433 |
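A usage sketch for add_mask on a ColorClip; in the library these with_* setters are wrapped with @outplace, so they return modified copies, and with_opacity can then scale the opaque mask to make the whole clip semi-transparent.
# Hedged example: give a solid clip a mask, then fade it to 50% opacity.
from moviepy.video.VideoClip import ColorClip
clip = ColorClip((64, 48), color=(255, 0, 0), duration=2).add_mask()
faded = clip.with_opacity(0.5)  # the mask is multiplied by 0.5 everywhere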
|
Zulko/moviepy
|
858bb81fba7e09f1f562283ed6d1394db883b6c7
|
moviepy/video/VideoClip.py
|
VideoClip.on_color
|
(self, size=None, color=(0, 0, 0), pos=None, col_opacity=None)
|
return result
|
Place the clip on a colored background.
Returns a clip made of the current clip overlaid on a color
clip of a possibly bigger size. Can serve to flatten transparent
clips.
Parameters
----------
size
Size (width, height) in pixels of the final clip.
By default it will be the size of the current clip.
color
Background color of the final clip ([R,G,B]).
pos
Position of the clip in the final clip. 'center' is the default
col_opacity
Parameter in 0..1 indicating the opacity of the colored
background.
|
Place the clip on a colored background.
| 725 | 776 |
def on_color(self, size=None, color=(0, 0, 0), pos=None, col_opacity=None):
"""Place the clip on a colored background.
Returns a clip made of the current clip overlaid on a color
clip of a possibly bigger size. Can serve to flatten transparent
clips.
Parameters
----------
size
Size (width, height) in pixels of the final clip.
By default it will be the size of the current clip.
color
Background color of the final clip ([R,G,B]).
pos
Position of the clip in the final clip. 'center' is the default
col_opacity
Parameter in 0..1 indicating the opacity of the colored
background.
"""
from moviepy.video.compositing.CompositeVideoClip import CompositeVideoClip
if size is None:
size = self.size
if pos is None:
pos = "center"
if col_opacity is not None:
colorclip = ColorClip(
size, color=color, duration=self.duration
).with_opacity(col_opacity)
result = CompositeVideoClip([colorclip, self.with_position(pos)])
else:
result = CompositeVideoClip(
[self.with_position(pos)], size=size, bg_color=color
)
if (
isinstance(self, ImageClip)
and (not hasattr(pos, "__call__"))
and ((self.mask is None) or isinstance(self.mask, ImageClip))
):
new_result = result.to_ImageClip()
if result.mask is not None:
new_result.mask = result.mask.to_ImageClip()
return new_result.with_duration(result.duration)
return result
|
https://github.com/Zulko/moviepy/blob/858bb81fba7e09f1f562283ed6d1394db883b6c7/project46/moviepy/video/VideoClip.py#L725-L776
| 46 |
[
0,
1,
2,
3,
4,
5,
6,
7,
8,
9,
10,
11,
12,
13,
14,
15,
16,
17,
18,
19,
20,
21,
22,
23,
24,
25,
26,
28,
29,
30,
31,
32,
33,
34,
35,
36,
37,
38,
39,
40,
41,
42,
43,
44,
45,
46,
47,
49,
50,
51
] | 96.153846 |
[
27,
48
] | 3.846154 | false | 72.389791 | 52 | 9 | 96.153846 | 22 |
def on_color(self, size=None, color=(0, 0, 0), pos=None, col_opacity=None):
from moviepy.video.compositing.CompositeVideoClip import CompositeVideoClip
if size is None:
size = self.size
if pos is None:
pos = "center"
if col_opacity is not None:
colorclip = ColorClip(
size, color=color, duration=self.duration
).with_opacity(col_opacity)
result = CompositeVideoClip([colorclip, self.with_position(pos)])
else:
result = CompositeVideoClip(
[self.with_position(pos)], size=size, bg_color=color
)
if (
isinstance(self, ImageClip)
and (not hasattr(pos, "__call__"))
and ((self.mask is None) or isinstance(self.mask, ImageClip))
):
new_result = result.to_ImageClip()
if result.mask is not None:
new_result.mask = result.mask.to_ImageClip()
return new_result.with_duration(result.duration)
return result
| 28,434 |
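A usage sketch for on_color: a small clip is flattened onto a larger dark background, centered by default.
# Hedged example: place a 100x60 clip on a 320x240 gray canvas.
from moviepy.video.VideoClip import ColorClip
small = ColorClip((100, 60), color=(255, 255, 255), duration=1)
framed = small.on_color(size=(320, 240), color=(40, 40, 40), pos="center")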
Zulko/moviepy
|
858bb81fba7e09f1f562283ed6d1394db883b6c7
|
moviepy/video/VideoClip.py
|
VideoClip.with_make_frame
|
(self, mf)
|
Change the clip's ``get_frame``.
Returns a copy of the VideoClip instance, with the make_frame
attribute set to `mf`.
|
Change the clip's ``get_frame``.
| 779 | 786 |
def with_make_frame(self, mf):
"""Change the clip's ``get_frame``.
Returns a copy of the VideoClip instance, with the make_frame
attribute set to `mf`.
"""
self.make_frame = mf
self.size = self.get_frame(0).shape[:2][::-1]
|
https://github.com/Zulko/moviepy/blob/858bb81fba7e09f1f562283ed6d1394db883b6c7/project46/moviepy/video/VideoClip.py#L779-L786
| 46 |
[
0,
1,
2,
3,
4,
5,
6,
7
] | 100 |
[] | 0 | true | 72.389791 | 8 | 1 | 100 | 4 |
def with_make_frame(self, mf):
self.make_frame = mf
self.size = self.get_frame(0).shape[:2][::-1]
| 28,435 |
|
Zulko/moviepy
|
858bb81fba7e09f1f562283ed6d1394db883b6c7
|
moviepy/video/VideoClip.py
|
VideoClip.with_audio
|
(self, audioclip)
|
Attach an AudioClip to the VideoClip.
Returns a copy of the VideoClip instance, with the `audio`
attribute set to ``audio``, which must be an AudioClip instance.
|
Attach an AudioClip to the VideoClip.
| 789 | 795 |
def with_audio(self, audioclip):
"""Attach an AudioClip to the VideoClip.
Returns a copy of the VideoClip instance, with the `audio`
attribute set to ``audio``, which must be an AudioClip instance.
"""
self.audio = audioclip
|
https://github.com/Zulko/moviepy/blob/858bb81fba7e09f1f562283ed6d1394db883b6c7/project46/moviepy/video/VideoClip.py#L789-L795
| 46 |
[
0,
1,
2,
3,
4,
5,
6
] | 100 |
[] | 0 | true | 72.389791 | 7 | 1 | 100 | 4 |
def with_audio(self, audioclip):
self.audio = audioclip
| 28,436 |
|
Zulko/moviepy
|
858bb81fba7e09f1f562283ed6d1394db883b6c7
|
moviepy/video/VideoClip.py
|
VideoClip.with_mask
|
(self, mask)
|
Set the clip's mask.
Returns a copy of the VideoClip with the mask attribute set to
``mask``, which must be a greyscale (values in 0-1) VideoClip.
|
Set the clip's mask.
| 798 | 805 |
def with_mask(self, mask):
"""Set the clip's mask.
Returns a copy of the VideoClip with the mask attribute set to
``mask``, which must be a greyscale (values in 0-1) VideoClip.
"""
assert mask is None or mask.is_mask
self.mask = mask
|
https://github.com/Zulko/moviepy/blob/858bb81fba7e09f1f562283ed6d1394db883b6c7/project46/moviepy/video/VideoClip.py#L798-L805
| 46 |
[
0,
1,
2,
3,
4,
5,
6,
7
] | 100 |
[] | 0 | true | 72.389791 | 8 | 3 | 100 | 4 |
def with_mask(self, mask):
assert mask is None or mask.is_mask
self.mask = mask
| 28,437 |
|
Zulko/moviepy
|
858bb81fba7e09f1f562283ed6d1394db883b6c7
|
moviepy/video/VideoClip.py
|
VideoClip.with_opacity
|
(self, opacity)
|
Set the opacity/transparency level of the clip.
Returns a semi-transparent copy of the clip where the mask is
multiplied by ``opacity`` (any float, normally between 0 and 1).
|
Set the opacity/transparency level of the clip.
| 809 | 815 |
def with_opacity(self, opacity):
"""Set the opacity/transparency level of the clip.
Returns a semi-transparent copy of the clip where the mask is
    multiplied by ``opacity`` (any float, normally between 0 and 1).
"""
self.mask = self.mask.image_transform(lambda pic: opacity * pic)
|
https://github.com/Zulko/moviepy/blob/858bb81fba7e09f1f562283ed6d1394db883b6c7/project46/moviepy/video/VideoClip.py#L809-L815
| 46 |
[
0,
1,
2,
3,
4,
5,
6
] | 100 |
[] | 0 | true | 72.389791 | 7 | 1 | 100 | 4 |
def with_opacity(self, opacity):
self.mask = self.mask.image_transform(lambda pic: opacity * pic)
| 28,438 |
|
Zulko/moviepy
|
858bb81fba7e09f1f562283ed6d1394db883b6c7
|
moviepy/video/VideoClip.py
|
VideoClip.with_position
|
(self, pos, relative=False)
|
Set the clip's position in compositions.
Sets the position that the clip will have when included
in compositions. The argument ``pos`` can be either a pair
``(x,y)`` or a function ``t-> (x,y)``. `x` and `y` mark the
location of the top left corner of the clip, and can be
of several types.
Examples
--------
>>> clip.with_position((45,150)) # x=45, y=150
>>>
>>> # clip horizontally centered, at the top of the picture
>>> clip.with_position(("center","top"))
>>>
>>> # clip is at 40% of the width, 70% of the height:
>>> clip.with_position((0.4,0.7), relative=True)
>>>
>>> # clip's position is horizontally centered, and moving up !
>>> clip.with_position(lambda t: ('center', 50+t) )
|
Set the clip's position in compositions.
| 819 | 847 |
def with_position(self, pos, relative=False):
"""Set the clip's position in compositions.
Sets the position that the clip will have when included
    in compositions. The argument ``pos`` can be either a pair
``(x,y)`` or a function ``t-> (x,y)``. `x` and `y` mark the
location of the top left corner of the clip, and can be
of several types.
Examples
--------
>>> clip.with_position((45,150)) # x=45, y=150
>>>
>>> # clip horizontally centered, at the top of the picture
>>> clip.with_position(("center","top"))
>>>
>>> # clip is at 40% of the width, 70% of the height:
>>> clip.with_position((0.4,0.7), relative=True)
>>>
>>> # clip's position is horizontally centered, and moving up !
>>> clip.with_position(lambda t: ('center', 50+t) )
"""
self.relative_pos = relative
if hasattr(pos, "__call__"):
self.pos = pos
else:
self.pos = lambda t: pos
|
https://github.com/Zulko/moviepy/blob/858bb81fba7e09f1f562283ed6d1394db883b6c7/project46/moviepy/video/VideoClip.py#L819-L847
| 46 |
[
0,
1,
2,
3,
4,
5,
6,
7,
8,
9,
10,
11,
12,
13,
14,
15,
16,
17,
18,
19,
20,
21,
22,
23,
24,
25,
26,
27,
28
] | 100 |
[] | 0 | true | 72.389791 | 29 | 2 | 100 | 21 |
def with_position(self, pos, relative=False):
self.relative_pos = relative
if hasattr(pos, "__call__"):
self.pos = pos
else:
self.pos = lambda t: pos
| 28,439 |
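A usage sketch for with_position inside a CompositeVideoClip, using a time-dependent position function.
# Hedged example: a white dot drifting downward over a black background.
from moviepy.video.VideoClip import ColorClip
from moviepy.video.compositing.CompositeVideoClip import CompositeVideoClip
background = ColorClip((320, 240), color=(0, 0, 0), duration=3)
dot = ColorClip((20, 20), color=(255, 255, 255), duration=3)
moving_dot = dot.with_position(lambda t: (80, 60 + 30 * t))  # moves 30 px/s downward
video = CompositeVideoClip([background, moving_dot])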
|
Zulko/moviepy
|
858bb81fba7e09f1f562283ed6d1394db883b6c7
|
moviepy/video/VideoClip.py
|
VideoClip.with_layer
|
(self, layer)
|
Set the clip's layer in compositions. Clips with a greater ``layer``
attribute will be displayed on top of others.
Note: Only has effect when the clip is used in a CompositeVideoClip.
|
Set the clip's layer in compositions. Clips with a greater ``layer``
attribute will be displayed on top of others.
| 851 | 857 |
def with_layer(self, layer):
"""Set the clip's layer in compositions. Clips with a greater ``layer``
attribute will be displayed on top of others.
Note: Only has effect when the clip is used in a CompositeVideoClip.
"""
self.layer = layer
|
https://github.com/Zulko/moviepy/blob/858bb81fba7e09f1f562283ed6d1394db883b6c7/project46/moviepy/video/VideoClip.py#L851-L857
| 46 |
[
0,
1,
2,
3,
4,
5,
6
] | 100 |
[] | 0 | true | 72.389791 | 7 | 1 | 100 | 4 |
def with_layer(self, layer):
self.layer = layer
| 28,440 |
|
Zulko/moviepy
|
858bb81fba7e09f1f562283ed6d1394db883b6c7
|
moviepy/video/VideoClip.py
|
VideoClip.to_ImageClip
|
(self, t=0, with_mask=True, duration=None)
|
return new_clip
|
Returns an ImageClip made out of the clip's frame at time ``t``,
which can be expressed in seconds (15.35), in (min, sec),
in (hour, min, sec), or as a string: '01:03:05.35'.
|
Returns an ImageClip made out of the clip's frame at time ``t``,
which can be expressed in seconds (15.35), in (min, sec),
in (hour, min, sec), or as a string: '01:03:05.35'.
| 863 | 872 |
def to_ImageClip(self, t=0, with_mask=True, duration=None):
"""
Returns an ImageClip made out of the clip's frame at time ``t``,
which can be expressed in seconds (15.35), in (min, sec),
in (hour, min, sec), or as a string: '01:03:05.35'.
"""
new_clip = ImageClip(self.get_frame(t), is_mask=self.is_mask, duration=duration)
if with_mask and self.mask is not None:
new_clip.mask = self.mask.to_ImageClip(t)
return new_clip
|
https://github.com/Zulko/moviepy/blob/858bb81fba7e09f1f562283ed6d1394db883b6c7/project46/moviepy/video/VideoClip.py#L863-L872
| 46 |
[
0,
1,
2,
3,
4,
5,
6,
7,
8,
9
] | 100 |
[] | 0 | true | 72.389791 | 10 | 3 | 100 | 3 |
def to_ImageClip(self, t=0, with_mask=True, duration=None):
new_clip = ImageClip(self.get_frame(t), is_mask=self.is_mask, duration=duration)
if with_mask and self.mask is not None:
new_clip.mask = self.mask.to_ImageClip(t)
return new_clip
| 28,441 |
Zulko/moviepy
|
858bb81fba7e09f1f562283ed6d1394db883b6c7
|
moviepy/video/VideoClip.py
|
VideoClip.to_mask
|
(self, canal=0)
|
Return a mask video clip made from the clip.
|
Return a mask video clip made from the clip.
| 874 | 881 |
def to_mask(self, canal=0):
"""Return a mask a video clip made from the clip."""
if self.is_mask:
return self
else:
new_clip = self.image_transform(lambda pic: 1.0 * pic[:, :, canal] / 255)
new_clip.is_mask = True
return new_clip
|
https://github.com/Zulko/moviepy/blob/858bb81fba7e09f1f562283ed6d1394db883b6c7/project46/moviepy/video/VideoClip.py#L874-L881
| 46 |
[
0,
1
] | 25 |
[
2,
3,
5,
6,
7
] | 62.5 | false | 72.389791 | 8 | 2 | 37.5 | 1 |
def to_mask(self, canal=0):
if self.is_mask:
return self
else:
new_clip = self.image_transform(lambda pic: 1.0 * pic[:, :, canal] / 255)
new_clip.is_mask = True
return new_clip
| 28,442 |
|
Zulko/moviepy
|
858bb81fba7e09f1f562283ed6d1394db883b6c7
|
moviepy/video/VideoClip.py
|
VideoClip.to_RGB
|
(self)
|
Return a non-mask video clip made from the mask video clip.
|
Return a non-mask video clip made from the mask video clip.
| 883 | 892 |
def to_RGB(self):
"""Return a non-mask video clip made from the mask video clip."""
if self.is_mask:
new_clip = self.image_transform(
lambda pic: np.dstack(3 * [255 * pic]).astype("uint8")
)
new_clip.is_mask = False
return new_clip
else:
return self
|
https://github.com/Zulko/moviepy/blob/858bb81fba7e09f1f562283ed6d1394db883b6c7/project46/moviepy/video/VideoClip.py#L883-L892
| 46 |
[
0,
1
] | 20 |
[
2,
3,
6,
7,
9
] | 50 | false | 72.389791 | 10 | 2 | 50 | 1 |
def to_RGB(self):
if self.is_mask:
new_clip = self.image_transform(
lambda pic: np.dstack(3 * [255 * pic]).astype("uint8")
)
new_clip.is_mask = False
return new_clip
else:
return self
| 28,443 |
|
Zulko/moviepy
|
858bb81fba7e09f1f562283ed6d1394db883b6c7
|
moviepy/video/VideoClip.py
|
VideoClip.without_audio
|
(self)
|
Remove the clip's audio.
Return a copy of the clip with audio set to None.
|
Remove the clip's audio.
| 898 | 903 |
def without_audio(self):
"""Remove the clip's audio.
Return a copy of the clip with audio set to None.
"""
self.audio = None
|
https://github.com/Zulko/moviepy/blob/858bb81fba7e09f1f562283ed6d1394db883b6c7/project46/moviepy/video/VideoClip.py#L898-L903
| 46 |
[
0,
1,
2,
3,
4,
5
] | 100 |
[] | 0 | true | 72.389791 | 6 | 1 | 100 | 3 |
def without_audio(self):
self.audio = None
| 28,444 |
|
Zulko/moviepy
|
858bb81fba7e09f1f562283ed6d1394db883b6c7
|
moviepy/video/VideoClip.py
|
VideoClip.afx
|
(self, fun, *args, **kwargs)
|
Transform the clip's audio.
Return a new clip whose audio has been transformed by ``fun``.
|
Transform the clip's audio.
| 906 | 911 |
def afx(self, fun, *args, **kwargs):
"""Transform the clip's audio.
Return a new clip whose audio has been transformed by ``fun``.
"""
self.audio = self.audio.fx(fun, *args, **kwargs)
|
https://github.com/Zulko/moviepy/blob/858bb81fba7e09f1f562283ed6d1394db883b6c7/project46/moviepy/video/VideoClip.py#L906-L911
| 46 |
[
0,
1,
2,
3,
4
] | 83.333333 |
[
5
] | 16.666667 | false | 72.389791 | 6 | 1 | 83.333333 | 3 |
def afx(self, fun, *args, **kwargs):
self.audio = self.audio.fx(fun, *args, **kwargs)
| 28,445 |
|
Zulko/moviepy
|
858bb81fba7e09f1f562283ed6d1394db883b6c7
|
moviepy/video/VideoClip.py
|
DataVideoClip.__init__
|
(self, data, data_to_frame, fps, is_mask=False, has_constant_size=True)
| 931 | 945 |
def __init__(self, data, data_to_frame, fps, is_mask=False, has_constant_size=True):
self.data = data
self.data_to_frame = data_to_frame
self.fps = fps
def make_frame(t):
return self.data_to_frame(self.data[int(self.fps * t)])
VideoClip.__init__(
self,
make_frame,
is_mask=is_mask,
duration=1.0 * len(data) / fps,
has_constant_size=has_constant_size,
)
|
https://github.com/Zulko/moviepy/blob/858bb81fba7e09f1f562283ed6d1394db883b6c7/project46/moviepy/video/VideoClip.py#L931-L945
| 46 |
[
0
] | 6.666667 |
[
1,
2,
3,
5,
6,
8
] | 40 | false | 72.389791 | 15 | 2 | 60 | 0 |
def __init__(self, data, data_to_frame, fps, is_mask=False, has_constant_size=True):
self.data = data
self.data_to_frame = data_to_frame
self.fps = fps
def make_frame(t):
return self.data_to_frame(self.data[int(self.fps * t)])
VideoClip.__init__(
self,
make_frame,
is_mask=is_mask,
duration=1.0 * len(data) / fps,
has_constant_size=has_constant_size,
)
| 28,446 |
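A usage sketch for DataVideoClip: each element of the data list is turned into one frame by data_to_frame, and the duration follows from len(data) / fps.
# Hedged example: a 2-second ramp from black to white, one frame per data point.
import numpy as np
from moviepy.video.VideoClip import DataVideoClip
def level_to_frame(level):
    return np.full((48, 64, 3), level, dtype=np.uint8)  # uniform gray frame
levels = list(range(0, 256, 16))  # 16 data points
ramp = DataVideoClip(levels, level_to_frame, fps=8)  # duration = 16 / 8 = 2 s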
|||
Zulko/moviepy
|
858bb81fba7e09f1f562283ed6d1394db883b6c7
|
moviepy/video/VideoClip.py
|
UpdatedVideoClip.__init__
|
(self, world, is_mask=False, duration=None)
| 980 | 990 |
def __init__(self, world, is_mask=False, duration=None):
self.world = world
def make_frame(t):
while self.world.clip_t < t:
world.update()
return world.to_frame()
VideoClip.__init__(
self, make_frame=make_frame, is_mask=is_mask, duration=duration
)
|
https://github.com/Zulko/moviepy/blob/858bb81fba7e09f1f562283ed6d1394db883b6c7/project46/moviepy/video/VideoClip.py#L980-L990
| 46 |
[
0
] | 9.090909 |
[
1,
3,
4,
5,
6,
8
] | 54.545455 | false | 72.389791 | 11 | 3 | 45.454545 | 0 |
def __init__(self, world, is_mask=False, duration=None):
self.world = world
def make_frame(t):
while self.world.clip_t < t:
world.update()
return world.to_frame()
VideoClip.__init__(
self, make_frame=make_frame, is_mask=is_mask, duration=duration
)
| 28,447 |
|||
Zulko/moviepy
|
858bb81fba7e09f1f562283ed6d1394db883b6c7
|
moviepy/video/VideoClip.py
|
ImageClip.__init__
|
(
self, img, is_mask=False, transparent=True, fromalpha=False, duration=None
)
| 1,037 | 1,063 |
def __init__(
self, img, is_mask=False, transparent=True, fromalpha=False, duration=None
):
VideoClip.__init__(self, is_mask=is_mask, duration=duration)
if not isinstance(img, np.ndarray):
# img is a string or path-like object, so read it in from disk
img = imread(img)
if len(img.shape) == 3: # img is (now) a RGB(a) numpy array
if img.shape[2] == 4:
if fromalpha:
img = 1.0 * img[:, :, 3] / 255
elif is_mask:
img = 1.0 * img[:, :, 0] / 255
elif transparent:
self.mask = ImageClip(1.0 * img[:, :, 3] / 255, is_mask=True)
img = img[:, :, :3]
elif is_mask:
img = 1.0 * img[:, :, 0] / 255
# if the image was just a 2D mask, it should arrive here
# unchanged
self.make_frame = lambda t: img
self.size = img.shape[:2][::-1]
self.img = img
|
https://github.com/Zulko/moviepy/blob/858bb81fba7e09f1f562283ed6d1394db883b6c7/project46/moviepy/video/VideoClip.py#L1037-L1063
| 46 |
[
0,
3,
4,
5,
6,
7,
8,
9,
10,
11,
12,
14,
15,
16,
17,
18,
19,
23,
24,
25,
26
] | 77.777778 |
[
13,
20
] | 7.407407 | false | 72.389791 | 27 | 8 | 92.592593 | 0 |
def __init__(
self, img, is_mask=False, transparent=True, fromalpha=False, duration=None
):
VideoClip.__init__(self, is_mask=is_mask, duration=duration)
if not isinstance(img, np.ndarray):
# img is a string or path-like object, so read it in from disk
img = imread(img)
if len(img.shape) == 3: # img is (now) a RGB(a) numpy array
if img.shape[2] == 4:
if fromalpha:
img = 1.0 * img[:, :, 3] / 255
elif is_mask:
img = 1.0 * img[:, :, 0] / 255
elif transparent:
self.mask = ImageClip(1.0 * img[:, :, 3] / 255, is_mask=True)
img = img[:, :, :3]
elif is_mask:
img = 1.0 * img[:, :, 0] / 255
# if the image was just a 2D mask, it should arrive here
# unchanged
self.make_frame = lambda t: img
self.size = img.shape[:2][::-1]
self.img = img
| 28,448 |
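A usage sketch for ImageClip: it accepts either a numpy array or a path readable by imageio, and the duration only matters once the clip is composited or written out.
# Hedged example: a still clip built from a raw array.
import numpy as np
from moviepy.video.VideoClip import ImageClip
array_clip = ImageClip(np.zeros((90, 160, 3), dtype=np.uint8), duration=2)
# file_clip = ImageClip("logo.png", duration=2)  # "logo.png" is a hypothetical path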
|||
Zulko/moviepy
|
858bb81fba7e09f1f562283ed6d1394db883b6c7
|
moviepy/video/VideoClip.py
|
ImageClip.transform
|
(self, func, apply_to=None, keep_duration=True)
|
return new_clip
|
General transformation filter.
Equivalent to VideoClip.transform. The result is no longer an
ImageClip, but a VideoClip (since it may be animated).
|
General transformation filter.
| 1,065 | 1,079 |
def transform(self, func, apply_to=None, keep_duration=True):
"""General transformation filter.
    Equivalent to VideoClip.transform. The result is no longer an
    ImageClip, but a VideoClip (since it may be animated).
"""
if apply_to is None:
apply_to = []
# When we use transform on an image clip it may become animated.
# Therefore the result is not an ImageClip, just a VideoClip.
new_clip = VideoClip.transform(
self, func, apply_to=apply_to, keep_duration=keep_duration
)
new_clip.__class__ = VideoClip
return new_clip
|
https://github.com/Zulko/moviepy/blob/858bb81fba7e09f1f562283ed6d1394db883b6c7/project46/moviepy/video/VideoClip.py#L1065-L1079
| 46 |
[
0,
1,
2,
3,
4,
5,
6,
7,
8,
9,
10,
11,
12,
13,
14
] | 100 |
[] | 0 | true | 72.389791 | 15 | 2 | 100 | 4 |
def transform(self, func, apply_to=None, keep_duration=True):
if apply_to is None:
apply_to = []
# When we use transform on an image clip it may become animated.
# Therefore the result is not an ImageClip, just a VideoClip.
new_clip = VideoClip.transform(
self, func, apply_to=apply_to, keep_duration=keep_duration
)
new_clip.__class__ = VideoClip
return new_clip
| 28,449 |
Zulko/moviepy
|
858bb81fba7e09f1f562283ed6d1394db883b6c7
|
moviepy/video/VideoClip.py
|
ImageClip.image_transform
|
(self, image_func, apply_to=None)
|
Image-transformation filter.
Does the same as VideoClip.image_transform, but for ImageClip the
transformed clip is computed once and for all at the beginning,
and not for each 'frame'.
|
Image-transformation filter.
| 1,082 | 1,100 |
def image_transform(self, image_func, apply_to=None):
"""Image-transformation filter.
Does the same as VideoClip.image_transform, but for ImageClip the
transformed clip is computed once and for all at the beginning,
and not for each 'frame'.
"""
if apply_to is None:
apply_to = []
arr = image_func(self.get_frame(0))
self.size = arr.shape[:2][::-1]
self.make_frame = lambda t: arr
self.img = arr
for attr in apply_to:
a = getattr(self, attr, None)
if a is not None:
new_a = a.image_transform(image_func)
setattr(self, attr, new_a)
|
https://github.com/Zulko/moviepy/blob/858bb81fba7e09f1f562283ed6d1394db883b6c7/project46/moviepy/video/VideoClip.py#L1082-L1100
| 46 |
[
0,
1,
2,
3,
4,
5,
6,
7,
8,
9,
10,
11,
12,
13,
14
] | 78.947368 |
[
15,
16,
17,
18
] | 21.052632 | false | 72.389791 | 19 | 4 | 78.947368 | 5 |
def image_transform(self, image_func, apply_to=None):
if apply_to is None:
apply_to = []
arr = image_func(self.get_frame(0))
self.size = arr.shape[:2][::-1]
self.make_frame = lambda t: arr
self.img = arr
for attr in apply_to:
a = getattr(self, attr, None)
if a is not None:
new_a = a.image_transform(image_func)
setattr(self, attr, new_a)
| 28,450 |
|
Zulko/moviepy
|
858bb81fba7e09f1f562283ed6d1394db883b6c7
|
moviepy/video/VideoClip.py
|
ImageClip.time_transform
|
(self, time_func, apply_to=None, keep_duration=False)
|
Time-transformation filter.
Applies a transformation to the clip's timeline
(see Clip.time_transform).
This method does nothing for ImageClips (but it may affect their
masks or their audios). The result is still an ImageClip.
|
Time-transformation filter.
| 1,103 | 1,118 |
def time_transform(self, time_func, apply_to=None, keep_duration=False):
"""Time-transformation filter.
Applies a transformation to the clip's timeline
(see Clip.time_transform).
This method does nothing for ImageClips (but it may affect their
masks or their audios). The result is still an ImageClip.
"""
if apply_to is None:
apply_to = ["mask", "audio"]
for attr in apply_to:
a = getattr(self, attr, None)
if a is not None:
new_a = a.time_transform(time_func)
setattr(self, attr, new_a)
|
https://github.com/Zulko/moviepy/blob/858bb81fba7e09f1f562283ed6d1394db883b6c7/project46/moviepy/video/VideoClip.py#L1103-L1118
| 46 |
[
0,
1,
2,
3,
4,
5,
6,
7,
8,
9,
11
] | 68.75 |
[
10,
12,
13,
14,
15
] | 31.25 | false | 72.389791 | 16 | 4 | 68.75 | 7 |
def time_transform(self, time_func, apply_to=None, keep_duration=False):
if apply_to is None:
apply_to = ["mask", "audio"]
for attr in apply_to:
a = getattr(self, attr, None)
if a is not None:
new_a = a.time_transform(time_func)
setattr(self, attr, new_a)
| 28,451 |
|
Zulko/moviepy
|
858bb81fba7e09f1f562283ed6d1394db883b6c7
|
moviepy/video/VideoClip.py
|
ColorClip.__init__
|
(self, size, color=None, is_mask=False, duration=None)
| 1,140 | 1,158 |
def __init__(self, size, color=None, is_mask=False, duration=None):
w, h = size
if is_mask:
shape = (h, w)
if color is None:
color = 0
elif not np.isscalar(color):
raise Exception("Color has to be a scalar when mask is true")
else:
if color is None:
color = (0, 0, 0)
elif not hasattr(color, "__getitem__"):
raise Exception("Color has to contain RGB of the clip")
shape = (h, w, len(color))
super().__init__(
np.tile(color, w * h).reshape(shape), is_mask=is_mask, duration=duration
)
|
https://github.com/Zulko/moviepy/blob/858bb81fba7e09f1f562283ed6d1394db883b6c7/project46/moviepy/video/VideoClip.py#L1140-L1158
| 46 |
[
0,
1,
2,
3,
4,
5,
6,
7,
8,
10,
11,
12,
13,
14,
15,
16
] | 84.210526 |
[] | 0 | false | 72.389791 | 19 | 6 | 100 | 0 |
def __init__(self, size, color=None, is_mask=False, duration=None):
w, h = size
if is_mask:
shape = (h, w)
if color is None:
color = 0
elif not np.isscalar(color):
raise Exception("Color has to be a scalar when mask is true")
else:
if color is None:
color = (0, 0, 0)
elif not hasattr(color, "__getitem__"):
raise Exception("Color has to contain RGB of the clip")
shape = (h, w, len(color))
super().__init__(
np.tile(color, w * h).reshape(shape), is_mask=is_mask, duration=duration
)
| 28,452 |
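A usage sketch for ColorClip: an RGB tuple gives a solid background clip, while a scalar plus is_mask=True gives a constant mask.
# Hedged example: a solid background and a half-transparent mask of the same size.
from moviepy.video.VideoClip import ColorClip
background = ColorClip((1280, 720), color=(0, 64, 128), duration=5)
half_mask = ColorClip((1280, 720), color=0.5, is_mask=True, duration=5)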
|||
Zulko/moviepy
|
858bb81fba7e09f1f562283ed6d1394db883b6c7
|
moviepy/video/VideoClip.py
|
TextClip.__init__
|
(
self,
text=None,
filename=None,
size=None,
color="black",
bg_color="transparent",
font_size=None,
font="Courier",
stroke_color=None,
stroke_width=1,
method="label",
kerning=None,
align="center",
interline=None,
tempfilename=None,
temptxt=None,
transparent=True,
remove_temp=True,
print_cmd=False,
)
| 1,226 | 1,330 |
def __init__(
self,
text=None,
filename=None,
size=None,
color="black",
bg_color="transparent",
font_size=None,
font="Courier",
stroke_color=None,
stroke_width=1,
method="label",
kerning=None,
align="center",
interline=None,
tempfilename=None,
temptxt=None,
transparent=True,
remove_temp=True,
print_cmd=False,
):
if text is not None:
if temptxt is None:
temptxt_fd, temptxt = tempfile.mkstemp(suffix=".txt")
try: # only in Python3 will this work
os.write(temptxt_fd, bytes(text, "UTF8"))
except TypeError: # oops, fall back to Python2
os.write(temptxt_fd, text)
os.close(temptxt_fd)
text = "@" + temptxt
elif filename is not None:
# use a file instead of a text.
text = "@" + filename
else:
raise ValueError(
"You must provide either 'text' or 'filename' arguments to TextClip"
)
if size is not None:
size = (
"" if size[0] is None else str(size[0]),
"" if size[1] is None else str(size[1]),
)
cmd = [
IMAGEMAGICK_BINARY,
"-background",
bg_color,
"-fill",
color,
"-font",
font,
]
if font_size is not None:
cmd += ["-pointsize", "%d" % font_size]
if kerning is not None:
cmd += ["-kerning", "%0.1f" % kerning]
if stroke_color is not None:
cmd += ["-stroke", stroke_color, "-strokewidth", "%.01f" % stroke_width]
if size is not None:
cmd += ["-size", "%sx%s" % (size[0], size[1])]
if align is not None:
cmd += ["-gravity", align]
if interline is not None:
cmd += ["-interline-spacing", "%d" % interline]
if tempfilename is None:
tempfile_fd, tempfilename = tempfile.mkstemp(suffix=".png")
os.close(tempfile_fd)
cmd += [
"%s:%s" % (method, text),
"-type",
"truecolormatte",
"PNG32:%s" % tempfilename,
]
if print_cmd:
print(" ".join(cmd))
try:
subprocess_call(cmd, logger=None)
except (IOError, OSError) as err:
error = (
f"MoviePy Error: creation of {filename} failed because of the "
f"following error:\n\n{err}.\n\n."
"This error can be due to the fact that ImageMagick "
"is not installed on your computer, or (for Windows "
"users) that you didn't specify the path to the "
"ImageMagick binary. Check the documentation."
)
raise IOError(error)
ImageClip.__init__(self, tempfilename, transparent=transparent)
self.text = text
self.color = color
self.stroke_color = stroke_color
if remove_temp:
if tempfilename is not None and os.path.exists(tempfilename):
os.remove(tempfilename)
if temptxt is not None and os.path.exists(temptxt):
os.remove(temptxt)
|
https://github.com/Zulko/moviepy/blob/858bb81fba7e09f1f562283ed6d1394db883b6c7/project46/moviepy/video/VideoClip.py#L1226-L1330
| 46 |
[
0,
21,
22,
31,
32,
35
] | 5.714286 |
[
23,
24,
25,
26,
27,
28,
29,
30,
33,
39,
40,
45,
55,
56,
57,
58,
59,
60,
61,
62,
63,
64,
65,
66,
68,
69,
70,
72,
79,
80,
82,
83,
84,
85,
93,
95,
96,
97,
98,
100,
101,
102,
103,
104
] | 41.904762 | false | 72.389791 | 105 | 20 | 58.095238 | 0 |
def __init__(
self,
text=None,
filename=None,
size=None,
color="black",
bg_color="transparent",
font_size=None,
font="Courier",
stroke_color=None,
stroke_width=1,
method="label",
kerning=None,
align="center",
interline=None,
tempfilename=None,
temptxt=None,
transparent=True,
remove_temp=True,
print_cmd=False,
):
if text is not None:
if temptxt is None:
temptxt_fd, temptxt = tempfile.mkstemp(suffix=".txt")
try: # only in Python3 will this work
os.write(temptxt_fd, bytes(text, "UTF8"))
except TypeError: # oops, fall back to Python2
os.write(temptxt_fd, text)
os.close(temptxt_fd)
text = "@" + temptxt
elif filename is not None:
# use a file instead of a text.
text = "@" + filename
else:
raise ValueError(
"You must provide either 'text' or 'filename' arguments to TextClip"
)
if size is not None:
size = (
"" if size[0] is None else str(size[0]),
"" if size[1] is None else str(size[1]),
)
cmd = [
IMAGEMAGICK_BINARY,
"-background",
bg_color,
"-fill",
color,
"-font",
font,
]
if font_size is not None:
cmd += ["-pointsize", "%d" % font_size]
if kerning is not None:
cmd += ["-kerning", "%0.1f" % kerning]
if stroke_color is not None:
cmd += ["-stroke", stroke_color, "-strokewidth", "%.01f" % stroke_width]
if size is not None:
cmd += ["-size", "%sx%s" % (size[0], size[1])]
if align is not None:
cmd += ["-gravity", align]
if interline is not None:
cmd += ["-interline-spacing", "%d" % interline]
if tempfilename is None:
tempfile_fd, tempfilename = tempfile.mkstemp(suffix=".png")
os.close(tempfile_fd)
cmd += [
"%s:%s" % (method, text),
"-type",
"truecolormatte",
"PNG32:%s" % tempfilename,
]
if print_cmd:
print(" ".join(cmd))
try:
subprocess_call(cmd, logger=None)
except (IOError, OSError) as err:
error = (
f"MoviePy Error: creation of {filename} failed because of the "
f"following error:\n\n{err}.\n\n."
"This error can be due to the fact that ImageMagick "
"is not installed on your computer, or (for Windows "
"users) that you didn't specify the path to the "
"ImageMagick binary. Check the documentation."
)
raise IOError(error)
ImageClip.__init__(self, tempfilename, transparent=transparent)
self.text = text
self.color = color
self.stroke_color = stroke_color
if remove_temp:
if tempfilename is not None and os.path.exists(tempfilename):
os.remove(tempfilename)
if temptxt is not None and os.path.exists(temptxt):
os.remove(temptxt)
| 28,453 |
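A usage sketch for TextClip: it shells out to ImageMagick, so IMAGEMAGICK_BINARY must point to a working binary and the font must be one ImageMagick knows about (see TextClip.list("font")).
# Hedged example: a white-on-black title card shown for three seconds.
from moviepy.video.VideoClip import TextClip
title = TextClip(
    "Hello world",
    font="Courier",
    font_size=48,
    color="white",
    bg_color="black",
    method="label",
).with_duration(3)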
|||
Zulko/moviepy
|
858bb81fba7e09f1f562283ed6d1394db883b6c7
|
moviepy/video/VideoClip.py
|
TextClip.list
|
(arg)
|
Returns a list of all valid entries for the ``font`` or ``color`` argument of
``TextClip``.
|
Returns a list of all valid entries for the ``font`` or ``color`` argument of
``TextClip``.
| 1,333 | 1,356 |
def list(arg):
"""Returns a list of all valid entries for the ``font`` or ``color`` argument of
``TextClip``.
"""
popen_params = cross_platform_popen_params(
{"stdout": sp.PIPE, "stderr": sp.DEVNULL, "stdin": sp.DEVNULL}
)
process = sp.Popen(
[IMAGEMAGICK_BINARY, "-list", arg], encoding="utf-8", **popen_params
)
result = process.communicate()[0]
lines = result.splitlines()
if arg == "font":
# Slice removes first 8 characters: " Font: "
return [line[8:] for line in lines if line.startswith(" Font:")]
elif arg == "color":
# Each line is of the format "aqua srgb(0,255,255) SVG" so split
# on space and take the first item to get the color name.
# The first 5 lines are header information, not colors, so ignore
return [line.split(" ")[0] for line in lines[5:]]
else:
raise Exception("Moviepy Error: Argument must equal 'font' or 'color'")
|
https://github.com/Zulko/moviepy/blob/858bb81fba7e09f1f562283ed6d1394db883b6c7/project46/moviepy/video/VideoClip.py#L1333-L1356
| 46 |
[
0,
1,
2,
3
] | 16.666667 |
[
4,
8,
11,
12,
14,
16,
17,
21,
23
] | 37.5 | false | 72.389791 | 24 | 5 | 62.5 | 2 |
def list(arg):
popen_params = cross_platform_popen_params(
{"stdout": sp.PIPE, "stderr": sp.DEVNULL, "stdin": sp.DEVNULL}
)
process = sp.Popen(
[IMAGEMAGICK_BINARY, "-list", arg], encoding="utf-8", **popen_params
)
result = process.communicate()[0]
lines = result.splitlines()
if arg == "font":
# Slice removes first 8 characters: " Font: "
return [line[8:] for line in lines if line.startswith(" Font:")]
elif arg == "color":
# Each line is of the format "aqua srgb(0,255,255) SVG" so split
# on space and take the first item to get the color name.
# The first 5 lines are header information, not colors, so ignore
return [line.split(" ")[0] for line in lines[5:]]
else:
raise Exception("Moviepy Error: Argument must equal 'font' or 'color'")
| 28,454 |
|
Zulko/moviepy
|
858bb81fba7e09f1f562283ed6d1394db883b6c7
|
moviepy/video/VideoClip.py
|
TextClip.search
|
(string, arg)
|
return [name for name in names_list if string in name.lower()]
|
Returns the list of all valid entries which contain ``string`` for the
argument ``arg`` of ``TextClip``, for instance
>>> # Find all the available fonts which contain "Courier"
>>> print(TextClip.search('Courier', 'font'))
|
Returns the list of all valid entries which contain ``string`` for the
argument ``arg`` of ``TextClip``, for instance
| 1,359 | 1,368 |
def search(string, arg):
"""Returns the of all valid entries which contain ``string`` for the
argument ``arg`` of ``TextClip``, for instance
>>> # Find all the available fonts which contain "Courier"
>>> print(TextClip.search('Courier', 'font'))
"""
string = string.lower()
names_list = TextClip.list(arg)
return [name for name in names_list if string in name.lower()]
|
https://github.com/Zulko/moviepy/blob/858bb81fba7e09f1f562283ed6d1394db883b6c7/project46/moviepy/video/VideoClip.py#L1359-L1368
| 46 |
[
0,
1,
2,
3,
4,
5,
6
] | 70 |
[
7,
8,
9
] | 30 | false | 72.389791 | 10 | 2 | 70 | 5 |
def search(string, arg):
string = string.lower()
names_list = TextClip.list(arg)
return [name for name in names_list if string in name.lower()]
| 28,455 |
Zulko/moviepy
|
858bb81fba7e09f1f562283ed6d1394db883b6c7
|
moviepy/video/VideoClip.py
|
BitmapClip.__init__
|
(
self, bitmap_frames, *, fps=None, duration=None, color_dict=None, is_mask=False
)
|
Creates a VideoClip object from a bitmap representation. Primarily used
in the test suite.
Parameters
----------
bitmap_frames
A list of frames. Each frame is a list of strings. Each string
represents a row of colors. Each color represents an (r, g, b) tuple.
Example input (2 frames, 5x3 pixel size)::
[["RRRRR",
"RRBRR",
"RRBRR"],
["RGGGR",
"RGGGR",
"RGGGR"]]
fps
  The number of frames per second to display the clip at. `duration` will be
  calculated from the total number of frames. If both `fps` and `duration`
are set, `duration` will be ignored.
duration
The total duration of the clip. `fps` will be calculated from the total
number of frames. If both `fps` and `duration` are set, `duration` will
be ignored.
color_dict
A dictionary that can be used to set specific (r, g, b) values that
correspond to the letters used in ``bitmap_frames``.
eg ``{"A": (50, 150, 150)}``.
Defaults to::
{
"R": (255, 0, 0),
"G": (0, 255, 0),
"B": (0, 0, 255),
"O": (0, 0, 0), # "O" represents black
"W": (255, 255, 255),
# "A", "C", "D", "E", "F" represent arbitrary colors
"A": (89, 225, 62),
"C": (113, 157, 108),
"D": (215, 182, 143),
"E": (57, 26, 252),
}
is_mask
Set to ``True`` if the clip is going to be used as a mask.
|
Creates a VideoClip object from a bitmap representation. Primarily used
in the test suite.
| 1,388 | 1,467 |
def __init__(
self, bitmap_frames, *, fps=None, duration=None, color_dict=None, is_mask=False
):
"""Creates a VideoClip object from a bitmap representation. Primarily used
in the test suite.
Parameters
----------
bitmap_frames
A list of frames. Each frame is a list of strings. Each string
represents a row of colors. Each color represents an (r, g, b) tuple.
Example input (2 frames, 5x3 pixel size)::
[["RRRRR",
"RRBRR",
"RRBRR"],
["RGGGR",
"RGGGR",
"RGGGR"]]
fps
      The number of frames per second to display the clip at. `duration` will be
      calculated from the total number of frames. If both `fps` and `duration`
are set, `duration` will be ignored.
duration
The total duration of the clip. `fps` will be calculated from the total
number of frames. If both `fps` and `duration` are set, `duration` will
be ignored.
color_dict
A dictionary that can be used to set specific (r, g, b) values that
correspond to the letters used in ``bitmap_frames``.
eg ``{"A": (50, 150, 150)}``.
Defaults to::
{
"R": (255, 0, 0),
"G": (0, 255, 0),
"B": (0, 0, 255),
"O": (0, 0, 0), # "O" represents black
"W": (255, 255, 255),
# "A", "C", "D", "E", "F" represent arbitrary colors
"A": (89, 225, 62),
"C": (113, 157, 108),
"D": (215, 182, 143),
"E": (57, 26, 252),
}
is_mask
Set to ``True`` if the clip is going to be used as a mask.
"""
assert fps is not None or duration is not None
self.color_dict = color_dict if color_dict else self.DEFAULT_COLOR_DICT
frame_list = []
for input_frame in bitmap_frames:
output_frame = []
for row in input_frame:
output_frame.append([self.color_dict[color] for color in row])
frame_list.append(np.array(output_frame))
frame_array = np.array(frame_list)
self.total_frames = len(frame_array)
if fps is None:
fps = self.total_frames / duration
else:
duration = self.total_frames / fps
VideoClip.__init__(
self,
make_frame=lambda t: frame_array[int(t * fps)],
is_mask=is_mask,
duration=duration,
)
self.fps = fps
|
https://github.com/Zulko/moviepy/blob/858bb81fba7e09f1f562283ed6d1394db883b6c7/project46/moviepy/video/VideoClip.py#L1388-L1467
| 46 |
[
0,
53,
54,
55,
56,
57,
58,
59,
60,
61,
62,
63,
64,
65,
66,
67,
68,
69,
70,
71,
72,
73,
74,
75,
76,
77,
78,
79
] | 35 |
[] | 0 | false | 72.389791 | 80 | 7 | 100 | 50 |
def __init__(
self, bitmap_frames, *, fps=None, duration=None, color_dict=None, is_mask=False
):
assert fps is not None or duration is not None
self.color_dict = color_dict if color_dict else self.DEFAULT_COLOR_DICT
frame_list = []
for input_frame in bitmap_frames:
output_frame = []
for row in input_frame:
output_frame.append([self.color_dict[color] for color in row])
frame_list.append(np.array(output_frame))
frame_array = np.array(frame_list)
self.total_frames = len(frame_array)
if fps is None:
fps = self.total_frames / duration
else:
duration = self.total_frames / fps
VideoClip.__init__(
self,
make_frame=lambda t: frame_array[int(t * fps)],
is_mask=is_mask,
duration=duration,
)
self.fps = fps
| 28,456 |
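A minimal usage sketch for the constructor above, assuming the ``moviepy.video.VideoClip`` module path shown in this record; the letters map to the default color dictionary and the values are illustrative::

    from moviepy.video.VideoClip import BitmapClip

    # Two 5x3 frames built from the letter notation described in the docstring.
    frames = [
        ["RRRRR",
         "RRBRR",
         "RRBRR"],
        ["RGGGR",
         "RGGGR",
         "RGGGR"],
    ]
    # Exactly one of `fps` or `duration` is required; the other is derived
    # from the number of frames (2 frames at 1 fps -> 2 seconds here).
    clip = BitmapClip(frames, fps=1)
    print(clip.duration, clip.size)  # 2.0 (5, 3)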
|
Zulko/moviepy
|
858bb81fba7e09f1f562283ed6d1394db883b6c7
|
moviepy/video/VideoClip.py
|
BitmapClip.to_bitmap
|
(self, color_dict=None)
|
return bitmap
|
Returns a valid bitmap list that represents each frame of the clip.
If `color_dict` is not specified, then it will use the same `color_dict`
that was used to create the clip.
|
Returns a valid bitmap list that represents each frame of the clip.
If `color_dict` is not specified, then it will use the same `color_dict`
that was used to create the clip.
| 1,469 | 1,487 |
def to_bitmap(self, color_dict=None):
"""Returns a valid bitmap list that represents each frame of the clip.
If `color_dict` is not specified, then it will use the same `color_dict`
that was used to create the clip.
"""
color_dict = color_dict or self.color_dict
bitmap = []
for frame in self.iter_frames():
bitmap.append([])
for line in frame:
bitmap[-1].append("")
for pixel in line:
letter = list(color_dict.keys())[
list(color_dict.values()).index(tuple(pixel))
]
bitmap[-1][-1] += letter
return bitmap
|
https://github.com/Zulko/moviepy/blob/858bb81fba7e09f1f562283ed6d1394db883b6c7/project46/moviepy/video/VideoClip.py#L1469-L1487
| 46 |
[
0,
1,
2,
3,
4,
5,
6,
7,
8,
9,
10,
11,
12,
13,
14,
15,
16,
17,
18
] | 100 |
[] | 0 | true | 72.389791 | 19 | 5 | 100 | 3 |
def to_bitmap(self, color_dict=None):
color_dict = color_dict or self.color_dict
bitmap = []
for frame in self.iter_frames():
bitmap.append([])
for line in frame:
bitmap[-1].append("")
for pixel in line:
letter = list(color_dict.keys())[
list(color_dict.values()).index(tuple(pixel))
]
bitmap[-1][-1] += letter
return bitmap
| 28,457 |
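A short round-trip sketch for ``to_bitmap``, building on the ``BitmapClip`` constructor above; as long as every pixel color has an exact entry in ``color_dict``, converting back reproduces the original letter frames::

    from moviepy.video.VideoClip import BitmapClip

    frames = [["RR", "GG"], ["BB", "WW"]]
    clip = BitmapClip(frames, fps=2)
    # Each frame is mapped pixel-by-pixel back to its letter.
    assert clip.to_bitmap() == frames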
Zulko/moviepy
|
858bb81fba7e09f1f562283ed6d1394db883b6c7
|
moviepy/video/tools/cuts.py
|
find_video_period
|
(clip, fps=None, start_time=0.3)
|
return timings[np.argmax(corrs)]
|
Find the period of a video based on frames correlation.
Parameters
----------
clip : moviepy.Clip.Clip
Clip for which the video period will be computed.
fps : int, optional
Number of frames per second used computing the period. Higher values will
produce more accurate periods, but the execution time will be longer.
start_time : float, optional
First timeframe used to calculate the period of the clip.
Examples
--------
>>> from moviepy.editor import *
>>> from moviepy.video.tools.cuts import find_video_period
>>>
>>> clip = VideoFileClip("media/chaplin.mp4").subclip(0, 1).loop(2)
>>> round(find_video_period(clip, fps=80), 6)
1
|
Find the period of a video based on frames correlation.
| 12 | 45 |
def find_video_period(clip, fps=None, start_time=0.3):
"""Find the period of a video based on frames correlation.
Parameters
----------
clip : moviepy.Clip.Clip
Clip for which the video period will be computed.
fps : int, optional
Number of frames per second used computing the period. Higher values will
produce more accurate periods, but the execution time will be longer.
start_time : float, optional
First timeframe used to calculate the period of the clip.
Examples
--------
>>> from moviepy.editor import *
>>> from moviepy.video.tools.cuts import find_video_period
>>>
>>> clip = VideoFileClip("media/chaplin.mp4").subclip(0, 1).loop(2)
>>> round(find_video_period(clip, fps=80), 6)
1
"""
def frame(t):
return clip.get_frame(t).flatten()
timings = np.arange(start_time, clip.duration, 1 / fps)[1:]
ref = frame(0)
corrs = [np.corrcoef(ref, frame(t))[0, 1] for t in timings]
return timings[np.argmax(corrs)]
|
https://github.com/Zulko/moviepy/blob/858bb81fba7e09f1f562283ed6d1394db883b6c7/project46/moviepy/video/tools/cuts.py#L12-L45
| 46 |
[
0,
1,
2,
3,
4,
5,
6,
7,
8,
9,
10,
11,
12,
13,
14,
15,
16,
17,
18,
19,
20,
21,
22,
23,
24,
25,
26,
27,
28,
29,
30,
31,
32,
33
] | 100 |
[] | 0 | true | 98.290598 | 34 | 3 | 100 | 24 |
def find_video_period(clip, fps=None, start_time=0.3):
def frame(t):
return clip.get_frame(t).flatten()
timings = np.arange(start_time, clip.duration, 1 / fps)[1:]
ref = frame(0)
corrs = [np.corrcoef(ref, frame(t))[0, 1] for t in timings]
return timings[np.argmax(corrs)]
| 28,458 |
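A self-contained sketch of the same correlation idea on a synthetic clip (the frame function and values below are illustrative, not taken from the MoviePy test media)::

    import numpy as np
    from moviepy.video.VideoClip import VideoClip
    from moviepy.video.tools.cuts import find_video_period

    def make_frame(t):
        # Horizontal sine pattern that repeats every 1 second.
        x = np.linspace(0, 1, 64, endpoint=False)
        row = 128 + 100 * np.sin(2 * np.pi * (x + t))
        gray = np.tile(row, (4, 1))
        return np.dstack([gray, gray, gray]).astype("uint8")

    clip = VideoClip(make_frame, duration=1.5)
    # Frames at whole-second offsets correlate perfectly with frame 0,
    # so the detected period comes out close to 1.0.
    print(round(find_video_period(clip, fps=30), 2))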
Zulko/moviepy
|
858bb81fba7e09f1f562283ed6d1394db883b6c7
|
moviepy/video/tools/cuts.py
|
detect_scenes
|
(
clip=None, luminosities=None, luminosity_threshold=10, logger="bar", fps=None
)
|
return cuts, luminosities
|
Detects scenes of a clip based on luminosity changes.
Note that for large clips this may take some time.
Returns
-------
tuple : cuts, luminosities
cuts is a series of cuts [(0,t1), (t1,t2),...(...,tf)]
luminosities are the luminosities computed for each
frame of the clip.
Parameters
----------
clip : video.VideoClip.VideoClip, optional
A video clip. Can be None if a list of luminosities is
provided instead. If provided, the luminosity of each
frame of the clip will be computed. If the clip has no
'fps' attribute, you must provide it.
luminosities : list, optional
A list of luminosities, e.g. returned by detect_scenes
in a previous run.
luminosity_threshold : float, optional
Determines a threshold above which the 'luminosity jumps'
will be considered as scene changes. A scene change is defined
as a change between 2 consecutive frames that is larger than
(avg * thr) where avg is the average of the absolute changes
between consecutive frames.
logger : str, optional
Either ``"bar"`` for progress bar or ``None`` or any Proglog logger.
fps : int, optional
Frames per second value. Must be provided if you provide
no clip or a clip without an fps attribute.
|
Detects scenes of a clip based on luminosity changes.
| 454 | 513 |
def detect_scenes(
clip=None, luminosities=None, luminosity_threshold=10, logger="bar", fps=None
):
"""Detects scenes of a clip based on luminosity changes.
Note that for large clips this may take some time.
Returns
-------
tuple : cuts, luminosities
cuts is a series of cuts [(0,t1), (t1,t2),...(...,tf)]
luminosities are the luminosities computed for each
frame of the clip.
Parameters
----------
clip : video.VideoClip.VideoClip, optional
A video clip. Can be None if a list of luminosities is
provided instead. If provided, the luminosity of each
frame of the clip will be computed. If the clip has no
'fps' attribute, you must provide it.
luminosities : list, optional
A list of luminosities, e.g. returned by detect_scenes
in a previous run.
luminosity_threshold : float, optional
Determines a threshold above which the 'luminosity jumps'
will be considered as scene changes. A scene change is defined
as a change between 2 consecutive frames that is larger than
(avg * thr) where avg is the average of the absolute changes
between consecutive frames.
logger : str, optional
Either ``"bar"`` for progress bar or ``None`` or any Proglog logger.
fps : int, optional
Frames per second value. Must be provided if you provide
no clip or a clip without an fps attribute.
"""
if luminosities is None:
luminosities = [
f.sum() for f in clip.iter_frames(fps=fps, dtype="uint32", logger=logger)
]
luminosities = np.array(luminosities, dtype=float)
if clip is not None:
end = clip.duration
else:
end = len(luminosities) * (1.0 / fps)
luminosity_diffs = abs(np.diff(luminosities))
avg = luminosity_diffs.mean()
luminosity_jumps = (
1 + np.array(np.nonzero(luminosity_diffs > luminosity_threshold * avg))[0]
)
timings = [0] + list((1.0 / fps) * luminosity_jumps) + [end]
cuts = [(t1, t2) for t1, t2 in zip(timings, timings[1:])]
return cuts, luminosities
|
https://github.com/Zulko/moviepy/blob/858bb81fba7e09f1f562283ed6d1394db883b6c7/project46/moviepy/video/tools/cuts.py#L454-L513
| 46 |
[
0,
41,
42,
43,
44,
45,
46,
47,
48,
49,
50,
52,
53,
54,
55,
56,
57,
58,
59
] | 31.666667 |
[
51
] | 1.666667 | false | 98.290598 | 60 | 5 | 98.333333 | 38 |
def detect_scenes(
clip=None, luminosities=None, luminosity_threshold=10, logger="bar", fps=None
):
if luminosities is None:
luminosities = [
f.sum() for f in clip.iter_frames(fps=fps, dtype="uint32", logger=logger)
]
luminosities = np.array(luminosities, dtype=float)
if clip is not None:
end = clip.duration
else:
end = len(luminosities) * (1.0 / fps)
luminosity_diffs = abs(np.diff(luminosities))
avg = luminosity_diffs.mean()
luminosity_jumps = (
1 + np.array(np.nonzero(luminosity_diffs > luminosity_threshold * avg))[0]
)
timings = [0] + list((1.0 / fps) * luminosity_jumps) + [end]
cuts = [(t1, t2) for t1, t2 in zip(timings, timings[1:])]
return cuts, luminosities
| 28,459 |
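The thresholding logic can be exercised without a clip by passing a precomputed ``luminosities`` list together with ``fps``; a minimal sketch with illustrative values::

    from moviepy.video.tools.cuts import detect_scenes

    # 30 per-frame luminosities at 10 fps, with one large jump between
    # frames 14 and 15; the jump exceeds 10x the average frame-to-frame
    # change, so it is reported as a scene cut.
    luminosities = [100] * 15 + [500] * 15
    cuts, lums = detect_scenes(
        luminosities=luminosities, fps=10, luminosity_threshold=10, logger=None
    )
    print(cuts)  # two scenes: roughly (0, 1.5) and (1.5, 3.0)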
Zulko/moviepy
|
858bb81fba7e09f1f562283ed6d1394db883b6c7
|
moviepy/video/tools/cuts.py
|
FramesMatch.__init__
|
(self, start_time, end_time, min_distance, max_distance)
| 67 | 72 |
def __init__(self, start_time, end_time, min_distance, max_distance):
self.start_time = start_time
self.end_time = end_time
self.min_distance = min_distance
self.max_distance = max_distance
self.time_span = end_time - start_time
|
https://github.com/Zulko/moviepy/blob/858bb81fba7e09f1f562283ed6d1394db883b6c7/project46/moviepy/video/tools/cuts.py#L67-L72
| 46 |
[
0,
1,
2,
3,
4,
5
] | 100 |
[] | 0 | true | 98.290598 | 6 | 1 | 100 | 0 |
def __init__(self, start_time, end_time, min_distance, max_distance):
self.start_time = start_time
self.end_time = end_time
self.min_distance = min_distance
self.max_distance = max_distance
self.time_span = end_time - start_time
| 28,460 |
|||
Zulko/moviepy
|
858bb81fba7e09f1f562283ed6d1394db883b6c7
|
moviepy/video/tools/cuts.py
|
FramesMatch.__str__
|
(self)
|
return "(%.04f, %.04f, %.04f, %.04f)" % (
self.start_time,
self.end_time,
self.min_distance,
self.max_distance,
)
| 74 | 80 |
def __str__(self): # pragma: no cover
return "(%.04f, %.04f, %.04f, %.04f)" % (
self.start_time,
self.end_time,
self.min_distance,
self.max_distance,
)
|
https://github.com/Zulko/moviepy/blob/858bb81fba7e09f1f562283ed6d1394db883b6c7/project46/moviepy/video/tools/cuts.py#L74-L80
| 46 |
[] | 0 |
[] | 0 | false | 98.290598 | 7 | 1 | 100 | 0 |
def __str__(self): # pragma: no cover
return "(%.04f, %.04f, %.04f, %.04f)" % (
self.start_time,
self.end_time,
self.min_distance,
self.max_distance,
)
| 28,461 |
||
Zulko/moviepy
|
858bb81fba7e09f1f562283ed6d1394db883b6c7
|
moviepy/video/tools/cuts.py
|
FramesMatch.__repr__
|
(self)
|
return self.__str__()
| 82 | 83 |
def __repr__(self): # pragma: no cover
return self.__str__()
|
https://github.com/Zulko/moviepy/blob/858bb81fba7e09f1f562283ed6d1394db883b6c7/project46/moviepy/video/tools/cuts.py#L82-L83
| 46 |
[] | 0 |
[] | 0 | false | 98.290598 | 2 | 1 | 100 | 0 |
def __repr__(self): # pragma: no cover
return self.__str__()
| 28,462 |
||
Zulko/moviepy
|
858bb81fba7e09f1f562283ed6d1394db883b6c7
|
moviepy/video/tools/cuts.py
|
FramesMatch.__iter__
|
(self)
|
return iter(
(self.start_time, self.end_time, self.min_distance, self.max_distance)
)
| 85 | 88 |
def __iter__(self): # pragma: no cover
return iter(
(self.start_time, self.end_time, self.min_distance, self.max_distance)
)
|
https://github.com/Zulko/moviepy/blob/858bb81fba7e09f1f562283ed6d1394db883b6c7/project46/moviepy/video/tools/cuts.py#L85-L88
| 46 |
[] | 0 |
[] | 0 | false | 98.290598 | 4 | 1 | 100 | 0 |
def __iter__(self): # pragma: no cover
return iter(
(self.start_time, self.end_time, self.min_distance, self.max_distance)
)
| 28,463 |
||
Zulko/moviepy
|
858bb81fba7e09f1f562283ed6d1394db883b6c7
|
moviepy/video/tools/cuts.py
|
FramesMatch.__eq__
|
(self, other)
|
return (
other.start_time == self.start_time
and other.end_time == self.end_time
and other.min_distance == self.min_distance
and other.max_distance == self.max_distance
)
| 90 | 96 |
def __eq__(self, other):
return (
other.start_time == self.start_time
and other.end_time == self.end_time
and other.min_distance == self.min_distance
and other.max_distance == self.max_distance
)
|
https://github.com/Zulko/moviepy/blob/858bb81fba7e09f1f562283ed6d1394db883b6c7/project46/moviepy/video/tools/cuts.py#L90-L96
| 46 |
[
0,
1
] | 28.571429 |
[] | 0 | false | 98.290598 | 7 | 4 | 100 | 0 |
def __eq__(self, other):
return (
other.start_time == self.start_time
and other.end_time == self.end_time
and other.min_distance == self.min_distance
and other.max_distance == self.max_distance
)
| 28,464 |
||
Zulko/moviepy
|
858bb81fba7e09f1f562283ed6d1394db883b6c7
|
moviepy/video/tools/cuts.py
|
FramesMatches.__init__
|
(self, lst)
| 112 | 113 |
def __init__(self, lst):
list.__init__(self, sorted(lst, key=lambda e: e.max_distance))
|
https://github.com/Zulko/moviepy/blob/858bb81fba7e09f1f562283ed6d1394db883b6c7/project46/moviepy/video/tools/cuts.py#L112-L113
| 46 |
[
0,
1
] | 100 |
[] | 0 | true | 98.290598 | 2 | 1 | 100 | 0 |
def __init__(self, lst):
list.__init__(self, sorted(lst, key=lambda e: e.max_distance))
| 28,465 |
|||
Zulko/moviepy
|
858bb81fba7e09f1f562283ed6d1394db883b6c7
|
moviepy/video/tools/cuts.py
|
FramesMatches.best
|
(self, n=1, percent=None)
|
return self[0] if n == 1 else FramesMatches(self[: int(n)])
|
Returns a new FramesMatches object, or a single FramesMatch, selected
from the current instance depending on the arguments.
By default returns the first FramesMatch that the current instance
stores.
Parameters
----------
n : int, optional
Number of matches to retrieve from the current FramesMatches object.
Only has effect when ``percent=None``.
percent : float, optional
Percentage of the current matches to retrieve.
Returns
-------
FramesMatch or FramesMatches : If the number of matches to retrieve is
greater than 1 returns a FramesMatches object, otherwise a
FramesMatch.
|
Returns a new FramesMatches object, or a single FramesMatch, selected
from the current instance depending on the arguments.
| 115 | 142 |
def best(self, n=1, percent=None):
"""Returns a new instance of FramesMatches object or a FramesMatch
from the current class instance given different conditions.
By default returns the first FramesMatch that the current instance
stores.
Parameters
----------
n : int, optional
Number of matches to retrieve from the current FramesMatches object.
Only has effect when ``percent=None``.
percent : float, optional
Percentage of the current matches to retrieve.
Returns
-------
FramesMatch or FramesMatches : If the number of matches to retrieve is
greater than 1 returns a FramesMatches object, otherwise a
FramesMatch.
"""
if percent is not None:
n = len(self) * percent / 100
return self[0] if n == 1 else FramesMatches(self[: int(n)])
|
https://github.com/Zulko/moviepy/blob/858bb81fba7e09f1f562283ed6d1394db883b6c7/project46/moviepy/video/tools/cuts.py#L115-L142
| 46 |
[
0,
1,
2,
3,
4,
5,
6,
7,
8,
9,
10,
11,
12,
13,
14,
15,
16,
17,
18,
19,
20,
21,
22,
23,
24,
25,
26,
27
] | 100 |
[] | 0 | true | 98.290598 | 28 | 2 | 100 | 22 |
def best(self, n=1, percent=None):
if percent is not None:
n = len(self) * percent / 100
return self[0] if n == 1 else FramesMatches(self[: int(n)])
| 28,466 |
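A small sketch of how ``best`` narrows a match list, using hand-built ``FramesMatch`` objects with illustrative values::

    from moviepy.video.tools.cuts import FramesMatch, FramesMatches

    matches = FramesMatches([
        FramesMatch(0.0, 2.0, 0.0, 0.30),
        FramesMatch(1.0, 3.0, 0.0, 0.05),  # smallest max_distance
        FramesMatch(2.0, 5.0, 0.0, 0.10),
    ])
    # The list is kept sorted by max_distance, so best() is the closest match.
    print(matches.best())          # (1.0000, 3.0000, 0.0000, 0.0500)
    print(len(matches.best(n=2)))  # 2 (a FramesMatches with the two closest)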