Skip to content

Commit

Permalink
Implement basic data model (#1076)
Browse files Browse the repository at this point in the history
* apply patch

* lint

* fix time mirror

* undo change

* merge #1943

* undo change

* new implementation for test_blit_with_opacity

* add more tests for slicing operations

* implement __mul__ using loop function as recommended

* or and div

* clip @ angle  is a rotation

* linters happy
  • Loading branch information
mgaitan authored May 26, 2023
1 parent 1cac443 commit 57c279a
Show file tree
Hide file tree
Showing 10 changed files with 238 additions and 26 deletions.
5 changes: 3 additions & 2 deletions docs/getting_started/quick_presentation.rst
Original file line number Diff line number Diff line change
Expand Up @@ -42,13 +42,14 @@ In a typical MoviePy script, you load video or audio files, modify them, put the
from moviepy import *

# Load myHolidays.mp4 and select the subclip 00:00:50 - 00:00:60
clip = VideoFileClip("myHolidays.mp4")
clip = clip.subclip(50, 60)  # or just clip[50:60]

# Reduce the audio volume (volume x 0.8)
clip = clip.multiply_volume(0.8)

# Generate a text clip. You can customize the font, color, etc.
txt_clip = TextClip("My Holidays 2013", fontsize=70, color="white")

# Say that you want it to appear 10s at the center of the screen
txt_clip = txt_clip.with_position('center').with_duration(10)
Expand Down
86 changes: 83 additions & 3 deletions moviepy/Clip.py
Original file line number Diff line number Diff line change
@@ -1,8 +1,10 @@
"""Implements the central object of MoviePy, the Clip, and all the methods that
are common to the two subclasses of Clip, VideoClip and AudioClip.
"""

import copy as _copy
from functools import reduce
from numbers import Real
from operator import add

import numpy as np
import proglog
Expand Down Expand Up @@ -388,6 +390,9 @@ def subclip(self, start_time=0, end_time=None):
The ``mask`` and ``audio`` of the resulting subclip will be subclips of
``mask`` and ``audio`` the original clip, if they exist.
It's equivalent to slice the clip as a sequence, like
``clip[t_start:t_end]``.
Parameters
----------
Expand Down Expand Up @@ -562,10 +567,85 @@ def __eq__(self, other):

return True

# Support the Context Manager protocol, to ensure that resources are cleaned up.

def __enter__(self):
    """Enter the context manager (``with Clip(...) as clip:``).

    Returns the clip itself so the ``with`` statement can bind it;
    pairs with ``__exit__``, which releases the clip's resources.
    """
    return self

def __exit__(self, exc_type, exc_value, traceback):
    # Always release resources when leaving the ``with`` block; the
    # implicit None (falsy) return lets any exception propagate.
    self.close()

def __getitem__(self, key):
    """
    Support extended slice and index operations over a clip object.

    Simple slicing is implemented via ``subclip``: ``clip[t_start:t_end]``
    is equivalent to ``clip.subclip(t_start, t_end)``. If ``t_start`` is
    not given it defaults to ``0``; if ``t_end`` is not given it defaults
    to ``self.duration``.

    The slice object optionally supports a third argument acting as a
    ``speed`` coefficient (which may be negative):
    ``clip[t_start:t_end:speed]``. For example ``clip[::-1]`` returns the
    clip reversed in time (a ``time_mirror`` fx) and ``clip[:5:2]``
    returns the segment from 0 to 5s accelerated to 2x (i.e. the
    resulting duration is 2.5s).

    In addition, a tuple of slices is supported, resulting in the
    concatenation of each segment. For example ``clip[:1, 2:]`` returns a
    clip with the segment from 1 to 2s removed.

    If ``key`` is neither a slice nor a tuple, it is assumed to be a time
    value (expressed in any format supported by ``cvsecs``) and the frame
    at that time is returned by passing the key to ``get_frame``.
    """
    apply_to = ["mask", "audio"]
    if isinstance(key, slice):
        # Support [start:end:speed] slicing; a negative speed applies a
        # time mirror. NOTE(review): ``or`` treats an explicit 0 stop as
        # missing (clip[1:0] becomes clip[1:duration]) — confirm intended.
        clip = self.subclip(key.start or 0, key.stop or self.duration)

        if key.step:
            # Change the speed of the subclip by |step|.
            factor = abs(key.step)
            if factor != 1:
                # Remap time by the factor, then rescale the duration to
                # match the new playback speed.
                clip = clip.time_transform(
                    lambda t: factor * t, apply_to=apply_to, keep_duration=True
                )
                clip = clip.with_duration(1.0 * clip.duration / factor)
            if key.step < 0:
                # Time mirror. NOTE(review): the ``- 1`` offset matches
                # the existing time_mirror fx; presumably it avoids
                # sampling past the last readable frame — confirm against
                # get_frame behavior at t == duration.
                clip = clip.time_transform(
                    lambda t: clip.duration - t - 1,
                    keep_duration=True,
                    apply_to=apply_to,
                )
        return clip
    elif isinstance(key, tuple):
        # Concatenate the subclips selected by each slice in the tuple
        # (delegates to __add__ via reduce/operator.add).
        return reduce(add, (self[k] for k in key))
    else:
        # A scalar key is a time value: return the frame at that time.
        return self.get_frame(key)

def __del__(self):
    """Release the clip's resources when it is garbage-collected.

    Guarded with try/except because ``__del__`` may run after a partially
    failed ``__init__`` (attributes used by ``close()`` may not exist) or
    during interpreter shutdown, when module globals may already be torn
    down. Exceptions raised in a finalizer are never propagated to user
    code anyway — Python only prints them — so swallowing them here turns
    unactionable noise into silent best-effort cleanup.
    """
    try:
        self.close()
    except Exception:
        # Best-effort cleanup only; never let finalization errors escape.
        pass

def __add__(self, other):
    # Concatenation; the concrete behavior is implemented in the
    # specialized subclasses (VideoClip, AudioClip). Returning
    # NotImplemented lets Python fall back to ``other.__radd__`` or
    # raise a proper TypeError.
    return NotImplemented

def __mul__(self, n):
    """Loop the clip: ``clip * n`` repeats the clip ``n`` times.

    ``n`` must be a real number (``numbers.Real``); presumably a
    non-integer ``n`` loops for a fractional number of repetitions —
    confirm against the ``loop`` fx documentation.
    """
    if not isinstance(n, Real):
        # Unsupported operand type: let Python raise the usual TypeError.
        return NotImplemented

    # Imported here (not at module top) to avoid a circular import
    # between Clip and the fx modules at load time.
    from moviepy.video.fx.loop import loop

    return loop(self, n)
5 changes: 5 additions & 0 deletions moviepy/audio/AudioClip.py
Original file line number Diff line number Diff line change
Expand Up @@ -259,6 +259,11 @@ def write_audiofile(
logger=logger,
)

def __add__(self, other):
    """Concatenate two audio clips with the ``+`` operator."""
    if not isinstance(other, AudioClip):
        # Not an audio clip: defer to the base Clip implementation
        # (which returns NotImplemented).
        return super(AudioClip, self).__add__(other)
    return concatenate_audioclips([self, other])


class AudioArrayClip(AudioClip):
"""
Expand Down
43 changes: 42 additions & 1 deletion moviepy/video/VideoClip.py
Original file line number Diff line number Diff line change
Expand Up @@ -8,6 +8,7 @@
import os
import subprocess as sp
import tempfile
from numbers import Real

import numpy as np
import proglog
Expand Down Expand Up @@ -909,6 +910,46 @@ def afx(self, fun, *args, **kwargs):
"""
self.audio = self.audio.fx(fun, *args, **kwargs)

def __add__(self, other):
    """Concatenate two video clips with the ``+`` operator."""
    if not isinstance(other, VideoClip):
        # Defer to the base Clip implementation for non-video operands.
        return super(VideoClip, self).__add__(other)

    from moviepy.video.compositing.concatenate import concatenate_videoclips

    # Clips of identical size can use the cheap "chain" method;
    # otherwise fall back to "compose".
    if self.size == other.size:
        method = "chain"
    else:
        method = "compose"
    return concatenate_videoclips([self, other], method=method)

def __or__(self, other):
    """
    Implement ``self | other``: produce a composite video with ``self``
    and ``other`` placed side by side horizontally.
    """
    if isinstance(other, VideoClip):
        from moviepy.video.compositing.CompositeVideoClip import clips_array

        return clips_array([[self, other]])
    # BUG FIX: no base class defines __or__, so delegating to super()
    # raised AttributeError for non-VideoClip operands. Returning
    # NotImplemented lets Python try ``other.__ror__`` and otherwise
    # raise a proper TypeError, per the data model.
    return NotImplemented

def __truediv__(self, other):
    """
    Implement division (``self / other``): produce a composite video
    with ``self`` placed on top of ``other`` (stacked vertically).
    """
    if isinstance(other, VideoClip):
        from moviepy.video.compositing.CompositeVideoClip import clips_array

        return clips_array([[self], [other]])
    # BUG FIX: the fallback mistakenly delegated to ``super().__or__``
    # (a copy/paste slip from __or__), and no base class defines that
    # method either, so it raised AttributeError. Returning
    # NotImplemented gives the standard TypeError behavior.
    return NotImplemented

def __matmul__(self, n):
    """``clip @ angle`` rotates the clip by ``angle`` degrees."""
    if isinstance(n, Real):
        # Imported lazily to avoid circular imports with the fx modules.
        from moviepy.video.fx.rotate import rotate

        return rotate(self, n)
    return NotImplemented

def __and__(self, mask):
    # ``clip & mask`` attaches ``mask`` to the clip; sugar for with_mask.
    return self.with_mask(mask)


class DataVideoClip(VideoClip):
"""
Expand All @@ -918,7 +959,7 @@ class DataVideoClip(VideoClip):
Parameters
----------
data
A liste of datasets, each dataset being used for one frame of the clip
A list of datasets, each dataset being used for one frame of the clip
data_to_frame
A function d -> video frame, where d is one element of the list `data`
Expand Down
4 changes: 2 additions & 2 deletions moviepy/video/fx/freeze.py
Original file line number Diff line number Diff line change
Expand Up @@ -23,7 +23,7 @@ def freeze(clip, t=0, freeze_duration=None, total_duration=None, padding_end=0):
)
freeze_duration = total_duration - clip.duration

before = [clip.subclip(0, t)] if (t != 0) else []
before = [clip[:t]] if (t != 0) else []
freeze = [clip.to_ImageClip(t).with_duration(freeze_duration)]
after = [clip.subclip(t)] if (t != clip.duration) else []
after = [clip[t:]] if (t != clip.duration) else []
return concatenate_videoclips(before + freeze + after)
2 changes: 1 addition & 1 deletion moviepy/video/fx/time_mirror.py
Original file line number Diff line number Diff line change
Expand Up @@ -10,4 +10,4 @@ def time_mirror(clip):
The clip must have its ``duration`` attribute set.
The same effect is applied to the clip's audio and mask if any.
"""
return clip.time_transform(lambda t: clip.duration - t - 1, keep_duration=True)
return clip[::-1]
7 changes: 2 additions & 5 deletions moviepy/video/fx/time_symmetrize.py
Original file line number Diff line number Diff line change
@@ -1,10 +1,7 @@
from moviepy.decorators import apply_to_mask, requires_duration
from moviepy.video.compositing.concatenate import concatenate_videoclips
from moviepy.video.fx.time_mirror import time_mirror
from moviepy.decorators import requires_duration


@requires_duration
@apply_to_mask
def time_symmetrize(clip):
"""
Returns a clip that plays the current clip once forwards and
Expand All @@ -13,4 +10,4 @@ def time_symmetrize(clip):
This effect is automatically applied to the clip's mask and audio
if they exist.
"""
return concatenate_videoclips([clip, clip.fx(time_mirror)])
return clip + clip[::-1]
67 changes: 67 additions & 0 deletions tests/test_VideoClip.py
Original file line number Diff line number Diff line change
Expand Up @@ -441,5 +441,72 @@ def test_afterimage(util):
final_clip.write_videofile(filename, fps=30, logger=None)


def test_add():
    # Concatenating two subclips with ``+`` sums their durations and
    # plays the second segment's frames after the first.
    clip = VideoFileClip("media/fire2.mp4")
    new_clip = clip[0:1] + clip[2:3.2]
    # NOTE(review): exact float equality on 1 + (3.2 - 2) is fragile;
    # math.isclose would be more robust if this ever flakes.
    assert new_clip.duration == 2.2
    # Frame at t=1.1 of the result corresponds to t=2.1 of the source.
    assert np.array_equal(new_clip[1.1], clip[2.1])


def test_slice_tuples():
    # A tuple of slices concatenates the segments — equivalent to
    # clip[0:1] + clip[2:3.2] (see test_add).
    clip = VideoFileClip("media/fire2.mp4")
    new_clip = clip[0:1, 2:3.2]
    assert new_clip.duration == 2.2
    assert np.array_equal(new_clip[1.1], clip[2.1])


def test_slice_mirror():
    # clip[::-1] reverses the clip in time without changing its duration.
    clip = VideoFileClip("media/fire2.mp4")
    new_clip = clip[::-1]
    assert new_clip.duration == clip.duration
    # NOTE(review): the mirror remaps t -> duration - t - 1, so frame 0 of
    # the mirrored clip samples t = duration - 1 of the source, while the
    # right-hand side samples t = duration — presumably get_frame clamps
    # or the frames coincide for this media; confirm.
    assert np.array_equal(new_clip[0], clip[clip.duration])


def test_slice_speed():
    # A fractional slice step changes speed: step 0.5 halves the speed,
    # doubling the duration so each source frame appears twice at fps=1.
    clip = BitmapClip([["A"], ["B"], ["C"], ["D"]], fps=1)
    clip1 = clip[::0.5]  # 1/2x speed
    target1 = BitmapClip(
        [["A"], ["A"], ["B"], ["B"], ["C"], ["C"], ["D"], ["D"]], fps=1
    )
    assert clip1 == target1


def test_mul():
    # clip * n loops the clip: a 1s clip times 2.5 lasts 2.5s, and t=1.1
    # wraps around to t=0.1 of the source.
    clip = VideoFileClip("media/fire2.mp4")
    new_clip = clip[0:1] * 2.5
    assert new_clip.duration == 2.5
    assert np.array_equal(new_clip[1.1], clip[0.1])


def test_and():
    # clip & mask attaches the mask (sugar for with_mask); the exact
    # mask object must be stored on the result.
    clip = VideoFileClip("media/fire2.mp4")
    maskclip = ImageClip("media/afterimage.png", is_mask=True, transparent=True)
    clip_with_mask = clip & maskclip
    assert clip_with_mask.mask is maskclip


def test_or():
    """``clip1 | clip2`` places the two clips side by side horizontally."""
    # FIX: dropped the unused ``util`` fixture parameter (never referenced).
    clip1 = BitmapClip([["R"]], fps=1)
    clip2 = BitmapClip([["G"]], fps=1)
    # One red pixel next to one green pixel.
    target = BitmapClip([["RG"]], fps=1)
    result = clip1 | clip2
    assert result == target


def test_truediv():
    """``clip1 / clip2`` stacks ``clip1`` on top of ``clip2`` vertically."""
    # FIX: dropped the unused ``util`` fixture parameter (never referenced).
    clip1 = BitmapClip([["R"]], fps=1)
    clip2 = BitmapClip([["G"]], fps=1)
    # Red pixel above green pixel.
    target = BitmapClip([["R", "G"]], fps=1)
    result = clip1 / clip2
    assert result == target


def test_matmul():
    """``clip @ angle`` rotates the clip; 270 degrees turns "RG" vertical."""
    # FIX: dropped the unused ``util`` fixture parameter (never referenced).
    clip1 = BitmapClip([["RG"]], fps=1)
    target = BitmapClip([["R", "G"]], fps=1)
    result = clip1 @ 270
    assert result == target


if __name__ == "__main__":
    # Allow running this test module directly (python tests/test_VideoClip.py)
    # instead of via the pytest CLI.
    pytest.main()
43 changes: 32 additions & 11 deletions tests/test_compositing.py
Original file line number Diff line number Diff line change
Expand Up @@ -10,7 +10,6 @@
from moviepy.video.compositing.concatenate import concatenate_videoclips
from moviepy.video.compositing.transitions import slide_in, slide_out
from moviepy.video.fx.resize import resize
from moviepy.video.io.VideoFileClip import VideoFileClip
from moviepy.video.VideoClip import BitmapClip, ColorClip


Expand Down Expand Up @@ -93,22 +92,44 @@ def test_concatenate_floating_point(util):
concat.write_videofile(os.path.join(util.TMP_DIR, "concat.mp4"), preset="ultrafast")


# def test_blit_with_opacity():
# # bitmap.mp4 has one second R, one second G, one second B
# clip1 = VideoFileClip("media/bitmap.mp4")
# # overlay same clip, shifted by 1 second, at half opacity
# clip2 = (
# VideoFileClip("media/bitmap.mp4")
# .subclip(1, 2)
# .with_start(0)
# .with_end(2)
# .with_opacity(0.5)
# )
# composite = CompositeVideoClip([clip1, clip2])
# bt = ClipPixelTest(composite)

# bt.expect_color_at(0.5, (0x7F, 0x7F, 0x00))
# bt.expect_color_at(1.5, (0x00, 0x7F, 0x7F))
# bt.expect_color_at(2.5, (0x00, 0x00, 0xFF))


def test_blit_with_opacity():
    """Compositing a half-opacity overlay blends 50/50 with the base clip."""
    # FIX: this span was a garbled diff rendering that interleaved the
    # removed pre-change lines (the VideoFileClip-based setup and a stale
    # expect_color_at(1.5, ...) assertion that contradicts the one below)
    # into the body; this is the reconstructed post-change version.

    # Base clip: one second red, one second green, one second blue.
    size = (2, 2)
    clip1 = (
        ColorClip(size, color=(255, 0, 0), duration=1)
        + ColorClip(size, color=(0, 255, 0), duration=1)
        + ColorClip(size, color=(0, 0, 255), duration=1)
    )

    # Overlay green at half opacity during the first 2 seconds.
    clip2 = ColorClip(size, color=(0, 255, 0), duration=2).with_opacity(0.5)
    composite = CompositeVideoClip([clip1, clip2])
    bt = ClipPixelTest(composite)

    # red + 50% green
    bt.expect_color_at(0.5, (0x7F, 0x7F, 0x00))
    # green + 50% green
    bt.expect_color_at(1.5, (0x00, 0xFF, 0x00))
    # blue is after 2s, so it is left untouched
    bt.expect_color_at(2.5, (0x00, 0x00, 0xFF))


Expand Down
2 changes: 1 addition & 1 deletion tests/test_issues.py
Original file line number Diff line number Diff line change
Expand Up @@ -233,7 +233,7 @@ def size(t):
avatar.audio = None
maskclip = ImageClip("media/afterimage.png", is_mask=True, transparent=True)
avatar.with_mask(maskclip) # must set maskclip here..
concatenated = concatenate_videoclips([avatar] * 3)
concatenated = avatar * 3

tt = VideoFileClip("media/big_buck_bunny_0_30.webm").subclip(0, 3)
# TODO: Setting mask here does not work:
Expand Down

0 comments on commit 57c279a

Please sign in to comment.