#include "../include/Clip.h"

// Init default settings for a clip
void Clip::init_settings()

	// Default wave color: opaque blue (RGBA 0, 123, 255, 255)
	wave_color = Color((unsigned char)0, (unsigned char)123, (unsigned char)255, (unsigned char)255);

	// By default, do not manage the lifetime of the reader
	manage_reader = false;

	// Get the lowercase file extension, to guess an appropriate reader
	string ext = get_file_extension(path);
	transform(ext.begin(), ext.end(), ext.begin(), ::tolower);

	// Known media containers (handled by FFmpegReader)
	if (ext == "avi" || ext == "mov" || ext == "mkv" || ext == "mpg" || ext == "mpeg" || ext == "mp3" || ext == "mp4" || ext == "mts" ||
	    ext == "ogg" || ext == "wav" || ext == "wmv" || ext == "webm" || ext == "vob")

		// This clip created its own reader, so it must also delete it
		manage_reader = true;
	// Delete the reader, if this clip owns it
	if (manage_reader && reader) {

	// Raised by methods that require a reader when none has been set
	throw ReaderClosed("No Reader has been initialized for this Clip. Call Reader(*reader) before calling this method.", "");

	throw ReaderClosed("No Reader has been initialized for this Clip. Call Reader(*reader) before calling this method.", "");

	// Debug output
	ZmqLogger::Instance()->AppendDebugMethod("Clip::Close", "", -1, "", -1, "", -1, "", -1, "", -1, "", -1);

	throw ReaderClosed("No Reader has been initialized for this Clip. Call Reader(*reader) before calling this method.", "");

	throw ReaderClosed("No Reader has been initialized for this Clip. Call Reader(*reader) before calling this method.", "");
	// Adjust out-of-bounds frame numbers
	requested_frame = adjust_frame_number_minimum(requested_frame);

	// Disable audio when the reader has no audio stream (and audio was not explicitly enabled)
	else if (enabled_audio == -1 && reader && !reader->info.has_audio)

	// Disable video when the reader has no video stream (and video was not explicitly enabled)
	else if (enabled_video == -1 && reader && !reader->info.has_video)

	long int new_frame_number = requested_frame;

	// Get the original frame from the reader (or cache)
	tr1::shared_ptr<Frame> original_frame = GetOrCreateFrame(new_frame_number);

	// Create a new, blank frame with the same audio geometry as the original
	tr1::shared_ptr<Frame> frame(new Frame(new_frame_number, 1, 1, "#000000", original_frame->GetAudioSamplesCount(), original_frame->GetAudioChannelsCount()));
	frame->SampleRate(original_frame->SampleRate());
	frame->ChannelsLayout(original_frame->ChannelsLayout());

	// Deep-copy the image, so the cached original frame is not modified
	frame->AddImage(tr1::shared_ptr<QImage>(new QImage(*original_frame->GetImage())));

	// Copy each channel of audio into the new frame
	for (int channel = 0; channel < original_frame->GetAudioChannelsCount(); channel++)
		frame->AddAudio(true, channel, 0, original_frame->GetAudioSamples(channel), original_frame->GetAudioSamplesCount(), 1.0);

	// Apply the 'time' keyframe (speed and direction re-mapping)
	tr1::shared_ptr<Frame> new_frame = get_time_mapped_frame(frame, requested_frame);

	// Apply effects to the frame (if any)
	apply_effects(new_frame);

	throw ReaderClosed("No Reader has been initialized for this Clip. Call Reader(*reader) before calling this method.", "");
// Get file extension from a path
string Clip::get_file_extension(string path)

	// Return the characters after the last '.' in the path
	return path.substr(path.find_last_of(".") + 1);
// Reverse an audio buffer (in place)
void Clip::reverse_buffer(juce::AudioSampleBuffer* buffer)

	int number_of_samples = buffer->getNumSamples();
	int channels = buffer->getNumChannels();

	// Create a second buffer to hold the reversed samples
	AudioSampleBuffer *reversed = new juce::AudioSampleBuffer(channels, number_of_samples);

	for (int channel = 0; channel < channels; channel++)
	{
		int n = 0;
		// Copy samples in reverse order
		for (int s = number_of_samples - 1; s >= 0; s--, n++)
			reversed->getWritePointer(channel)[n] = buffer->getWritePointer(channel)[s];
	}

	// Copy the reversed samples back into the original buffer
	buffer->clear();
	for (int channel = 0; channel < channels; channel++)

		buffer->addFrom(channel, 0, reversed->getReadPointer(channel), number_of_samples, 1.0f);

	delete reversed;
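// --- Illustrative example (not part of the original Clip.cpp) ---------------
// A minimal sketch of the reversal idiom above, on a tiny one-channel JUCE
// buffer. The include below is an assumption; the header path depends on how
// JUCE is set up in your build.
#include <JuceHeader.h>
#include <cassert>

void example_reverse() {
	juce::AudioSampleBuffer buf(1, 4);  // 1 channel, 4 samples
	float* w = buf.getWritePointer(0);
	w[0] = 0.1f; w[1] = 0.2f; w[2] = 0.3f; w[3] = 0.4f;

	// Same loop shape as Clip::reverse_buffer
	juce::AudioSampleBuffer reversed(1, 4);
	int n = 0;
	for (int s = buf.getNumSamples() - 1; s >= 0; s--, n++)
		reversed.getWritePointer(0)[n] = buf.getWritePointer(0)[s];

	assert(reversed.getReadPointer(0)[0] == 0.4f);  // now 0.4, 0.3, 0.2, 0.1
}
// -----------------------------------------------------------------------------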
// Adjust the audio and image of a time-mapped frame
tr1::shared_ptr<Frame> Clip::get_time_mapped_frame(tr1::shared_ptr<Frame> frame, long int frame_number) throw(ReaderClosed)

	// Check for a valid reader
	throw ReaderClosed("No Reader has been initialized for this Clip. Call Reader(*reader) before calling this method.", "");

	tr1::shared_ptr<Frame> new_frame;

	// Buffer for the source audio samples
	juce::AudioSampleBuffer *samples = NULL;

	// Get the frame number the 'time' curve maps this frame to
	int new_frame_number = round(time.GetValue(frame_number));

	// Create a new frame for the time-mapped result
	new_frame = tr1::shared_ptr<Frame>(new Frame(new_frame_number, 1, 1, "#000000", samples_in_frame, frame->GetAudioChannelsCount()));

	// Copy the image from the mapped frame
	new_frame->AddImage(GetOrCreateFrame(new_frame_number)->GetImage());

	int number_of_samples = GetOrCreateFrame(new_frame_number)->GetAudioSamplesCount();

	// Init the resampled buffer and the sample offset
	AudioSampleBuffer *resampled_buffer = NULL;
	int resampled_buffer_size = 0;
	int start = 0;

	// Allocate a new sample buffer, and copy the mapped frame's audio into it
	samples = new juce::AudioSampleBuffer(channels, number_of_samples);

	for (int channel = 0; channel < channels; channel++)
		samples->addFrom(channel, 0, GetOrCreateFrame(new_frame_number)->GetAudioSamples(channel), number_of_samples, 1.0f);

	// Reverse the samples (when the time curve plays the clip backwards)
	reverse_buffer(samples);

	// Fetch the resampled audio from the resampler
	resampled_buffer = resampler->GetResampledBuffer();
	resampled_buffer_size = resampled_buffer->getNumSamples();

	// Add the resampled audio to the new frame (one channel at a time)
	for (int channel = 0; channel < channels; channel++)
		new_frame->AddAudio(true, channel, 0, resampled_buffer->getReadPointer(channel, start), number_of_samples, 1.0f);

	// The resampler owns this buffer, so just clear the pointer
	resampled_buffer = NULL;
	// Speed change: several source frames map to this output frame
	else if (abs(delta) > 1 && abs(delta) < 100) {

		// Determine how many total samples the delta frames contain
		int total_delta_samples = 0;
		for (int delta_frame = new_frame_number - (delta - 1);
		     delta_frame <= new_frame_number; delta_frame++)
			total_delta_samples += GetOrCreateFrame(delta_frame)->GetAudioSamplesCount();

		// Allocate a sample buffer large enough for all delta frames
		samples = new juce::AudioSampleBuffer(channels, total_delta_samples);

		// Loop through the delta frames and copy their audio
		for (int delta_frame = new_frame_number - (delta - 1);
		     delta_frame <= new_frame_number; delta_frame++) {

			int number_of_delta_samples = GetOrCreateFrame(delta_frame)->GetAudioSamplesCount();
			AudioSampleBuffer *delta_samples = new juce::AudioSampleBuffer(channels, number_of_delta_samples);
			delta_samples->clear();

			for (int channel = 0; channel < channels; channel++)
				delta_samples->addFrom(channel, 0, GetOrCreateFrame(delta_frame)->GetAudioSamples(channel), number_of_delta_samples, 1.0f);

			// Reverse the samples (when playing backwards)
			reverse_buffer(delta_samples);

			// Copy the delta samples into the combined buffer, at the current offset
			for (int channel = 0; channel < channels; channel++)
				samples->addFrom(channel, start, delta_samples->getReadPointer(channel), number_of_delta_samples, 1.0f);

			delete delta_samples;
			delta_samples = NULL;

			// Advance the write offset
			start += number_of_delta_samples;
		// Negative delta (playing in reverse): walk down to the mapped frame
		int total_delta_samples = 0;
		for (int delta_frame = new_frame_number - (delta + 1);
		     delta_frame >= new_frame_number; delta_frame--)
			total_delta_samples += GetOrCreateFrame(delta_frame)->GetAudioSamplesCount();

		samples = new juce::AudioSampleBuffer(channels, total_delta_samples);

		for (int delta_frame = new_frame_number - (delta + 1);
		     delta_frame >= new_frame_number; delta_frame--) {

			int number_of_delta_samples = GetOrCreateFrame(delta_frame)->GetAudioSamplesCount();
			AudioSampleBuffer *delta_samples = new juce::AudioSampleBuffer(channels, number_of_delta_samples);
			delta_samples->clear();

			for (int channel = 0; channel < channels; channel++)
				delta_samples->addFrom(channel, 0, GetOrCreateFrame(delta_frame)->GetAudioSamples(channel), number_of_delta_samples, 1.0f);

			// Reverse the samples (when playing backwards)
			reverse_buffer(delta_samples);

			for (int channel = 0; channel < channels; channel++)
				samples->addFrom(channel, start, delta_samples->getReadPointer(channel), number_of_delta_samples, 1.0f);

			delete delta_samples;
			delta_samples = NULL;

			start += number_of_delta_samples;

	// Feed the combined samples into the resampler, scaled by the playback rate
	resampler->SetBuffer(samples, float(start) / float(number_of_samples));
	// Fetch the resampled buffer from the resampler
	AudioSampleBuffer *buffer = resampler->GetResampledBuffer();
	int resampled_buffer_size = buffer->getNumSamples();

	// Add the resampled audio to the new frame (one channel at a time)
	for (int channel = 0; channel < channels; channel++)
		new_frame->AddAudio(true, channel, 0, buffer->getReadPointer(channel), number_of_samples, 1.0f);

	// Normal speed: copy the source frame's audio unchanged
	samples = new juce::AudioSampleBuffer(channels, number_of_samples);

	for (int channel = 0; channel < channels; channel++)
		samples->addFrom(channel, 0, frame->GetAudioSamples(channel), number_of_samples, 1.0f);

	// Reverse the samples (when playing backwards)
	reverse_buffer(samples);

	for (int channel = 0; channel < channels; channel++)
		new_frame->AddAudio(true, channel, 0, samples->getReadPointer(channel), number_of_samples, 1.0f);
// Adjust a frame number, so it is never below 1
long int Clip::adjust_frame_number_minimum(long int frame_number)

	// Frame numbers are 1-based
	if (frame_number < 1)
		return 1;

// Get or create a frame from the internal reader
tr1::shared_ptr<Frame> Clip::GetOrCreateFrame(long int number)

	tr1::shared_ptr<Frame> new_frame;

	// Debug output
	ZmqLogger::Instance()->AppendDebugMethod("Clip::GetOrCreateFrame (from reader)", "number", number, "samples_in_frame", samples_in_frame, "", -1, "", -1, "", -1, "", -1);

	// Attempt to get the frame from the reader
	new_frame = reader->GetFrame(number);

	// Debug output (the reader could not supply the frame; create a blank one)
	ZmqLogger::Instance()->AppendDebugMethod("Clip::GetOrCreateFrame (create blank)", "number", number, "samples_in_frame", samples_in_frame, "", -1, "", -1, "", -1, "", -1);
	Point requested_point(requested_frame, requested_frame);

	root["id"] = add_property_json("ID", 0.0, "string", Id(), false, 0, -1, -1, CONSTANT, -1, true);
	root["position"] = add_property_json("Position", Position(), "float", "", false, 0, 0, 30 * 60 * 60 * 48, CONSTANT, -1, false);
	root["layer"] = add_property_json("Track", Layer(), "int", "", false, 0, 0, 20, CONSTANT, -1, false);
	root["start"] = add_property_json("Start", Start(), "float", "", false, 0, 0, 30 * 60 * 60 * 48, CONSTANT, -1, false);
	root["end"] = add_property_json("End", End(), "float", "", false, 0, 0, 30 * 60 * 60 * 48, CONSTANT, -1, false);
	root["duration"] = add_property_json("Duration", Duration(), "float", "", false, 0, 0, 30 * 60 * 60 * 48, CONSTANT, -1, true);
	root["gravity"] = add_property_json("Gravity", gravity, "int", "", false, 0, 0, 8, CONSTANT, -1, false);
	root["scale"] = add_property_json("Scale", scale, "int", "", false, 0, 0, 3, CONSTANT, -1, false);
	root["anchor"] = add_property_json("Anchor", anchor, "int", "", false, 0, 0, 1, CONSTANT, -1, false);
	root["waveform"] = add_property_json("Waveform", waveform, "int", "", false, 0, 0, 1, CONSTANT, -1, false);
	root["location_x"] = add_property_json("Location X", location_x.GetValue(requested_frame), "float", "",
		location_x.Contains(requested_point), location_x.GetCount(), -1.0, 1.0,
		location_x.GetClosestPoint(requested_point).interpolation, location_x.GetClosestPoint(requested_point).co.X, false);
	root["location_y"] = add_property_json("Location Y", location_y.GetValue(requested_frame), "float", "",
		location_y.Contains(requested_point), location_y.GetCount(), -1.0, 1.0,
		location_y.GetClosestPoint(requested_point).interpolation, location_y.GetClosestPoint(requested_point).co.X, false);
	root["scale_x"] = add_property_json("Scale X", scale_x.GetValue(requested_frame), "float", "",
		scale_x.Contains(requested_point), scale_x.GetCount(), 0.0, 1.0,
		scale_x.GetClosestPoint(requested_point).interpolation, scale_x.GetClosestPoint(requested_point).co.X, false);
	root["scale_y"] = add_property_json("Scale Y", scale_y.GetValue(requested_frame), "float", "",
		scale_y.Contains(requested_point), scale_y.GetCount(), 0.0, 1.0,
		scale_y.GetClosestPoint(requested_point).interpolation, scale_y.GetClosestPoint(requested_point).co.X, false);
	root["alpha"] = add_property_json("Alpha", alpha.GetValue(requested_frame), "float", "",
		alpha.Contains(requested_point), alpha.GetCount(), 0.0, 1.0,
		alpha.GetClosestPoint(requested_point).interpolation, alpha.GetClosestPoint(requested_point).co.X, false);
	root["rotation"] = add_property_json("Rotation", rotation.GetValue(requested_frame), "float", "",
		rotation.Contains(requested_point), rotation.GetCount(), -360, 360,
		rotation.GetClosestPoint(requested_point).interpolation, rotation.GetClosestPoint(requested_point).co.X, false);
	root["volume"] = add_property_json("Volume", volume.GetValue(requested_frame), "float", "",
		volume.Contains(requested_point), volume.GetCount(), 0.0, 1.0,
		volume.GetClosestPoint(requested_point).interpolation, volume.GetClosestPoint(requested_point).co.X, false);
	root["time"] = add_property_json("Time", time.GetValue(requested_frame), "float", "",
		time.Contains(requested_point), time.GetCount(), 0.0, 30 * 60 * 60 * 48,
		time.GetClosestPoint(requested_point).interpolation, time.GetClosestPoint(requested_point).co.X, false);
	root["channel_filter"] = add_property_json("Channel Filter", channel_filter.GetValue(requested_frame), "int", "",
		channel_filter.Contains(requested_point), channel_filter.GetCount(), -1, 10,
		channel_filter.GetClosestPoint(requested_point).interpolation, channel_filter.GetClosestPoint(requested_point).co.X, false);
	root["channel_mapping"] = add_property_json("Channel Mapping", channel_mapping.GetValue(requested_frame), "int", "",
		channel_mapping.Contains(requested_point), channel_mapping.GetCount(), -1, 10,
		channel_mapping.GetClosestPoint(requested_point).interpolation, channel_mapping.GetClosestPoint(requested_point).co.X, false);
	root["has_audio"] = add_property_json("Enable Audio", has_audio.GetValue(requested_frame), "int", "",
		has_audio.Contains(requested_point), has_audio.GetCount(), -1, 1.0,
		has_audio.GetClosestPoint(requested_point).interpolation, has_audio.GetClosestPoint(requested_point).co.X, false);
	root["has_video"] = add_property_json("Enable Video", has_video.GetValue(requested_frame), "int", "",
		has_video.Contains(requested_point), has_video.GetCount(), -1, 1.0,
		has_video.GetClosestPoint(requested_point).interpolation, has_video.GetClosestPoint(requested_point).co.X, false);

	root["wave_color"] = add_property_json("Wave Color", 0.0, "color", "",
		wave_color.red.Contains(requested_point), wave_color.red.GetCount(), 0, 255,
		wave_color.red.GetClosestPoint(requested_point).interpolation, wave_color.red.GetClosestPoint(requested_point).co.X, false);
	root["wave_color"]["red"] = add_property_json("Red", wave_color.red.GetValue(requested_frame), "float", "",
		wave_color.red.Contains(requested_point), wave_color.red.GetCount(), 0, 255,
		wave_color.red.GetClosestPoint(requested_point).interpolation, wave_color.red.GetClosestPoint(requested_point).co.X, false);
	root["wave_color"]["blue"] = add_property_json("Blue", wave_color.blue.GetValue(requested_frame), "float", "",
		wave_color.blue.Contains(requested_point), wave_color.blue.GetCount(), 0, 255,
		wave_color.blue.GetClosestPoint(requested_point).interpolation, wave_color.blue.GetClosestPoint(requested_point).co.X, false);
	root["wave_color"]["green"] = add_property_json("Green", wave_color.green.GetValue(requested_frame), "float", "",
		wave_color.green.Contains(requested_point), wave_color.green.GetCount(), 0, 255,
		wave_color.green.GetClosestPoint(requested_point).interpolation, wave_color.green.GetClosestPoint(requested_point).co.X, false);
	return root.toStyledString();
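// --- Illustrative example (not part of the original Clip.cpp) ---------------
// Minimal usage sketch for PropertiesJSON(): fetch the property description for
// frame 1 and parse it back with the same JsonCpp API this file uses. The
// include path and the "value" field name are assumptions (the latter inferred
// from add_property_json()'s parameter list, not confirmed output).
#include "Clip.h"
#include <iostream>

void example_properties(openshot::Clip& clip) {
	std::string props = clip.PropertiesJSON(1);  // JSON string for frame 1

	Json::Value root;
	Json::Reader reader;
	if (reader.parse(props, root))
		std::cout << "alpha @ frame 1: " << root["alpha"]["value"].asFloat() << std::endl;
}
// -----------------------------------------------------------------------------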
	root["scale"] = scale;

	root["waveform"] = waveform;

	root["effects"] = Json::Value(Json::arrayValue);

	// Append the JSON of each effect
	list<EffectBase*>::iterator effect_itr;
	for (effect_itr = effects.begin(); effect_itr != effects.end(); ++effect_itr)
	{
		EffectBase *existing_effect = (*effect_itr);
		root["effects"].append(existing_effect->JsonValue());
	}
	// Parse the JSON string into a Json::Value object
	bool success = reader.parse( value, root );
	if (!success)
		// Raise an exception when the JSON cannot be parsed
		throw InvalidJSON("JSON could not be parsed (or is invalid)", "");

	throw InvalidJSON("JSON is invalid (missing keys or invalid data types)", "");
	// Set data from Json (when each key is present)
	if (!root["gravity"].isNull())
	if (!root["scale"].isNull())
	if (!root["anchor"].isNull())
	if (!root["waveform"].isNull())
		waveform = root["waveform"].asBool();
	if (!root["scale_x"].isNull())
	if (!root["scale_y"].isNull())
	if (!root["location_x"].isNull())
	if (!root["location_y"].isNull())
	if (!root["alpha"].isNull())
	if (!root["rotation"].isNull())
	if (!root["time"].isNull())
	if (!root["volume"].isNull())
	if (!root["wave_color"].isNull())
	if (!root["crop_width"].isNull())
	if (!root["crop_height"].isNull())
	if (!root["crop_x"].isNull())
	if (!root["crop_y"].isNull())
	if (!root["shear_x"].isNull())
	if (!root["shear_y"].isNull())
	if (!root["channel_filter"].isNull())
	if (!root["channel_mapping"].isNull())
	if (!root["has_audio"].isNull())
	if (!root["has_video"].isNull())
	if (!root["perspective_c1_x"].isNull())
	if (!root["perspective_c1_y"].isNull())
	if (!root["perspective_c2_x"].isNull())
	if (!root["perspective_c2_y"].isNull())
	if (!root["perspective_c3_x"].isNull())
	if (!root["perspective_c3_y"].isNull())
	if (!root["perspective_c4_x"].isNull())
	if (!root["perspective_c4_y"].isNull())
	if (!root["effects"].isNull()) {

		// Loop through the array of effects
		for (int x = 0; x < root["effects"].size(); x++) {

			Json::Value existing_effect = root["effects"][x];

			if (!existing_effect["type"].isNull()) {
	if (!root["reader"].isNull())
		if (!root["reader"]["type"].isNull())

			// Track whether the current reader was already open
			bool already_open = false;

			already_open = reader->IsOpen();

			// Create a new reader matching the requested type
			string type = root["reader"]["type"].asString();

			if (type == "FFmpegReader") {

				reader = new FFmpegReader(root["reader"]["path"].asString());

			} else if (type == "QtImageReader") {

				reader = new QtImageReader(root["reader"]["path"].asString());

#ifdef USE_IMAGEMAGICK
			} else if (type == "ImageReader") {

				reader = new ImageReader(root["reader"]["path"].asString());

			} else if (type == "TextReader") {

#endif
			} else if (type == "ChunkReader") {

				reader = new ChunkReader(root["reader"]["path"].asString(), (ChunkVersion) root["reader"]["chunk_version"].asInt());

			} else if (type == "DummyReader") {

			// The clip now owns this reader, and must delete it
			manage_reader = true;
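// --- Illustrative example (not part of the original Clip.cpp) ---------------
// Sketch of a JSON payload the reader-type dispatch above accepts, driven
// through SetJson(). The path and property values are placeholders.
#include "Clip.h"

void example_set_json(openshot::Clip& clip) {
	clip.SetJson("{"
	             " \"waveform\": false,"
	             " \"reader\": {"
	             "   \"type\": \"FFmpegReader\","
	             "   \"path\": \"input.mp4\""
	             " }"
	             "}");
}
// -----------------------------------------------------------------------------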
// Sort effects by order
void Clip::sort_effects()

	// Add an effect to the clip
	effects.push_back(effect);

	// Remove an effect from the clip
	effects.remove(effect);

// Apply effects to the frame (if any)
tr1::shared_ptr<Frame> Clip::apply_effects(tr1::shared_ptr<Frame> frame)

	// Loop through the effects, and apply each one in order
	list<EffectBase*>::iterator effect_itr;
	for (effect_itr = effects.begin(); effect_itr != effects.end(); ++effect_itr)
	{
		EffectBase *effect = (*effect_itr);
		frame = effect->GetFrame(frame, frame->number);
	}
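// --- Illustrative example (not part of the original Clip.cpp) ---------------
// End-to-end usage sketch of the Clip API shown in this file. "input.mp4" is a
// placeholder path, and Keyframe::AddPoint is assumed from the libopenshot
// keyframe API (it is not shown in this listing).
#include "Clip.h"
#include <iostream>

int main() {
	openshot::Clip clip("input.mp4");  // constructor picks a reader from the extension
	clip.Open();                       // open the internal reader

	// Animate alpha: fade in from 0.0 at frame 1 to 1.0 at frame 30
	clip.alpha.AddPoint(1, 0.0);
	clip.alpha.AddPoint(30, 1.0);

	// Fetch a time-mapped, effect-processed frame
	tr1::shared_ptr<openshot::Frame> f = clip.GetFrame(15);
	std::cout << "got frame " << f->number << std::endl;

	clip.Close();
	return 0;
}
// -----------------------------------------------------------------------------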