OpenShot Library | libopenshot  0.1.2
Timeline.cpp
/**
 * @file
 * @brief Source file for Timeline class
 * @author Jonathan Thomas <jonathan@openshot.org>
 *
 * @section LICENSE
 *
 * Copyright (c) 2008-2014 OpenShot Studios, LLC
 * <http://www.openshotstudios.com/>. This file is part of
 * OpenShot Library (libopenshot), an open-source project dedicated to
 * delivering high quality video editing and animation solutions to the
 * world. For more information visit <http://www.openshot.org/>.
 *
 * OpenShot Library (libopenshot) is free software: you can redistribute it
 * and/or modify it under the terms of the GNU Lesser General Public License
 * as published by the Free Software Foundation, either version 3 of the
 * License, or (at your option) any later version.
 *
 * OpenShot Library (libopenshot) is distributed in the hope that it will be
 * useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public License
 * along with OpenShot Library. If not, see <http://www.gnu.org/licenses/>.
 */

#include "../include/Timeline.h"

using namespace openshot;

// Default Constructor for the timeline (which sets the canvas width and height)
Timeline::Timeline(int width, int height, Fraction fps, int sample_rate, int channels, ChannelLayout channel_layout) :
	is_open(false), auto_map_clips(true)
{
	// Init viewport size (curve based, because it can be animated)
	viewport_scale = Keyframe(100.0);
	viewport_x = Keyframe(0.0);
	viewport_y = Keyframe(0.0);

	// Init background color
	color.red = Keyframe(0.0);
	color.green = Keyframe(0.0);
	color.blue = Keyframe(0.0);

	// Init FileInfo struct (clear all values)
	info.width = width;
	info.height = height;
	info.fps = fps;
	info.sample_rate = sample_rate;
	info.channels = channels;
	info.channel_layout = channel_layout;
	info.video_timebase = fps.Reciprocal();
	info.duration = 60 * 30; // 30 minute default duration
	info.has_audio = true;
	info.has_video = true;
	info.video_length = info.fps.ToFloat() * info.duration;

	// Init cache
	final_cache.SetMaxBytesFromInfo(OPEN_MP_NUM_PROCESSORS * 2, info.width, info.height, info.sample_rate, info.channels);
}

// Add an openshot::Clip to the timeline
void Timeline::AddClip(Clip* clip)
{
	// All clips should be converted to the frame rate of this timeline
	if (auto_map_clips)
		// Apply framemapper (or update existing framemapper)
		apply_mapper_to_clip(clip);

	// Add clip to list
	clips.push_back(clip);

	// Sort clips
	sort_clips();
}

// Add an effect to the timeline
void Timeline::AddEffect(EffectBase* effect)
{
	// Add effect to list
	effects.push_back(effect);

	// Sort effects
	sort_effects();
}

// Remove an effect from the timeline
void Timeline::RemoveEffect(EffectBase* effect)
{
	effects.remove(effect);
}

// Remove an openshot::Clip from the timeline
void Timeline::RemoveClip(Clip* clip)
{
	clips.remove(clip);
}

// Apply a FrameMapper to a clip which matches the settings of this timeline
void Timeline::apply_mapper_to_clip(Clip* clip)
{
	// Determine type of reader
	ReaderBase* clip_reader = NULL;
	if (clip->Reader()->Name() == "FrameMapper")
	{
		// Get the existing reader
		clip_reader = (ReaderBase*) clip->Reader();

	} else {

		// Create a new FrameMapper to wrap the current reader
		clip_reader = (ReaderBase*) new FrameMapper(clip->Reader(), info.fps, PULLDOWN_NONE, info.sample_rate, info.channels, info.channel_layout);
	}

	// Update the mapping
	FrameMapper* clip_mapped_reader = (FrameMapper*) clip_reader;
	clip_mapped_reader->ChangeMapping(info.fps, PULLDOWN_NONE, info.sample_rate, info.channels, info.channel_layout);

	// Update clip reader
	clip->Reader(clip_reader);
}

// Apply the timeline's framerate and samplerate to all clips
void Timeline::ApplyMapperToClips()
{
	// Clear all cached frames
	final_cache.Clear();

	// Loop through all clips
	list<Clip*>::iterator clip_itr;
	for (clip_itr=clips.begin(); clip_itr != clips.end(); ++clip_itr)
	{
		// Get clip object from the iterator
		Clip *clip = (*clip_itr);

		// Apply framemapper (or update existing framemapper)
		apply_mapper_to_clip(clip);
	}
}

// Calculate time of a frame number, based on a framerate
float Timeline::calculate_time(long int number, Fraction rate)
{
	// Get float version of fps fraction
	float raw_fps = rate.ToFloat();

	// Return the time (in seconds) of this frame
	return float(number - 1) / raw_fps;
}
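
// Worked example of the mapping above: on a 30000/1001 (~29.97 fps) timeline,
// frame 1 maps to (1 - 1) / 29.97 = 0.0 seconds, and frame 31 maps to
// (31 - 1) / 29.97, approximately 1.001 seconds. The inverse mapping used
// throughout this file is: frame = round(time * fps) + 1.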

// Apply effects to the source frame (if any)
tr1::shared_ptr<Frame> Timeline::apply_effects(tr1::shared_ptr<Frame> frame, long int timeline_frame_number, int layer)
{
	// Calculate time of frame
	float requested_time = calculate_time(timeline_frame_number, info.fps);

	// Debug output
	ZmqLogger::Instance()->AppendDebugMethod("Timeline::apply_effects", "requested_time", requested_time, "frame->number", frame->number, "timeline_frame_number", timeline_frame_number, "layer", layer, "", -1, "", -1);

	// Find Effects at this position and layer
	list<EffectBase*>::iterator effect_itr;
	for (effect_itr=effects.begin(); effect_itr != effects.end(); ++effect_itr)
	{
		// Get effect object from the iterator
		EffectBase *effect = (*effect_itr);

		// Does the effect intersect the current requested time
		float effect_duration = effect->End() - effect->Start();
		bool does_effect_intersect = (effect->Position() <= requested_time && effect->Position() + effect_duration >= requested_time && effect->Layer() == layer);

		// Debug output
		ZmqLogger::Instance()->AppendDebugMethod("Timeline::apply_effects (Does effect intersect)", "effect->Position()", effect->Position(), "requested_time", requested_time, "does_effect_intersect", does_effect_intersect, "timeline_frame_number", timeline_frame_number, "layer", layer, "effect_duration", effect_duration);

		// Effect is visible
		if (does_effect_intersect)
		{
			// Determine the frame needed for this effect (based on the position on the timeline)
			float time_diff = (requested_time - effect->Position()) + effect->Start();
			int effect_frame_number = round(time_diff * info.fps.ToFloat()) + 1;

			// Debug output
			ZmqLogger::Instance()->AppendDebugMethod("Timeline::apply_effects (Process Effect)", "time_diff", time_diff, "effect_frame_number", effect_frame_number, "effect_duration", effect_duration, "does_effect_intersect", does_effect_intersect, "", -1, "", -1);

			// Apply the effect to this frame
			frame = effect->GetFrame(frame, effect_frame_number);
		}

	} // end effect loop

	// Return modified frame
	return frame;
}

// Get or generate a blank frame
tr1::shared_ptr<Frame> Timeline::GetOrCreateFrame(Clip* clip, long int number)
{
	tr1::shared_ptr<Frame> new_frame;

	// Init some basic properties about this frame
	int samples_in_frame = Frame::GetSamplesPerFrame(number, info.fps, info.sample_rate, info.channels);

	try {
		// Debug output
		ZmqLogger::Instance()->AppendDebugMethod("Timeline::GetOrCreateFrame (from reader)", "number", number, "samples_in_frame", samples_in_frame, "", -1, "", -1, "", -1, "", -1);

		// Attempt to get a frame (but this could fail if a reader has just been closed)
		new_frame = tr1::shared_ptr<Frame>(clip->GetFrame(number));

		// Return real frame
		return new_frame;

	} catch (const ReaderClosed & e) {
		// ...
	} catch (const TooManySeeks & e) {
		// ...
	} catch (const OutOfBoundsFrame & e) {
		// ...
	}

	// Debug output
	ZmqLogger::Instance()->AppendDebugMethod("Timeline::GetOrCreateFrame (create blank)", "number", number, "samples_in_frame", samples_in_frame, "", -1, "", -1, "", -1, "", -1);

	// Create blank frame
	new_frame = tr1::shared_ptr<Frame>(new Frame(number, info.width, info.height, "#000000", samples_in_frame, info.channels));
	new_frame->SampleRate(info.sample_rate);
	new_frame->ChannelsLayout(info.channel_layout);
	return new_frame;
}
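
// Note on the fallback above: GetOrCreateFrame() deliberately swallows
// ReaderClosed, TooManySeeks and OutOfBoundsFrame, and returns a solid black
// frame (with the expected number of audio samples) instead. Callers such as
// add_layer() therefore never need to handle reader failures themselves.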

// Process a new layer of video or audio
void Timeline::add_layer(tr1::shared_ptr<Frame> new_frame, Clip* source_clip, long int clip_frame_number, long int timeline_frame_number, bool is_top_clip)
{
	// Get the clip's frame & image
	tr1::shared_ptr<Frame> source_frame = GetOrCreateFrame(source_clip, clip_frame_number);

	// No frame found... so bail
	if (!source_frame)
		return;

	// Debug output
	ZmqLogger::Instance()->AppendDebugMethod("Timeline::add_layer", "new_frame->number", new_frame->number, "clip_frame_number", clip_frame_number, "timeline_frame_number", timeline_frame_number, "", -1, "", -1, "", -1);

	/* REPLACE IMAGE WITH WAVEFORM IMAGE (IF NEEDED) */
	if (source_clip->Waveform())
	{
		// Debug output
		ZmqLogger::Instance()->AppendDebugMethod("Timeline::add_layer (Generate Waveform Image)", "source_frame->number", source_frame->number, "source_clip->Waveform()", source_clip->Waveform(), "clip_frame_number", clip_frame_number, "", -1, "", -1, "", -1);

		// Get the color of the waveform
		int red = source_clip->wave_color.red.GetInt(clip_frame_number);
		int green = source_clip->wave_color.green.GetInt(clip_frame_number);
		int blue = source_clip->wave_color.blue.GetInt(clip_frame_number);
		int alpha = source_clip->wave_color.alpha.GetInt(clip_frame_number);

		// Generate Waveform Dynamically (the size of the timeline)
		tr1::shared_ptr<QImage> source_image = source_frame->GetWaveform(info.width, info.height, red, green, blue, alpha);
		source_frame->AddImage(tr1::shared_ptr<QImage>(source_image));
	}

	/* Apply effects to the source frame (if any). If multiple clips are overlapping, only process the
	 * effects on the top clip. */
	if (is_top_clip)
		source_frame = apply_effects(source_frame, timeline_frame_number, source_clip->Layer());

	// Declare an image to hold the source frame's image
	tr1::shared_ptr<QImage> source_image;

	/* COPY AUDIO - with correct volume */
	if (source_clip->Reader()->info.has_audio) {

		// Debug output
		ZmqLogger::Instance()->AppendDebugMethod("Timeline::add_layer (Copy Audio)", "source_clip->Reader()->info.has_audio", source_clip->Reader()->info.has_audio, "source_frame->GetAudioChannelsCount()", source_frame->GetAudioChannelsCount(), "info.channels", info.channels, "clip_frame_number", clip_frame_number, "timeline_frame_number", timeline_frame_number, "", -1);

		if (source_frame->GetAudioChannelsCount() == info.channels)
			for (int channel = 0; channel < source_frame->GetAudioChannelsCount(); channel++)
			{
				float initial_volume = 1.0f;
				float previous_volume = source_clip->volume.GetValue(clip_frame_number - 1); // previous frame's percentage of volume (0 to 1)
				float volume = source_clip->volume.GetValue(clip_frame_number); // percentage of volume (0 to 1)
				int channel_filter = source_clip->channel_filter.GetInt(clip_frame_number); // optional channel to filter (if not -1)
				int channel_mapping = source_clip->channel_mapping.GetInt(clip_frame_number); // optional channel to map this channel to (if not -1)

				// If channel filter enabled, check for correct channel (and skip non-matching channels)
				if (channel_filter != -1 && channel_filter != channel)
					continue; // skip to next channel

				// If channel mapping disabled, just use the current channel
				if (channel_mapping == -1)
					channel_mapping = channel;

				// If no ramp needed, set initial volume = clip's volume
				if (isEqual(previous_volume, volume))
					initial_volume = volume;

				// Apply ramp to source frame (if needed)
				if (!isEqual(previous_volume, volume))
					source_frame->ApplyGainRamp(channel_mapping, 0, source_frame->GetAudioSamplesCount(), previous_volume, volume);

				// TODO: Improve FrameMapper (or Timeline) to always get the correct number of samples per frame.
				// Currently, the ResampleContext sometimes leaves behind a few samples for the next call, and the
				// number of samples returned is variable... and does not match the number expected.
				// This is a crude solution at best. =)
				if (new_frame->GetAudioSamplesCount() != source_frame->GetAudioSamplesCount())
					// Force timeline frame to match the source frame
					new_frame->ResizeAudio(info.channels, source_frame->GetAudioSamplesCount(), info.sample_rate, info.channel_layout);

				// Copy audio samples (and set initial volume). Mix samples with existing audio samples. The gains are added together,
				// so be sure to set the gains correctly: if the sum exceeds 1.0, audio distortion will happen.
				new_frame->AddAudio(false, channel_mapping, 0, source_frame->GetAudioSamples(channel), source_frame->GetAudioSamplesCount(), initial_volume);

			}
		else
			// Debug output
			ZmqLogger::Instance()->AppendDebugMethod("Timeline::add_layer (No Audio Copied - Wrong # of Channels)", "source_clip->Reader()->info.has_audio", source_clip->Reader()->info.has_audio, "source_frame->GetAudioChannelsCount()", source_frame->GetAudioChannelsCount(), "info.channels", info.channels, "clip_frame_number", clip_frame_number, "timeline_frame_number", timeline_frame_number, "", -1);

	}
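	// Example of the volume ramp above: if a clip's volume keyframe moves from
	// 0.5 on the previous frame to 1.0 on this frame, ApplyGainRamp() sweeps the
	// gain across this frame's samples (0.5 -> 1.0) instead of jumping, which
	// avoids an audible click at the frame boundary.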

	// Skip out if only an audio frame
	if (!source_clip->Waveform() && !source_clip->Reader()->info.has_video)
		// Skip the rest of the image processing for performance reasons
		return;

	// Debug output
	ZmqLogger::Instance()->AppendDebugMethod("Timeline::add_layer (Get Source Image)", "source_frame->number", source_frame->number, "source_clip->Waveform()", source_clip->Waveform(), "clip_frame_number", clip_frame_number, "", -1, "", -1, "", -1);

	// Get actual frame image data
	source_image = source_frame->GetImage();

	// Get some basic image properties
	int source_width = source_image->width();
	int source_height = source_image->height();

	/* ALPHA & OPACITY */
	if (source_clip->alpha.GetValue(clip_frame_number) != 1.0)
	{
		float alpha = source_clip->alpha.GetValue(clip_frame_number);

		// Get source image's pixels
		unsigned char *pixels = (unsigned char *) source_image->bits();

		// Loop through pixels
		for (int pixel = 0, byte_index=0; pixel < source_image->width() * source_image->height(); pixel++, byte_index+=4)
		{
			// Get the alpha value from the pixel
			int A = pixels[byte_index + 3];

			// Apply alpha to pixel
			pixels[byte_index + 3] *= alpha;
		}

		// Debug output
		ZmqLogger::Instance()->AppendDebugMethod("Timeline::add_layer (Set Alpha & Opacity)", "alpha", alpha, "source_frame->number", source_frame->number, "clip_frame_number", clip_frame_number, "", -1, "", -1, "", -1);
	}
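	// Note: the loop above assumes 4 bytes per pixel, with alpha in the 4th
	// byte (i.e. a 32-bit QImage format); byte_index advances 4 bytes per
	// pixel and only the alpha byte is scaled.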

	/* RESIZE SOURCE IMAGE - based on scale type */
	switch (source_clip->scale)
	{
	case (SCALE_FIT):
		// keep aspect ratio
		source_image = tr1::shared_ptr<QImage>(new QImage(source_image->scaled(info.width, info.height, Qt::KeepAspectRatio, Qt::SmoothTransformation)));
		source_width = source_image->width();
		source_height = source_image->height();

		// Debug output
		ZmqLogger::Instance()->AppendDebugMethod("Timeline::add_layer (Scale: SCALE_FIT)", "source_frame->number", source_frame->number, "source_width", source_width, "source_height", source_height, "", -1, "", -1, "", -1);
		break;

	case (SCALE_STRETCH):
		// ignore aspect ratio
		source_image = tr1::shared_ptr<QImage>(new QImage(source_image->scaled(info.width, info.height, Qt::IgnoreAspectRatio, Qt::SmoothTransformation)));
		source_width = source_image->width();
		source_height = source_image->height();

		// Debug output
		ZmqLogger::Instance()->AppendDebugMethod("Timeline::add_layer (Scale: SCALE_STRETCH)", "source_frame->number", source_frame->number, "source_width", source_width, "source_height", source_height, "", -1, "", -1, "", -1);
		break;

	case (SCALE_CROP):
		QSize width_size(info.width, round(info.width / (float(source_width) / float(source_height))));
		QSize height_size(round(info.height / (float(source_height) / float(source_width))), info.height);

		// respect aspect ratio
		if (width_size.width() >= info.width && width_size.height() >= info.height)
			source_image = tr1::shared_ptr<QImage>(new QImage(source_image->scaled(width_size.width(), width_size.height(), Qt::KeepAspectRatio, Qt::SmoothTransformation)));
		else
			source_image = tr1::shared_ptr<QImage>(new QImage(source_image->scaled(height_size.width(), height_size.height(), Qt::KeepAspectRatio, Qt::SmoothTransformation))); // height is larger, so resize to it
		source_width = source_image->width();
		source_height = source_image->height();

		// Debug output
		ZmqLogger::Instance()->AppendDebugMethod("Timeline::add_layer (Scale: SCALE_CROP)", "source_frame->number", source_frame->number, "source_width", source_width, "source_height", source_height, "", -1, "", -1, "", -1);
		break;
	}
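	// Example of the SCALE_CROP math above: on a 1920x1080 canvas, a 1280x720
	// (16:9) source gives width_size = height_size = 1920x1080, so either
	// branch fills the canvas exactly. A 1440x1080 (4:3) source gives
	// width_size = 1920x1440, which covers the canvas; the 360 extra rows of
	// height extend past the canvas and are cropped by the composite step below.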

	/* GRAVITY LOCATION - Initialize X & Y to the correct values (before applying location curves) */
	float x = 0.0; // left
	float y = 0.0; // top

	// Adjust size for scale x and scale y
	float sx = source_clip->scale_x.GetValue(clip_frame_number); // percentage X scale
	float sy = source_clip->scale_y.GetValue(clip_frame_number); // percentage Y scale
	float scaled_source_width = source_width * sx;
	float scaled_source_height = source_height * sy;

	switch (source_clip->gravity)
	{
	case (GRAVITY_TOP):
		x = (info.width - scaled_source_width) / 2.0; // center
		break;
	case (GRAVITY_TOP_RIGHT):
		x = info.width - scaled_source_width; // right
		break;
	case (GRAVITY_LEFT):
		y = (info.height - scaled_source_height) / 2.0; // center
		break;
	case (GRAVITY_CENTER):
		x = (info.width - scaled_source_width) / 2.0; // center
		y = (info.height - scaled_source_height) / 2.0; // center
		break;
	case (GRAVITY_RIGHT):
		x = info.width - scaled_source_width; // right
		y = (info.height - scaled_source_height) / 2.0; // center
		break;
	case (GRAVITY_BOTTOM_LEFT):
		y = (info.height - scaled_source_height); // bottom
		break;
	case (GRAVITY_BOTTOM):
		x = (info.width - scaled_source_width) / 2.0; // center
		y = (info.height - scaled_source_height); // bottom
		break;
	case (GRAVITY_BOTTOM_RIGHT):
		x = info.width - scaled_source_width; // right
		y = (info.height - scaled_source_height); // bottom
		break;
	}
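	// Example: GRAVITY_CENTER on a 1920x1080 canvas with a source scaled to
	// 960x540 yields x = (1920 - 960) / 2 = 480 and y = (1080 - 540) / 2 = 270,
	// before the location_x/location_y offsets below are added.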

	// Debug output
	ZmqLogger::Instance()->AppendDebugMethod("Timeline::add_layer (Gravity)", "source_frame->number", source_frame->number, "source_clip->gravity", source_clip->gravity, "info.width", info.width, "source_width", source_width, "info.height", info.height, "source_height", source_height);

	/* LOCATION, ROTATION, AND SCALE */
	float r = source_clip->rotation.GetValue(clip_frame_number); // rotate in degrees
	x += (info.width * source_clip->location_x.GetValue(clip_frame_number)); // move in percentage of final width
	y += (info.height * source_clip->location_y.GetValue(clip_frame_number)); // move in percentage of final height
	bool is_x_animated = source_clip->location_x.Points.size() > 1;
	bool is_y_animated = source_clip->location_y.Points.size() > 1;

	int offset_x = -1;
	int offset_y = -1;
	bool transformed = false;
	QTransform transform;
	if ((!isEqual(x, 0) || !isEqual(y, 0)) && (isEqual(r, 0) && isEqual(sx, 1) && isEqual(sy, 1) && !is_x_animated && !is_y_animated))
	{
		// SIMPLE OFFSET
		ZmqLogger::Instance()->AppendDebugMethod("Timeline::add_layer (Transform: SIMPLE)", "source_frame->number", source_frame->number, "x", x, "y", y, "r", r, "sx", sx, "sy", sy);

		// If only X and Y are different, and no animation is being used (just set the offset for speed)
		transformed = true;

		// Set QTransform
		transform.translate(x, y);

	} else if (!isEqual(r, 0) || !isEqual(x, 0) || !isEqual(y, 0) || !isEqual(sx, 1) || !isEqual(sy, 1))
	{
		// COMPLEX DISTORTION
		ZmqLogger::Instance()->AppendDebugMethod("Timeline::add_layer (Transform: COMPLEX)", "source_frame->number", source_frame->number, "x", x, "y", y, "r", r, "sx", sx, "sy", sy);

		// Use the QTransform object, which can be very CPU intensive
		transformed = true;

		// Set QTransform
		if (!isEqual(r, 0)) {
			// ROTATE CLIP
			float origin_x = x + (source_width / 2.0);
			float origin_y = y + (source_height / 2.0);
			transform.translate(origin_x, origin_y);
			transform.rotate(r);
			transform.translate(-origin_x,-origin_y);
		}

		// Set QTransform
		if (!isEqual(x, 0) || !isEqual(y, 0)) {
			// TRANSLATE/MOVE CLIP
			transform.translate(x, y);
		}

		if (!isEqual(sx, 0) || !isEqual(sy, 0)) {
			// SCALE CLIP
			transform.scale(sx, sy);
		}

		// Debug output
		ZmqLogger::Instance()->AppendDebugMethod("Timeline::add_layer (Transform: COMPLEX: Completed ScaleRotateTranslateDistortion)", "source_frame->number", source_frame->number, "x", x, "y", y, "r", r, "sx", sx, "sy", sy);
	}
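	// The rotate branch above translates to the clip's center, rotates, and
	// translates back, so the clip spins around its own center point
	// (origin_x, origin_y) rather than around the canvas origin at (0, 0).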

	// Debug output
	ZmqLogger::Instance()->AppendDebugMethod("Timeline::add_layer (Transform: Composite Image Layer: Prepare)", "source_frame->number", source_frame->number, "offset_x", offset_x, "offset_y", offset_y, "new_frame->GetImage()->width()", new_frame->GetImage()->width(), "transformed", transformed, "", -1);

	/* COMPOSITE SOURCE IMAGE (LAYER) ONTO FINAL IMAGE */
	tr1::shared_ptr<QImage> new_image = new_frame->GetImage();

	// Load timeline's new frame image into a QPainter
	QPainter painter(new_image.get());
	painter.setRenderHints(QPainter::Antialiasing | QPainter::SmoothPixmapTransform | QPainter::TextAntialiasing, true);

	// Apply transform (translate, rotate, scale)... if any
	if (transformed)
		painter.setTransform(transform);

	// Composite a new layer onto the image
	painter.setCompositionMode(QPainter::CompositionMode_SourceOver);
	painter.drawImage(0, 0, *source_image);
	painter.end();

	// Debug output
	ZmqLogger::Instance()->AppendDebugMethod("Timeline::add_layer (Transform: Composite Image Layer: Completed)", "source_frame->number", source_frame->number, "offset_x", offset_x, "offset_y", offset_y, "new_frame->GetImage()->width()", new_frame->GetImage()->width(), "transformed", transformed, "", -1);
}

// Update the list of 'opened' clips
void Timeline::update_open_clips(Clip *clip, bool does_clip_intersect)
{
	ZmqLogger::Instance()->AppendDebugMethod("Timeline::update_open_clips (before)", "does_clip_intersect", does_clip_intersect, "closing_clips.size()", closing_clips.size(), "open_clips.size()", open_clips.size(), "", -1, "", -1, "", -1);

	// is clip already in list?
	bool clip_found = open_clips.count(clip);

	if (clip_found && !does_clip_intersect)
	{
		// Remove clip from 'opened' list, because it's closed now
		open_clips.erase(clip);

		// Close clip
		clip->Close();
	}
	else if (!clip_found && does_clip_intersect)
	{
		// Add clip to 'opened' list, because it's missing
		open_clips[clip] = clip;

		// Open the clip
		clip->Open();
	}

	// Debug output
	ZmqLogger::Instance()->AppendDebugMethod("Timeline::update_open_clips (after)", "does_clip_intersect", does_clip_intersect, "clip_found", clip_found, "closing_clips.size()", closing_clips.size(), "open_clips.size()", open_clips.size(), "", -1, "", -1);
}

// Sort clips by position on the timeline
void Timeline::sort_clips()
{
	// Debug output
	ZmqLogger::Instance()->AppendDebugMethod("Timeline::SortClips", "clips.size()", clips.size(), "", -1, "", -1, "", -1, "", -1, "", -1);

	// sort clips
	clips.sort(CompareClips());
}

// Sort effects by position on the timeline
void Timeline::sort_effects()
{
	// sort effects
	effects.sort(CompareEffects());
}

// Close the reader (and any resources it was consuming)
void Timeline::Close()
{
	ZmqLogger::Instance()->AppendDebugMethod("Timeline::Close", "", -1, "", -1, "", -1, "", -1, "", -1, "", -1);

	// Close all open clips
	list<Clip*>::iterator clip_itr;
	for (clip_itr=clips.begin(); clip_itr != clips.end(); ++clip_itr)
	{
		// Get clip object from the iterator
		Clip *clip = (*clip_itr);

		// Open or Close this clip, based on if it's intersecting or not
		update_open_clips(clip, false);
	}

	// Mark timeline as closed
	is_open = false;

	// Clear cache
	final_cache.Clear();
}

// Open the reader (and start consuming resources)
void Timeline::Open()
{
	is_open = true;
}

// Compare 2 floating point numbers for equality
bool Timeline::isEqual(double a, double b)
{
	return fabs(a - b) < 0.000001;
}
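
// Example: isEqual(1.0, 1.0000004) returns true, since the difference (4e-7)
// is below the 1e-6 tolerance, while isEqual(1.0, 1.00001) returns false.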

// Get an openshot::Frame object for a specific frame number of this reader.
tr1::shared_ptr<Frame> Timeline::GetFrame(long int requested_frame) throw(ReaderClosed, OutOfBoundsFrame)
{
	// Check for open reader (or throw exception)
	if (!is_open)
		throw ReaderClosed("The Timeline is closed. Call Open() before calling this method.", "");

	// Adjust out of bounds frame number
	if (requested_frame < 1)
		requested_frame = 1;

	// Check cache
	tr1::shared_ptr<Frame> frame = final_cache.GetFrame(requested_frame);
	if (frame) {
		// Debug output
		ZmqLogger::Instance()->AppendDebugMethod("Timeline::GetFrame (Cached frame found)", "requested_frame", requested_frame, "", -1, "", -1, "", -1, "", -1, "", -1);

		// Return cached frame
		return frame;
	}
	else
	{
		// Create a scoped lock, allowing only a single thread to run the following code at one time
		const GenericScopedLock<CriticalSection> lock(getFrameCriticalSection);

		// Check cache again (due to locking)
		frame = final_cache.GetFrame(requested_frame);
		if (frame) {
			// Debug output
			ZmqLogger::Instance()->AppendDebugMethod("Timeline::GetFrame (Cached frame found on 2nd look)", "requested_frame", requested_frame, "", -1, "", -1, "", -1, "", -1, "", -1);

			// Return cached frame
			return frame;
		}
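		// The two cache probes above form a double-checked lock: the first probe
		// runs without locking for the common (cached) case, and the probe is
		// repeated under getFrameCriticalSection in case another thread finished
		// building this frame while we waited for the lock.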

		// Minimum number of frames to process (for performance reasons)
		int minimum_frames = OPEN_MP_NUM_PROCESSORS;

		// Get a list of clips that intersect with the requested section of timeline
		// This also opens the readers for intersecting clips, and marks non-intersecting clips as 'needs closing'
		vector<Clip*> nearby_clips = find_intersecting_clips(requested_frame, minimum_frames, true);

		omp_set_num_threads(OPEN_MP_NUM_PROCESSORS);
		// Allow nested OpenMP sections
		omp_set_nested(true);

		// Debug output
		ZmqLogger::Instance()->AppendDebugMethod("Timeline::GetFrame", "requested_frame", requested_frame, "minimum_frames", minimum_frames, "OPEN_MP_NUM_PROCESSORS", OPEN_MP_NUM_PROCESSORS, "", -1, "", -1, "", -1);

		// GENERATE CACHE FOR CLIPS (IN FRAME # SEQUENCE)
		// Determine all clip frames, and request them in order (to keep resampled audio in sequence)
		for (long int frame_number = requested_frame; frame_number < requested_frame + minimum_frames; frame_number++)
		{
			// Calculate time of timeline frame
			float requested_time = calculate_time(frame_number, info.fps);

			// Loop through clips
			for (int clip_index = 0; clip_index < nearby_clips.size(); clip_index++)
			{
				// Get clip object from the vector
				Clip *clip = nearby_clips[clip_index];
				bool does_clip_intersect = (clip->Position() <= requested_time && clip->Position() + clip->Duration() >= requested_time);
				if (does_clip_intersect)
				{
					// Get clip frame #
					float time_diff = (requested_time - clip->Position()) + clip->Start();
					int clip_frame_number = round(time_diff * info.fps.ToFloat()) + 1;

					// Cache clip object
					clip->GetFrame(clip_frame_number);
				}
			}
		}
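		// The warm-up pass above requests every needed clip frame in increasing
		// order, on a single thread, before the parallel section below. This keeps
		// each clip's resampled audio in sequence; the parallel loop can then pull
		// the same frames from the clip caches in any order.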

		#pragma omp parallel
		{
			// Loop through all requested frames
			#pragma omp for ordered firstprivate(nearby_clips, requested_frame, minimum_frames)
			for (long int frame_number = requested_frame; frame_number < requested_frame + minimum_frames; frame_number++)
			{
				// Debug output
				ZmqLogger::Instance()->AppendDebugMethod("Timeline::GetFrame (processing frame)", "frame_number", frame_number, "omp_get_thread_num()", omp_get_thread_num(), "", -1, "", -1, "", -1, "", -1);

				// Init some basic properties about this frame
				int samples_in_frame = Frame::GetSamplesPerFrame(frame_number, info.fps, info.sample_rate, info.channels);

				// Create blank frame (which will become the requested frame)
				tr1::shared_ptr<Frame> new_frame(tr1::shared_ptr<Frame>(new Frame(frame_number, info.width, info.height, "#000000", samples_in_frame, info.channels)));
				new_frame->SampleRate(info.sample_rate);
				new_frame->ChannelsLayout(info.channel_layout);

				// Debug output
				ZmqLogger::Instance()->AppendDebugMethod("Timeline::GetFrame (Adding solid color)", "frame_number", frame_number, "info.width", info.width, "info.height", info.height, "", -1, "", -1, "", -1);

				// Add Background Color to 1st layer (if animated or not black)
				if ((color.red.Points.size() > 1 || color.green.Points.size() > 1 || color.blue.Points.size() > 1) ||
					(color.red.GetValue(frame_number) != 0.0 || color.green.GetValue(frame_number) != 0.0 || color.blue.GetValue(frame_number) != 0.0))
					new_frame->AddColor(info.width, info.height, color.GetColorHex(frame_number));

				// Calculate time of frame
				float requested_time = calculate_time(frame_number, info.fps);

				// Debug output
				ZmqLogger::Instance()->AppendDebugMethod("Timeline::GetFrame (Loop through clips)", "frame_number", frame_number, "requested_time", requested_time, "clips.size()", clips.size(), "nearby_clips.size()", nearby_clips.size(), "", -1, "", -1);

				// Find Clips near this time
				for (int clip_index = 0; clip_index < nearby_clips.size(); clip_index++)
				{
					// Get clip object from the vector
					Clip *clip = nearby_clips[clip_index];

					// Does clip intersect the current requested time
					bool does_clip_intersect = (clip->Position() <= requested_time && clip->Position() + clip->Duration() >= requested_time);

					// Debug output
					ZmqLogger::Instance()->AppendDebugMethod("Timeline::GetFrame (Does clip intersect)", "frame_number", frame_number, "requested_time", requested_time, "clip->Position()", clip->Position(), "clip->Duration()", clip->Duration(), "does_clip_intersect", does_clip_intersect, "", -1);

					// Clip is visible
					if (does_clip_intersect)
					{
						// Determine if clip is "top" clip on this layer (only happens when multiple clips are overlapping)
						bool is_top_clip = true;
						for (int top_clip_index = 0; top_clip_index < nearby_clips.size(); top_clip_index++)
						{
							Clip *nearby_clip = nearby_clips[top_clip_index];
							if (clip->Id() != nearby_clip->Id() && clip->Layer() == nearby_clip->Layer() &&
								nearby_clip->Position() <= requested_time && nearby_clip->Position() + nearby_clip->Duration() >= requested_time &&
								nearby_clip->Position() > clip->Position()) {
								is_top_clip = false;
								break;
							}
						}

						// Determine the frame needed for this clip (based on the position on the timeline)
						float time_diff = (requested_time - clip->Position()) + clip->Start();
						int clip_frame_number = round(time_diff * info.fps.ToFloat()) + 1;

						// Debug output
						ZmqLogger::Instance()->AppendDebugMethod("Timeline::GetFrame (Calculate clip's frame #)", "time_diff", time_diff, "requested_time", requested_time, "clip->Position()", clip->Position(), "clip->Start()", clip->Start(), "info.fps.ToFloat()", info.fps.ToFloat(), "clip_frame_number", clip_frame_number);

						// Add clip's frame as layer
						add_layer(new_frame, clip, clip_frame_number, frame_number, is_top_clip);

					} else
						// Debug output
						ZmqLogger::Instance()->AppendDebugMethod("Timeline::GetFrame (clip does not intersect)", "frame_number", frame_number, "requested_time", requested_time, "does_clip_intersect", does_clip_intersect, "", -1, "", -1, "", -1);

				} // end clip loop

				// Debug output
				ZmqLogger::Instance()->AppendDebugMethod("Timeline::GetFrame (Add frame to cache)", "frame_number", frame_number, "info.width", info.width, "info.height", info.height, "", -1, "", -1, "", -1);

				// Add final frame to cache
				final_cache.Add(frame_number, new_frame);

			} // end frame loop
		} // end parallel

		// Debug output
		ZmqLogger::Instance()->AppendDebugMethod("Timeline::GetFrame (end parallel region)", "requested_frame", requested_frame, "omp_get_thread_num()", omp_get_thread_num(), "", -1, "", -1, "", -1, "", -1);

		// Return frame (or blank frame)
		return final_cache.GetFrame(requested_frame);
	}
}

// Find intersecting clips (or non-intersecting clips)
vector<Clip*> Timeline::find_intersecting_clips(long int requested_frame, int number_of_frames, bool include)
{
	// Find matching clips
	vector<Clip*> matching_clips;

	// Calculate time of frame
	float min_requested_time = calculate_time(requested_frame, info.fps);
	float max_requested_time = calculate_time(requested_frame + (number_of_frames - 1), info.fps);

	// Re-Sort Clips (since they likely changed)
	sort_clips();

	// Find Clips at this time
	list<Clip*>::iterator clip_itr;
	for (clip_itr=clips.begin(); clip_itr != clips.end(); ++clip_itr)
	{
		// Get clip object from the iterator
		Clip *clip = (*clip_itr);

		// Does clip intersect the current requested time
		float clip_duration = clip->End() - clip->Start();
		bool does_clip_intersect = (clip->Position() <= min_requested_time && clip->Position() + clip_duration >= min_requested_time) ||
								   (clip->Position() > min_requested_time && clip->Position() <= max_requested_time);

		// Debug output
		ZmqLogger::Instance()->AppendDebugMethod("Timeline::find_intersecting_clips (Is clip near or intersecting)", "requested_frame", requested_frame, "min_requested_time", min_requested_time, "max_requested_time", max_requested_time, "clip->Position()", clip->Position(), "clip_duration", clip_duration, "does_clip_intersect", does_clip_intersect);

		// Open (or schedule for closing) this clip, based on if it's intersecting or not
		#pragma omp critical (reader_lock)
		update_open_clips(clip, does_clip_intersect);

		// Clip is visible
		if (does_clip_intersect && include)
			// Add the intersecting clip
			matching_clips.push_back(clip);

		else if (!does_clip_intersect && !include)
			// Add the non-intersecting clip
			matching_clips.push_back(clip);

	} // end clip loop

	// return list
	return matching_clips;
}

// Generate JSON string of this object
string Timeline::Json() {

	// Return formatted string
	return JsonValue().toStyledString();
}

// Generate Json::JsonValue for this object
Json::Value Timeline::JsonValue() {

	// Create root json object
	Json::Value root = ReaderBase::JsonValue(); // get parent properties
	root["type"] = "Timeline";
	root["viewport_scale"] = viewport_scale.JsonValue();
	root["viewport_x"] = viewport_x.JsonValue();
	root["viewport_y"] = viewport_y.JsonValue();
	root["color"] = color.JsonValue();

	// Add array of clips
	root["clips"] = Json::Value(Json::arrayValue);

	// Loop through clips
	list<Clip*>::iterator clip_itr;
	for (clip_itr=clips.begin(); clip_itr != clips.end(); ++clip_itr)
	{
		// Get clip object from the iterator
		Clip *existing_clip = (*clip_itr);
		root["clips"].append(existing_clip->JsonValue());
	}

	// Add array of effects
	root["effects"] = Json::Value(Json::arrayValue);

	// Loop through effects
	list<EffectBase*>::iterator effect_itr;
	for (effect_itr=effects.begin(); effect_itr != effects.end(); ++effect_itr)
	{
		// Get effect object from the iterator
		EffectBase *existing_effect = (*effect_itr);
		root["effects"].append(existing_effect->JsonValue());
	}

	// return JsonValue
	return root;
}

// Load JSON string into this object
void Timeline::SetJson(string value) throw(InvalidJSON) {

	// Parse JSON string into JSON objects
	Json::Value root;
	Json::Reader reader;
	bool success = reader.parse( value, root );
	if (!success)
		// Raise exception
		throw InvalidJSON("JSON could not be parsed (or is invalid)", "");

	try
	{
		// Set all values that match
		SetJsonValue(root);
	}
	catch (const exception& e)
	{
		// Error parsing JSON (or missing keys)
		throw InvalidJSON("JSON is invalid (missing keys or invalid data types)", "");
	}
}

// Load Json::JsonValue into this object
void Timeline::SetJsonValue(Json::Value root) throw(InvalidFile, ReaderClosed) {

	// Close timeline before we do anything (this also removes all open and closing clips)
	Close();

	// Set parent data
	ReaderBase::SetJsonValue(root);

	if (!root["clips"].isNull()) {
		// Clear existing clips
		clips.clear();

		// loop through clips
		for (int x = 0; x < root["clips"].size(); x++) {
			// Get each clip
			Json::Value existing_clip = root["clips"][x];

			// Create Clip
			Clip *c = new Clip();

			// Load Json into Clip
			c->SetJsonValue(existing_clip);

			// Add Clip to Timeline
			AddClip(c);
		}
	}

	if (!root["effects"].isNull()) {
		// Clear existing effects
		effects.clear();

		// loop through effects
		for (int x = 0; x < root["effects"].size(); x++) {
			// Get each effect
			Json::Value existing_effect = root["effects"][x];

			// Create Effect
			EffectBase *e = NULL;

			if (!existing_effect["type"].isNull()) {
				// Create instance of effect
				e = EffectInfo().CreateEffect(existing_effect["type"].asString());

				// Load Json into Effect
				e->SetJsonValue(existing_effect);

				// Add Effect to Timeline
				AddEffect(e);
			}
		}
	}

	if (!root["duration"].isNull()) {
		// Update duration of timeline
		info.duration = root["duration"].asDouble();
		info.video_length = info.fps.ToFloat() * info.duration;
	}
}

// Apply a special formatted JSON object, which represents a change to the timeline (insert, update, delete)
void Timeline::ApplyJsonDiff(string value) throw(InvalidJSON, InvalidJSONKey) {

	// Clear internal cache (since things are about to change)
	final_cache.Clear();

	// Parse JSON string into JSON objects
	Json::Value root;
	Json::Reader reader;
	bool success = reader.parse( value, root );
	if (!success || !root.isArray())
		// Raise exception
		throw InvalidJSON("JSON could not be parsed (or is invalid).", "");

	try
	{
		// Process the JSON change array, loop through each item
		for (int x = 0; x < root.size(); x++) {
			// Get each change
			Json::Value change = root[x];
			string root_key = change["key"][(uint)0].asString();

			// Process each type of change
			if (root_key == "clips")
				// Apply to CLIPS
				apply_json_to_clips(change);

			else if (root_key == "effects")
				// Apply to EFFECTS
				apply_json_to_effects(change);

			else
				// Apply to TIMELINE
				apply_json_to_timeline(change);

		}
	}
	catch (const exception& e)
	{
		// Error parsing JSON (or missing keys)
		throw InvalidJSON("JSON is invalid (missing keys or invalid data types)", "");
	}

	// Adjust cache (in case something changed)
	final_cache.SetMaxBytesFromInfo(OPEN_MP_NUM_PROCESSORS * 2, info.width, info.height, info.sample_rate, info.channels);
}
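
// A sketch of the diff format consumed above, as one illustrative example
// (the id and value here are hypothetical, not part of the library):
//
//   [{"type": "update",
//     "key": ["clips", {"id": "CLIP-1"}],
//     "value": {"position": 10.0}}]
//
// "key" addresses the target ("clips", "effects", or a timeline property),
// and "type" is one of insert, update, or delete.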

// Apply JSON diff to clips
void Timeline::apply_json_to_clips(Json::Value change) throw(InvalidJSONKey) {

	// Get key and type of change
	string change_type = change["type"].asString();
	string clip_id = "";
	Clip *existing_clip = NULL;

	// Find id of clip (if any)
	for (int x = 0; x < change["key"].size(); x++) {
		// Get each change
		Json::Value key_part = change["key"][x];

		if (key_part.isObject()) {
			// Check for id
			if (!key_part["id"].isNull()) {
				// Set the id
				clip_id = key_part["id"].asString();

				// Find matching clip in timeline (if any)
				list<Clip*>::iterator clip_itr;
				for (clip_itr=clips.begin(); clip_itr != clips.end(); ++clip_itr)
				{
					// Get clip object from the iterator
					Clip *c = (*clip_itr);
					if (c->Id() == clip_id) {
						existing_clip = c;
						break; // clip found, exit loop
					}
				}
				break; // id found, exit loop
			}
		}
	}

	// Check for a more specific key (targeting this clip's effects)
	// For example: ["clips", {"id":123}, "effects", {"id":432}]
	if (existing_clip && change["key"].size() == 4 && change["key"][2] == "effects")
	{
		// This change is actually targeting a specific effect under a clip (and not the clip)
		Json::Value key_part = change["key"][3];

		if (key_part.isObject()) {
			// Check for id
			if (!key_part["id"].isNull())
			{
				// Set the id
				string effect_id = key_part["id"].asString();

				// Find matching effect in timeline (if any)
				list<EffectBase*> effect_list = existing_clip->Effects();
				list<EffectBase*>::iterator effect_itr;
				for (effect_itr=effect_list.begin(); effect_itr != effect_list.end(); ++effect_itr)
				{
					// Get effect object from the iterator
					EffectBase *e = (*effect_itr);
					if (e->Id() == effect_id) {
						// Apply the change to the effect directly
						apply_json_to_effects(change, e);
						return; // effect found, don't update clip
					}
				}
			}
		}
	}

	// Determine type of change operation
	if (change_type == "insert") {

		// Create new clip
		Clip *clip = new Clip();
		clip->SetJsonValue(change["value"]); // Set properties of new clip from JSON
		AddClip(clip); // Add clip to timeline

	} else if (change_type == "update") {

		// Update existing clip
		if (existing_clip)
			existing_clip->SetJsonValue(change["value"]); // Update clip properties from JSON

	} else if (change_type == "delete") {

		// Remove existing clip
		if (existing_clip)
			RemoveClip(existing_clip); // Remove clip from timeline

	}
}

// Apply JSON diff to effects
void Timeline::apply_json_to_effects(Json::Value change) throw(InvalidJSONKey) {

	// Get key and type of change
	string change_type = change["type"].asString();
	EffectBase *existing_effect = NULL;

	// Find id of an effect (if any)
	for (int x = 0; x < change["key"].size(); x++) {
		// Get each change
		Json::Value key_part = change["key"][x];

		if (key_part.isObject()) {
			// Check for id
			if (!key_part["id"].isNull())
			{
				// Set the id
				string effect_id = key_part["id"].asString();

				// Find matching effect in timeline (if any)
				list<EffectBase*>::iterator effect_itr;
				for (effect_itr=effects.begin(); effect_itr != effects.end(); ++effect_itr)
				{
					// Get effect object from the iterator
					EffectBase *e = (*effect_itr);
					if (e->Id() == effect_id) {
						existing_effect = e;
						break; // effect found, exit loop
					}
				}
				break; // id found, exit loop
			}
		}
	}

	// Now that we found the effect, apply the change to it
	if (existing_effect || change_type == "insert")
		// Apply change to effect
		apply_json_to_effects(change, existing_effect);
}

// Apply JSON diff to effects (if you already know which effect needs to be updated)
void Timeline::apply_json_to_effects(Json::Value change, EffectBase* existing_effect) throw(InvalidJSONKey) {

	// Get key and type of change
	string change_type = change["type"].asString();

	// Determine type of change operation
	if (change_type == "insert") {

		// Determine type of effect
		string effect_type = change["value"]["type"].asString();

		// Create Effect
		EffectBase *e = NULL;

		// Init the matching effect object
		e = EffectInfo().CreateEffect(effect_type);

		// Load Json into Effect
		e->SetJsonValue(change["value"]);

		// Add Effect to Timeline
		AddEffect(e);

	} else if (change_type == "update") {

		// Update existing effect
		if (existing_effect)
			existing_effect->SetJsonValue(change["value"]); // Update effect properties from JSON

	} else if (change_type == "delete") {

		// Remove existing effect
		if (existing_effect)
			RemoveEffect(existing_effect); // Remove effect from timeline

	}
}

// Apply JSON diff to timeline properties
void Timeline::apply_json_to_timeline(Json::Value change) throw(InvalidJSONKey) {

	// Get key and type of change
	string change_type = change["type"].asString();
	string root_key = change["key"][(uint)0].asString();
	string sub_key = "";
	if (change["key"].size() >= 2)
		sub_key = change["key"][(uint)1].asString();

	// Determine type of change operation
	if (change_type == "insert" || change_type == "update") {

		// INSERT / UPDATE
		// Check for valid property
		if (root_key == "color")
			// Set color
			color.SetJsonValue(change["value"]);
		else if (root_key == "viewport_scale")
			// Set viewport scale
			viewport_scale.SetJsonValue(change["value"]);
		else if (root_key == "viewport_x")
			// Set viewport x offset
			viewport_x.SetJsonValue(change["value"]);
		else if (root_key == "viewport_y")
			// Set viewport y offset
			viewport_y.SetJsonValue(change["value"]);
		else if (root_key == "duration") {
			// Update duration of timeline
			info.duration = change["value"].asDouble();
			info.video_length = info.fps.ToFloat() * info.duration;
		}
		else if (root_key == "width")
			// Set width
			info.width = change["value"].asInt();
		else if (root_key == "height")
			// Set height
			info.height = change["value"].asInt();
		else if (root_key == "fps" && sub_key == "" && change["value"].isObject()) {
			// Set fps fraction
			if (!change["value"]["num"].isNull())
				info.fps.num = change["value"]["num"].asInt();
			if (!change["value"]["den"].isNull())
				info.fps.den = change["value"]["den"].asInt();
		}
		else if (root_key == "fps" && sub_key == "num")
			// Set fps.num
			info.fps.num = change["value"].asInt();
		else if (root_key == "fps" && sub_key == "den")
			// Set fps.den
			info.fps.den = change["value"].asInt();
		else if (root_key == "sample_rate")
			// Set sample rate
			info.sample_rate = change["value"].asInt();
		else if (root_key == "channels")
			// Set channels
			info.channels = change["value"].asInt();
		else if (root_key == "channel_layout")
			// Set channel layout
			info.channel_layout = (ChannelLayout) change["value"].asInt();
		else
			// Error parsing JSON (or missing keys)
			throw InvalidJSONKey("JSON change key is invalid", change.toStyledString());

	} else if (change["type"].asString() == "delete") {

		// DELETE / RESET
		// Reset the following properties (since we can't delete them)
		if (root_key == "color") {
			color = Color();
			color.red = Keyframe(0.0);
			color.green = Keyframe(0.0);
			color.blue = Keyframe(0.0);
		}
		else if (root_key == "viewport_scale")
			viewport_scale = Keyframe(1.0);
		else if (root_key == "viewport_x")
			viewport_x = Keyframe(0.0);
		else if (root_key == "viewport_y")
			viewport_y = Keyframe(0.0);
		else
			// Error parsing JSON (or missing keys)
			throw InvalidJSONKey("JSON change key is invalid", change.toStyledString());

	}
}
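
/* A minimal usage sketch of this reader (the file name below is hypothetical,
 * for illustration only):
 *
 *   Timeline t(1920, 1080, Fraction(30, 1), 44100, 2, LAYOUT_STEREO);
 *   Clip* c = new Clip("video.mp4");  // Clip opens a reader for this path
 *   t.AddClip(c);                     // auto-mapped to 30 fps / 44100 Hz
 *   t.Open();
 *   tr1::shared_ptr<Frame> f = t.GetFrame(1);  // composite frame #1
 *   t.Close();
 */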