OpenShot Library | libopenshot 0.4.0
Timeline.cpp
// Copyright (c) 2008-2019 OpenShot Studios, LLC
//
// SPDX-License-Identifier: LGPL-3.0-or-later

#include "Timeline.h"

#include "CacheBase.h"
#include "CacheDisk.h"
#include "CacheMemory.h"
#include "CrashHandler.h"
#include "FrameMapper.h"
#include "Exceptions.h"

#include <QDir>
#include <QFile>
#include <QFileInfo>
#include <QRegularExpression>

using namespace openshot;

// Default Constructor for the timeline (which sets the canvas width and height)
Timeline::Timeline(int width, int height, Fraction fps, int sample_rate, int channels, ChannelLayout channel_layout) :
    is_open(false), auto_map_clips(true), managed_cache(true), path(""),
    max_concurrent_frames(OPEN_MP_NUM_PROCESSORS), max_time(0.0)
{
    // Create CrashHandler and Attach (in case of errors)
    CrashHandler::Instance();

    // Init viewport size (curve based, because it can be animated)
    viewport_scale = Keyframe(100.0);
    viewport_x = Keyframe(0.0);
    viewport_y = Keyframe(0.0);

    // Init background color
    color.red = Keyframe(0.0);
    color.green = Keyframe(0.0);
    color.blue = Keyframe(0.0);

    // Init FileInfo struct (clear all values)
    info.width = width;
    info.height = height;
    preview_width = info.width;
    preview_height = info.height;
    info.fps = fps;
    info.sample_rate = sample_rate;
    info.channels = channels;
    info.channel_layout = channel_layout;
    info.video_timebase = fps.Reciprocal();
    info.duration = 60 * 30; // 30 minute default duration
    info.has_audio = true;
    info.has_video = true;
    info.video_length = info.fps.ToFloat() * info.duration;
    info.display_ratio = openshot::Fraction(width, height);
    info.pixel_ratio.num = 1;
    info.pixel_ratio.den = 1;
    info.acodec = "openshot::timeline";
    info.vcodec = "openshot::timeline";

    // Init max image size
    SetMaxSize(info.width, info.height);

    // Init cache
    final_cache = new CacheMemory();
    final_cache->SetMaxBytesFromInfo(max_concurrent_frames * 4, info.width, info.height, info.sample_rate, info.channels);
}

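// --- Editor example (not part of the original file): a minimal sketch of
// constructing a timeline with the constructor above. All values shown
// (1080p, 30 fps, stereo 44.1 kHz) are arbitrary.
static void example_construct_timeline()
{
    Timeline t(1920, 1080, Fraction(30, 1), 44100, 2, LAYOUT_STEREO);
    t.Open();                                   // start consuming resources
    std::shared_ptr<Frame> f = t.GetFrame(1);   // blank frame (no clips yet)
    t.Close();
}
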
// Delegating constructor that copies parameters from a provided ReaderInfo
Timeline::Timeline(const ReaderInfo info) : Timeline::Timeline(
    info.width, info.height, info.fps, info.sample_rate,
    info.channels, info.channel_layout) {}

// Constructor for the timeline (which loads a JSON structure from a file path, and initializes a timeline)
Timeline::Timeline(const std::string& projectPath, bool convert_absolute_paths) :
    is_open(false), auto_map_clips(true), managed_cache(true), path(projectPath),
    max_concurrent_frames(OPEN_MP_NUM_PROCESSORS), max_time(0.0) {

    // Create CrashHandler and Attach (in case of errors)
    CrashHandler::Instance();

    // Init final cache as NULL (will be created after loading json)
    final_cache = NULL;

    // Init viewport size (curve based, because it can be animated)
    viewport_scale = Keyframe(100.0);
    viewport_x = Keyframe(0.0);
    viewport_y = Keyframe(0.0);

    // Init background color
    color.red = Keyframe(0.0);
    color.green = Keyframe(0.0);
    color.blue = Keyframe(0.0);

    // Check if path exists
    QFileInfo filePath(QString::fromStdString(path));
    if (!filePath.exists()) {
        throw InvalidFile("File could not be opened.", path);
    }

    // Check OpenShot Install Path exists
    Settings *s = Settings::Instance();
    QDir openshotPath(QString::fromStdString(s->PATH_OPENSHOT_INSTALL));
    if (!openshotPath.exists()) {
        throw InvalidFile("PATH_OPENSHOT_INSTALL could not be found.", s->PATH_OPENSHOT_INSTALL);
    }
    QDir openshotTransPath(openshotPath.filePath("transitions"));
    if (!openshotTransPath.exists()) {
        throw InvalidFile("PATH_OPENSHOT_INSTALL/transitions could not be found.", openshotTransPath.path().toStdString());
    }

    // Determine asset path
    QString asset_name = filePath.baseName().left(30) + "_assets";
    QDir asset_folder(filePath.dir().filePath(asset_name));
    if (!asset_folder.exists()) {
        // Create directory if needed
        asset_folder.mkpath(".");
    }

    // Load UTF-8 project file into QString
    QFile projectFile(QString::fromStdString(path));
    projectFile.open(QFile::ReadOnly);
    QString projectContents = QString::fromUtf8(projectFile.readAll());

    // Convert all relative paths into absolute paths (if requested)
    if (convert_absolute_paths) {

        // Find all "image" or "path" references in JSON (using regex). We must loop through the
        // match results, because our path-matching needs are not possible with QString::replace().
        QRegularExpression allPathsRegex(QStringLiteral("\"(image|path)\":.*?\"(.*?)\""));
        std::vector<QRegularExpressionMatch> matchedPositions;
        QRegularExpressionMatchIterator i = allPathsRegex.globalMatch(projectContents);
        while (i.hasNext()) {
            QRegularExpressionMatch match = i.next();
            if (match.hasMatch()) {
                // Push all match objects into a vector (so we can reverse them later)
                matchedPositions.push_back(match);
            }
        }

        // Reverse the matches (bottom of file to top, so our replacements don't break our match positions)
        std::vector<QRegularExpressionMatch>::reverse_iterator itr;
        for (itr = matchedPositions.rbegin(); itr != matchedPositions.rend(); itr++) {
            QRegularExpressionMatch match = *itr;
            QString relativeKey = match.captured(1); // image or path
            QString relativePath = match.captured(2); // relative file path
            QString absolutePath = "";

            // Find absolute path of all path/image values (including special replacements of @assets and @transitions)
            if (relativePath.startsWith("@assets")) {
                absolutePath = QFileInfo(asset_folder.absoluteFilePath(relativePath.replace("@assets", "."))).canonicalFilePath();
            } else if (relativePath.startsWith("@transitions")) {
                absolutePath = QFileInfo(openshotTransPath.absoluteFilePath(relativePath.replace("@transitions", "."))).canonicalFilePath();
            } else {
                absolutePath = QFileInfo(filePath.absoluteDir().absoluteFilePath(relativePath)).canonicalFilePath();
            }

            // Replace path in JSON content, if an absolute path was successfully found
            if (!absolutePath.isEmpty()) {
                projectContents.replace(match.capturedStart(0), match.capturedLength(0), "\"" + relativeKey + "\": \"" + absolutePath + "\"");
            }
        }
        // Clear matches
        matchedPositions.clear();
    }

    // Set JSON of project
    SetJson(projectContents.toStdString());

    // Calculate valid duration and set has_audio and has_video
    // based on content inside this Timeline's clips.
    float calculated_duration = 0.0;
    for (auto clip : clips)
    {
        float clip_last_frame = clip->Position() + clip->Duration();
        if (clip_last_frame > calculated_duration)
            calculated_duration = clip_last_frame;
        if (clip->Reader() && clip->Reader()->info.has_audio)
            info.has_audio = true;
        if (clip->Reader() && clip->Reader()->info.has_video)
            info.has_video = true;
    }
    info.video_length = calculated_duration * info.fps.ToFloat();
    info.duration = calculated_duration;

    // Init FileInfo settings
    info.acodec = "openshot::timeline";
    info.vcodec = "openshot::timeline";
    info.video_length = info.fps.ToFloat() * info.duration;
    info.has_video = true;
    info.has_audio = true;

    // Init max image size
    SetMaxSize(info.width, info.height);

    // Init cache
    final_cache = new CacheMemory();
    final_cache->SetMaxBytesFromInfo(max_concurrent_frames * 4, info.width, info.height, info.sample_rate, info.channels);
}

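// --- Editor example (not part of the original file): a minimal sketch of the
// path-based constructor above. The "example.osp" file name is hypothetical;
// passing true converts relative "path"/"image" entries to absolute paths.
static void example_load_project()
{
    Timeline t("example.osp", true);
    t.Open();
    // ... request frames, render, etc. ...
    t.Close();
}
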
// Destructor for the timeline
Timeline::~Timeline() {
    if (is_open) {
        // Auto Close if not already
        Close();
    }

    // Remove all clips, effects, and frame mappers
    Clear();

    // Destroy previous cache (if managed by timeline)
    if (managed_cache && final_cache) {
        delete final_cache;
        final_cache = NULL;
    }
}

// Add to the tracked_objects map a pointer to a tracked object (TrackedObjectBBox)
void Timeline::AddTrackedObject(std::shared_ptr<openshot::TrackedObjectBase> trackedObject){

    // Search for the tracked object on the map
    auto iterator = tracked_objects.find(trackedObject->Id());

    if (iterator != tracked_objects.end()){
        // Tracked object's id already present on the map, overwrite it
        iterator->second = trackedObject;
    }
    else{
        // Tracked object's id not present -> insert it on the map
        tracked_objects[trackedObject->Id()] = trackedObject;
    }
}

// Return tracked object pointer by its id
std::shared_ptr<openshot::TrackedObjectBase> Timeline::GetTrackedObject(std::string id) const{

    // Search for the tracked object on the map
    auto iterator = tracked_objects.find(id);

    if (iterator != tracked_objects.end()){
        // Id found, return the pointer to the tracked object
        std::shared_ptr<openshot::TrackedObjectBase> trackedObject = iterator->second;
        return trackedObject;
    }
    else {
        // Id not found, return a null pointer
        return nullptr;
    }
}

// Return the IDs of the tracked objects as a list of strings
std::list<std::string> Timeline::GetTrackedObjectsIds() const{

    // Create a list of strings
    std::list<std::string> trackedObjects_ids;

    // Iterate through the tracked_objects map
    for (auto const& it: tracked_objects){
        // Add the IDs to the list
        trackedObjects_ids.push_back(it.first);
    }

    return trackedObjects_ids;
}

#ifdef USE_OPENCV
// Return the trackedObject's properties as a JSON string
std::string Timeline::GetTrackedObjectValues(std::string id, int64_t frame_number) const {

    // Initialize the JSON object
    Json::Value trackedObjectJson;

    // Search for the tracked object on the map
    auto iterator = tracked_objects.find(id);

    if (iterator != tracked_objects.end())
    {
        // Id found, get the object pointer and cast it as a TrackedObjectBBox
        std::shared_ptr<TrackedObjectBBox> trackedObject = std::static_pointer_cast<TrackedObjectBBox>(iterator->second);

        if (trackedObject->ExactlyContains(frame_number)){
            // Get the trackedObject values for the requested frame
            BBox box = trackedObject->GetBox(frame_number);
            float x1 = box.cx - (box.width/2);
            float y1 = box.cy - (box.height/2);
            float x2 = box.cx + (box.width/2);
            float y2 = box.cy + (box.height/2);
            float rotation = box.angle;

            trackedObjectJson["x1"] = x1;
            trackedObjectJson["y1"] = y1;
            trackedObjectJson["x2"] = x2;
            trackedObjectJson["y2"] = y2;
            trackedObjectJson["rotation"] = rotation;

        } else {
            // Frame not tracked; fall back to the box of the first tracked frame
            BBox box = trackedObject->BoxVec.begin()->second;
            float x1 = box.cx - (box.width/2);
            float y1 = box.cy - (box.height/2);
            float x2 = box.cx + (box.width/2);
            float y2 = box.cy + (box.height/2);
            float rotation = box.angle;

            trackedObjectJson["x1"] = x1;
            trackedObjectJson["y1"] = y1;
            trackedObjectJson["x2"] = x2;
            trackedObjectJson["y2"] = y2;
            trackedObjectJson["rotation"] = rotation;
        }

    }
    else {
        // Id not found, return all 0 values
        trackedObjectJson["x1"] = 0;
        trackedObjectJson["y1"] = 0;
        trackedObjectJson["x2"] = 0;
        trackedObjectJson["y2"] = 0;
        trackedObjectJson["rotation"] = 0;
    }

    return trackedObjectJson.toStyledString();
}
#endif

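#ifdef USE_OPENCV
// --- Editor example (not part of the original file): a minimal sketch of
// querying a tracked object's box as JSON. The "obj-1" id is hypothetical
// and must match an object previously added via AddTrackedObject().
static void example_query_tracked_object(Timeline& t)
{
    std::string box_json = t.GetTrackedObjectValues("obj-1", 1);
    // box_json holds {"x1": ..., "y1": ..., "x2": ..., "y2": ..., "rotation": ...}
}
#endif
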
// Add an openshot::Clip to the timeline
void Timeline::AddClip(Clip* clip)
{
    // Get lock (prevent getting frames while this happens)
    const std::lock_guard<std::recursive_mutex> guard(getFrameMutex);

    // Assign timeline to clip
    clip->ParentTimeline(this);

    // Clear cache of clip and nested reader (if any)
    if (clip->Reader() && clip->Reader()->GetCache())
        clip->Reader()->GetCache()->Clear();

    // All clips should be converted to the frame rate of this timeline
    if (auto_map_clips) {
        // Apply framemapper (or update existing framemapper)
        apply_mapper_to_clip(clip);
    }

    // Add clip to list
    clips.push_back(clip);

    // Sort clips
    sort_clips();
}

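// --- Editor example (not part of the original file): a minimal sketch of
// adding a clip via AddClip() above. The "video.mp4" path is hypothetical.
// Clips allocated by the caller (as here) remain owned by the caller.
static void example_add_clip(Timeline& t)
{
    Clip* c = new Clip("video.mp4");
    c->Position(2.0);   // place 2 seconds into the timeline
    c->Layer(1);        // higher layers are composited on top
    t.AddClip(c);       // re-wraps the reader in a FrameMapper (if auto_map_clips)
}
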
// Add an effect to the timeline
void Timeline::AddEffect(EffectBase* effect)
{
    // Assign timeline to effect
    effect->ParentTimeline(this);

    // Add effect to list
    effects.push_back(effect);

    // Sort effects
    sort_effects();
}

// Remove an effect from the timeline
void Timeline::RemoveEffect(EffectBase* effect)
{
    effects.remove(effect);

    // Delete effect object (if timeline allocated it)
    bool allocated = allocated_effects.count(effect);
    if (allocated) {
        // Erase from the allocation set *before* clearing the pointer,
        // so the correct entry (not NULL) is removed
        allocated_effects.erase(effect);
        delete effect;
        effect = NULL;
    }

    // Sort effects
    sort_effects();
}

// Remove an openshot::Clip from the timeline
void Timeline::RemoveClip(Clip* clip)
{
    // Get lock (prevent getting frames while this happens)
    const std::lock_guard<std::recursive_mutex> guard(getFrameMutex);

    clips.remove(clip);

    // Delete clip object (if timeline allocated it)
    bool allocated = allocated_clips.count(clip);
    if (allocated) {
        // Erase from the allocation set *before* clearing the pointer,
        // so the correct entry (not NULL) is removed
        allocated_clips.erase(clip);
        delete clip;
        clip = NULL;
    }

    // Sort clips
    sort_clips();
}

// Look up a clip
openshot::Clip* Timeline::GetClip(const std::string& id)
{
    // Find the matching clip (if any)
    for (const auto& clip : clips) {
        if (clip->Id() == id) {
            return clip;
        }
    }
    return nullptr;
}

// Look up a timeline effect
openshot::EffectBase* Timeline::GetEffect(const std::string& id)
{
    // Find the matching effect (if any)
    for (const auto& effect : effects) {
        if (effect->Id() == id) {
            return effect;
        }
    }
    return nullptr;
}

// Look up a clip effect by ID
openshot::EffectBase* Timeline::GetClipEffect(const std::string& id)
{
    // Search all clips for matching effect ID
    for (const auto& clip : clips) {
        const auto e = clip->GetEffect(id);
        if (e != nullptr) {
            return e;
        }
    }
    return nullptr;
}

// Return the list of effects on all clips
std::list<openshot::EffectBase*> Timeline::ClipEffects() const {

    // Initialize the list
    std::list<EffectBase*> timelineEffectsList;

    // Loop through all clips
    for (const auto& clip : clips) {

        // Get the clip's list of effects
        std::list<EffectBase*> clipEffectsList = clip->Effects();

        // Append the clip's effects to the list
        timelineEffectsList.insert(timelineEffectsList.end(), clipEffectsList.begin(), clipEffectsList.end());
    }

    return timelineEffectsList;
}

// Compute the end time of the latest timeline element
double Timeline::GetMaxTime() {
    // Return cached max_time variable (threadsafe)
    return max_time;
}

// Compute the highest frame# based on the latest time and FPS
int64_t Timeline::GetMaxFrame() {
    double fps = info.fps.ToDouble();
    auto max_time = GetMaxTime();
    return std::round(max_time * fps);
}

// Compute the start time of the first timeline clip
double Timeline::GetMinTime() {
    // Return cached min_time variable (threadsafe)
    return min_time;
}

// Compute the first frame# based on the first clip position
int64_t Timeline::GetMinFrame() {
    double fps = info.fps.ToDouble();
    auto min_time = GetMinTime();
    return std::round(min_time * fps) + 1;
}

// Apply a FrameMapper to a clip which matches the settings of this timeline
void Timeline::apply_mapper_to_clip(Clip* clip)
{
    // Determine type of reader
    ReaderBase* clip_reader = NULL;
    if (clip->Reader()->Name() == "FrameMapper")
    {
        // Get the existing reader
        clip_reader = (ReaderBase*) clip->Reader();

        // Update the mapping
        FrameMapper* clip_mapped_reader = (FrameMapper*) clip_reader;
        clip_mapped_reader->ChangeMapping(info.fps, PULLDOWN_NONE, info.sample_rate, info.channels, info.channel_layout);

    } else {

        // Create a new FrameMapper to wrap the current reader
        FrameMapper* mapper = new FrameMapper(clip->Reader(), info.fps, PULLDOWN_NONE, info.sample_rate, info.channels, info.channel_layout);
        allocated_frame_mappers.insert(mapper);
        clip_reader = (ReaderBase*) mapper;
    }

    // Update clip reader
    clip->Reader(clip_reader);
}

// Apply the timeline's framerate and samplerate to all clips
void Timeline::ApplyMapperToClips()
{
    // Clear all cached frames
    ClearAllCache();

    // Loop through all clips
    for (auto clip : clips)
    {
        // Apply framemapper (or update existing framemapper)
        apply_mapper_to_clip(clip);
    }
}

// Calculate time of a frame number, based on a framerate
double Timeline::calculate_time(int64_t number, Fraction rate)
{
    // Get float version of fps fraction
    double raw_fps = rate.ToFloat();

    // Return the time (in seconds) of this frame
    return double(number - 1) / raw_fps;
}

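// Editor note (worked example): at 24 fps, frame 1 maps to (1 - 1) / 24 = 0.0
// seconds and frame 49 maps to (49 - 1) / 24 = 2.0 seconds; at the NTSC rate
// of 30000/1001 (~29.97 fps), frame 31 maps to 30 / 29.97 ≈ 1.001 seconds.
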
// Apply effects to the source frame (if any)
std::shared_ptr<Frame> Timeline::apply_effects(std::shared_ptr<Frame> frame, int64_t timeline_frame_number, int layer, TimelineInfoStruct* options)
{
    // Debug output
    ZmqLogger::Instance()->AppendDebugMethod(
        "Timeline::apply_effects",
        "frame->number", frame->number,
        "timeline_frame_number", timeline_frame_number,
        "layer", layer);

    // Find Effects at this position and layer
    for (auto effect : effects)
    {
        // Does the effect intersect the current requested time
        long effect_start_position = round(effect->Position() * info.fps.ToDouble()) + 1;
        long effect_end_position = round((effect->Position() + (effect->Duration())) * info.fps.ToDouble());

        bool does_effect_intersect = (effect_start_position <= timeline_frame_number && effect_end_position >= timeline_frame_number && effect->Layer() == layer);

        // Effect is visible
        if (does_effect_intersect)
        {
            // Determine the frame needed for this effect (based on the position on the timeline)
            long effect_start_frame = (effect->Start() * info.fps.ToDouble()) + 1;
            long effect_frame_number = timeline_frame_number - effect_start_position + effect_start_frame;

            if (!options->is_top_clip)
                continue; // skip effect, if overlapped/covered by another clip on same layer

            if (options->is_before_clip_keyframes != effect->info.apply_before_clip)
                continue; // skip effect, if this filter does not match

            // Debug output
            ZmqLogger::Instance()->AppendDebugMethod(
                "Timeline::apply_effects (Process Effect)",
                "effect_frame_number", effect_frame_number,
                "does_effect_intersect", does_effect_intersect);

            // Apply the effect to this frame
            frame = effect->GetFrame(frame, effect_frame_number);
        }

    } // end effect loop

    // Return modified frame
    return frame;
}

// Get or generate a blank frame
std::shared_ptr<Frame> Timeline::GetOrCreateFrame(std::shared_ptr<Frame> background_frame, Clip* clip, int64_t number, openshot::TimelineInfoStruct* options)
{
    std::shared_ptr<Frame> new_frame;

    // Init some basic properties about this frame
    int samples_in_frame = Frame::GetSamplesPerFrame(number, info.fps, info.sample_rate, info.channels);

    try {
        // Debug output
        ZmqLogger::Instance()->AppendDebugMethod(
            "Timeline::GetOrCreateFrame (from reader)",
            "number", number,
            "samples_in_frame", samples_in_frame);

        // Attempt to get a frame (but this could fail if a reader has just been closed)
        new_frame = std::shared_ptr<Frame>(clip->GetFrame(background_frame, number, options));

        // Return real frame
        return new_frame;

    } catch (const ReaderClosed & e) {
        // ...
    } catch (const OutOfBoundsFrame & e) {
        // ...
    }

    // Debug output
    ZmqLogger::Instance()->AppendDebugMethod(
        "Timeline::GetOrCreateFrame (create blank)",
        "number", number,
        "samples_in_frame", samples_in_frame);

    // No frame could be pulled from the clip; return the (still empty) frame pointer
    return new_frame;
}

// Process a new layer of video or audio
void Timeline::add_layer(std::shared_ptr<Frame> new_frame, Clip* source_clip, int64_t clip_frame_number, bool is_top_clip, float max_volume)
{
    // Create timeline options (with details about this current frame request)
    TimelineInfoStruct* options = new TimelineInfoStruct();
    options->is_top_clip = is_top_clip;
    options->is_before_clip_keyframes = true;

    // Get the clip's frame, composited on top of the current timeline frame
    std::shared_ptr<Frame> source_frame;
    source_frame = GetOrCreateFrame(new_frame, source_clip, clip_frame_number, options);
    delete options;

    // No frame found... so bail
    if (!source_frame)
        return;

    // Debug output
    ZmqLogger::Instance()->AppendDebugMethod(
        "Timeline::add_layer",
        "new_frame->number", new_frame->number,
        "clip_frame_number", clip_frame_number);

    /* COPY AUDIO - with correct volume */
    if (source_clip->Reader()->info.has_audio) {
        // Debug output
        ZmqLogger::Instance()->AppendDebugMethod(
            "Timeline::add_layer (Copy Audio)",
            "source_clip->Reader()->info.has_audio", source_clip->Reader()->info.has_audio,
            "source_frame->GetAudioChannelsCount()", source_frame->GetAudioChannelsCount(),
            "info.channels", info.channels,
            "clip_frame_number", clip_frame_number);

        if (source_frame->GetAudioChannelsCount() == info.channels && source_clip->has_audio.GetInt(clip_frame_number) != 0)
            for (int channel = 0; channel < source_frame->GetAudioChannelsCount(); channel++)
            {
                // Get volume from previous frame and this frame
                float previous_volume = source_clip->volume.GetValue(clip_frame_number - 1);
                float volume = source_clip->volume.GetValue(clip_frame_number);
                int channel_filter = source_clip->channel_filter.GetInt(clip_frame_number); // optional channel to filter (if not -1)
                int channel_mapping = source_clip->channel_mapping.GetInt(clip_frame_number); // optional channel to map this channel to (if not -1)

                // Apply volume mixing strategy
                if (source_clip->mixing == VOLUME_MIX_AVERAGE && max_volume > 1.0) {
                    // Don't allow this clip to exceed 100% (divide volume equally between all overlapping clips with volume)
                    previous_volume = previous_volume / max_volume;
                    volume = volume / max_volume;
                }
                else if (source_clip->mixing == VOLUME_MIX_REDUCE && max_volume > 1.0) {
                    // Reduce clip volume by a bit, hoping it will prevent exceeding 100% (but it is very possible it will)
                    previous_volume = previous_volume * 0.77;
                    volume = volume * 0.77;
                }

                // If channel filter enabled, check for correct channel (and skip non-matching channels)
                if (channel_filter != -1 && channel_filter != channel)
                    continue; // skip to next channel

                // If no volume on this frame or previous frame, do nothing
                if (previous_volume == 0.0 && volume == 0.0)
                    continue; // skip to next channel

                // If channel mapping disabled, just use the current channel
                if (channel_mapping == -1)
                    channel_mapping = channel;

                // Apply ramp to source frame (if needed)
                if (!isEqual(previous_volume, 1.0) || !isEqual(volume, 1.0))
                    source_frame->ApplyGainRamp(channel_mapping, 0, source_frame->GetAudioSamplesCount(), previous_volume, volume);

                // TODO: Improve FrameMapper (or Timeline) to always get the correct number of samples per frame.
                // Currently, the ResampleContext sometimes leaves behind a few samples for the next call, and the
                // number of samples returned is variable... and does not match the number expected.
                // This is a crude solution at best. =)
                if (new_frame->GetAudioSamplesCount() != source_frame->GetAudioSamplesCount()){
                    // Force timeline frame to match the source frame
                    new_frame->ResizeAudio(info.channels, source_frame->GetAudioSamplesCount(), info.sample_rate, info.channel_layout);
                }
                // Copy audio samples (and set initial volume). Mix samples with existing audio samples. The gains are
                // added together, so be sure to set the gains correctly, or the sum may exceed 1.0 (and audio distortion
                // will happen).
                new_frame->AddAudio(false, channel_mapping, 0, source_frame->GetAudioSamples(channel), source_frame->GetAudioSamplesCount(), 1.0);
            }
        else
            // Debug output
            ZmqLogger::Instance()->AppendDebugMethod(
                "Timeline::add_layer (No Audio Copied - Wrong # of Channels)",
                "source_clip->Reader()->info.has_audio",
                source_clip->Reader()->info.has_audio,
                "source_frame->GetAudioChannelsCount()",
                source_frame->GetAudioChannelsCount(),
                "info.channels", info.channels,
                "clip_frame_number", clip_frame_number);
    }

    // Debug output
    ZmqLogger::Instance()->AppendDebugMethod(
        "Timeline::add_layer (Transform: Composite Image Layer: Completed)",
        "source_frame->number", source_frame->number,
        "new_frame->GetImage()->width()", new_frame->GetWidth(),
        "new_frame->GetImage()->height()", new_frame->GetHeight());
}

// Update the list of 'opened' clips
void Timeline::update_open_clips(Clip *clip, bool does_clip_intersect)
{
    // Get lock (prevent getting frames while this happens)
    const std::lock_guard<std::recursive_mutex> guard(getFrameMutex);

    // Debug output
    ZmqLogger::Instance()->AppendDebugMethod(
        "Timeline::update_open_clips (before)",
        "does_clip_intersect", does_clip_intersect,
        "closing_clips.size()", closing_clips.size(),
        "open_clips.size()", open_clips.size());

    // Is clip already in list?
    bool clip_found = open_clips.count(clip);

    if (clip_found && !does_clip_intersect)
    {
        // Remove clip from 'opened' list, because it's closed now
        open_clips.erase(clip);

        // Close clip
        clip->Close();
    }
    else if (!clip_found && does_clip_intersect)
    {
        // Add clip to 'opened' list, because it's missing
        open_clips[clip] = clip;

        try {
            // Open the clip
            clip->Open();

        } catch (const InvalidFile & e) {
            // ...
        }
    }

    // Debug output
    ZmqLogger::Instance()->AppendDebugMethod(
        "Timeline::update_open_clips (after)",
        "does_clip_intersect", does_clip_intersect,
        "clip_found", clip_found,
        "closing_clips.size()", closing_clips.size(),
        "open_clips.size()", open_clips.size());
}

// Calculate the max and min duration (in seconds) of the timeline, based on all the clips, and cache the value
void Timeline::calculate_max_duration() {
    double last_clip = 0.0;
    double last_effect = 0.0;
    double first_clip = std::numeric_limits<double>::max();
    double first_effect = std::numeric_limits<double>::max();

    // Find the last and first clip
    if (!clips.empty()) {
        // Find the clip with the maximum end frame
        const auto max_clip = std::max_element(
            clips.begin(), clips.end(), CompareClipEndFrames());
        last_clip = (*max_clip)->Position() + (*max_clip)->Duration();

        // Find the clip with the minimum start position (ignoring layer)
        const auto min_clip = std::min_element(
            clips.begin(), clips.end(), [](const openshot::Clip* lhs, const openshot::Clip* rhs) {
                return lhs->Position() < rhs->Position();
            });
        first_clip = (*min_clip)->Position();
    }

    // Find the last and first effect
    if (!effects.empty()) {
        // Find the effect with the maximum end frame
        const auto max_effect = std::max_element(
            effects.begin(), effects.end(), CompareEffectEndFrames());
        last_effect = (*max_effect)->Position() + (*max_effect)->Duration();

        // Find the effect with the minimum start position
        const auto min_effect = std::min_element(
            effects.begin(), effects.end(), [](const openshot::EffectBase* lhs, const openshot::EffectBase* rhs) {
                return lhs->Position() < rhs->Position();
            });
        first_effect = (*min_effect)->Position();
    }

    // Calculate the max and min time
    max_time = std::max(last_clip, last_effect);
    min_time = std::min(first_clip, first_effect);

    // If no clips or effects exist, set min and max time to 0
    if (clips.empty() && effects.empty()) {
        min_time = 0.0;
        max_time = 0.0;
    }
}

// Sort clips by position on the timeline
void Timeline::sort_clips()
{
    // Get lock (prevent getting frames while this happens)
    const std::lock_guard<std::recursive_mutex> guard(getFrameMutex);

    // Debug output
    ZmqLogger::Instance()->AppendDebugMethod(
        "Timeline::SortClips",
        "clips.size()", clips.size());

    // Sort clips
    clips.sort(CompareClips());

    // Calculate max timeline duration
    calculate_max_duration();
}

// Sort effects by position on the timeline
void Timeline::sort_effects()
{
    // Get lock (prevent getting frames while this happens)
    const std::lock_guard<std::recursive_mutex> guard(getFrameMutex);

    // Sort effects
    effects.sort(CompareEffects());

    // Calculate max timeline duration
    calculate_max_duration();
}

// Clear all clips from timeline
void Timeline::Clear()
{
    ZmqLogger::Instance()->AppendDebugMethod("Timeline::Clear");

    // Get lock (prevent getting frames while this happens)
    const std::lock_guard<std::recursive_mutex> guard(getFrameMutex);

    // Close all open clips
    for (auto clip : clips)
    {
        update_open_clips(clip, false);

        // Delete clip object (if timeline allocated it)
        bool allocated = allocated_clips.count(clip);
        if (allocated) {
            delete clip;
        }
    }
    // Clear all clips
    clips.clear();
    allocated_clips.clear();

    // Close all effects
    for (auto effect : effects)
    {
        // Delete effect object (if timeline allocated it)
        bool allocated = allocated_effects.count(effect);
        if (allocated) {
            delete effect;
        }
    }
    // Clear all effects
    effects.clear();
    allocated_effects.clear();

    // Delete all FrameMappers
    for (auto mapper : allocated_frame_mappers)
    {
        mapper->Reader(NULL);
        mapper->Close();
        delete mapper;
    }
    allocated_frame_mappers.clear();
}

// Close the reader (and any resources it was consuming)
void Timeline::Close()
{
    ZmqLogger::Instance()->AppendDebugMethod("Timeline::Close");

    // Get lock (prevent getting frames while this happens)
    const std::lock_guard<std::recursive_mutex> guard(getFrameMutex);

    // Close all open clips
    for (auto clip : clips)
    {
        // Open or Close this clip, based on if it's intersecting or not
        update_open_clips(clip, false);
    }

    // Mark timeline as closed
    is_open = false;

    // Clear all cache (deep clear, including nested Readers)
    ClearAllCache(true);
}

// Open the reader (and start consuming resources)
void Timeline::Open()
{
    is_open = true;
}

// Compare 2 floating point numbers for equality
bool Timeline::isEqual(double a, double b)
{
    return fabs(a - b) < 0.000001;
}

// Get an openshot::Frame object for a specific frame number of this reader.
std::shared_ptr<Frame> Timeline::GetFrame(int64_t requested_frame)
{
    // Adjust out of bounds frame number
    if (requested_frame < 1)
        requested_frame = 1;

    // Check cache
    std::shared_ptr<Frame> frame;
    frame = final_cache->GetFrame(requested_frame);
    if (frame) {
        // Debug output
        ZmqLogger::Instance()->AppendDebugMethod(
            "Timeline::GetFrame (Cached frame found)",
            "requested_frame", requested_frame);

        // Return cached frame
        return frame;
    }
    else
    {
        // Prevent async calls to the following code
        const std::lock_guard<std::recursive_mutex> lock(getFrameMutex);

        // Check cache 2nd time
        std::shared_ptr<Frame> frame;
        frame = final_cache->GetFrame(requested_frame);
        if (frame) {
            // Debug output
            ZmqLogger::Instance()->AppendDebugMethod(
                "Timeline::GetFrame (Cached frame found on 2nd check)",
                "requested_frame", requested_frame);

            // Return cached frame
            return frame;
        } else {
            // Get a list of clips that intersect with the requested section of timeline
            // This also opens the readers for intersecting clips, and marks non-intersecting clips as 'needs closing'
            std::vector<Clip *> nearby_clips;
            nearby_clips = find_intersecting_clips(requested_frame, 1, true);

            // Debug output
            ZmqLogger::Instance()->AppendDebugMethod(
                "Timeline::GetFrame (processing frame)",
                "requested_frame", requested_frame,
                "omp_get_thread_num()", omp_get_thread_num());

            // Init some basic properties about this frame
            int samples_in_frame = Frame::GetSamplesPerFrame(requested_frame, info.fps, info.sample_rate, info.channels);

            // Create blank frame (which will become the requested frame)
            std::shared_ptr<Frame> new_frame(std::make_shared<Frame>(requested_frame, preview_width, preview_height, "#000000", samples_in_frame, info.channels));
            new_frame->AddAudioSilence(samples_in_frame);
            new_frame->SampleRate(info.sample_rate);
            new_frame->ChannelsLayout(info.channel_layout);

            // Debug output
            ZmqLogger::Instance()->AppendDebugMethod(
                "Timeline::GetFrame (Adding solid color)",
                "requested_frame", requested_frame,
                "info.width", info.width,
                "info.height", info.height);

            // Add Background Color to 1st layer (if animated or not black)
            if ((color.red.GetCount() > 1 || color.green.GetCount() > 1 || color.blue.GetCount() > 1) ||
                (color.red.GetValue(requested_frame) != 0.0 || color.green.GetValue(requested_frame) != 0.0 ||
                 color.blue.GetValue(requested_frame) != 0.0))
                new_frame->AddColor(preview_width, preview_height, color.GetColorHex(requested_frame));

            // Debug output
            ZmqLogger::Instance()->AppendDebugMethod(
                "Timeline::GetFrame (Loop through clips)",
                "requested_frame", requested_frame,
                "clips.size()", clips.size(),
                "nearby_clips.size()", nearby_clips.size());

            // Find Clips near this time
            for (auto clip : nearby_clips) {
                long clip_start_position = round(clip->Position() * info.fps.ToDouble()) + 1;
                long clip_end_position = round((clip->Position() + clip->Duration()) * info.fps.ToDouble());
                bool does_clip_intersect = (clip_start_position <= requested_frame && clip_end_position >= requested_frame);

                // Debug output
                ZmqLogger::Instance()->AppendDebugMethod(
                    "Timeline::GetFrame (Does clip intersect)",
                    "requested_frame", requested_frame,
                    "clip->Position()", clip->Position(),
                    "clip->Duration()", clip->Duration(),
                    "does_clip_intersect", does_clip_intersect);

                // Clip is visible
                if (does_clip_intersect) {
                    // Determine if clip is "top" clip on this layer (only happens when multiple clips are overlapping)
                    bool is_top_clip = true;
                    float max_volume = 0.0;
                    for (auto nearby_clip : nearby_clips) {
                        long nearby_clip_start_position = round(nearby_clip->Position() * info.fps.ToDouble()) + 1;
                        long nearby_clip_end_position = round((nearby_clip->Position() + nearby_clip->Duration()) * info.fps.ToDouble()) + 1;
                        long nearby_clip_start_frame = (nearby_clip->Start() * info.fps.ToDouble()) + 1;
                        long nearby_clip_frame_number = requested_frame - nearby_clip_start_position + nearby_clip_start_frame;

                        // Determine if top clip
                        if (clip->Id() != nearby_clip->Id() && clip->Layer() == nearby_clip->Layer() &&
                            nearby_clip_start_position <= requested_frame && nearby_clip_end_position >= requested_frame &&
                            nearby_clip_start_position > clip_start_position && is_top_clip == true) {
                            is_top_clip = false;
                        }

                        // Determine max volume of overlapping clips
                        if (nearby_clip->Reader() && nearby_clip->Reader()->info.has_audio &&
                            nearby_clip->has_audio.GetInt(nearby_clip_frame_number) != 0 &&
                            nearby_clip_start_position <= requested_frame && nearby_clip_end_position >= requested_frame) {
                            max_volume += nearby_clip->volume.GetValue(nearby_clip_frame_number);
                        }
                    }

                    // Determine the frame needed for this clip (based on the position on the timeline)
                    long clip_start_frame = (clip->Start() * info.fps.ToDouble()) + 1;
                    long clip_frame_number = requested_frame - clip_start_position + clip_start_frame;

                    // Debug output
                    ZmqLogger::Instance()->AppendDebugMethod(
                        "Timeline::GetFrame (Calculate clip's frame #)",
                        "clip->Position()", clip->Position(),
                        "clip->Start()", clip->Start(),
                        "info.fps.ToFloat()", info.fps.ToFloat(),
                        "clip_frame_number", clip_frame_number);

                    // Add clip's frame as layer
                    add_layer(new_frame, clip, clip_frame_number, is_top_clip, max_volume);

                } else {
                    // Debug output
                    ZmqLogger::Instance()->AppendDebugMethod(
                        "Timeline::GetFrame (clip does not intersect)",
                        "requested_frame", requested_frame,
                        "does_clip_intersect", does_clip_intersect);
                }

            } // end clip loop

            // Debug output
            ZmqLogger::Instance()->AppendDebugMethod(
                "Timeline::GetFrame (Add frame to cache)",
                "requested_frame", requested_frame,
                "info.width", info.width,
                "info.height", info.height);

            // Set frame # on mapped frame
            new_frame->SetFrameNumber(requested_frame);

            // Add final frame to cache
            final_cache->Add(new_frame);

            // Return frame (or blank frame)
            return new_frame;
        }
    }
}

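// --- Editor example (not part of the original file): a minimal sketch of
// rendering every frame sequentially with GetFrame() above. GetMaxFrame()
// is derived from the cached max_time, so clips must already be added.
static void example_render_all(Timeline& t)
{
    t.Open();
    for (int64_t n = 1; n <= t.GetMaxFrame(); n++) {
        std::shared_ptr<Frame> f = t.GetFrame(n);
        // ... display or encode f ...
    }
    t.Close();
}
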
// Find intersecting clips (or non intersecting clips)
std::vector<Clip*> Timeline::find_intersecting_clips(int64_t requested_frame, int number_of_frames, bool include)
{
    // Find matching clips
    std::vector<Clip*> matching_clips;

    // Calculate the requested frame range
    float min_requested_frame = requested_frame;
    float max_requested_frame = requested_frame + (number_of_frames - 1);

    // Find Clips at this time
    for (auto clip : clips)
    {
        // Does clip intersect the current requested time
        long clip_start_position = round(clip->Position() * info.fps.ToDouble()) + 1;
        long clip_end_position = round((clip->Position() + clip->Duration()) * info.fps.ToDouble()) + 1;

        bool does_clip_intersect =
            (clip_start_position <= min_requested_frame || clip_start_position <= max_requested_frame) &&
            (clip_end_position >= min_requested_frame || clip_end_position >= max_requested_frame);

        // Debug output
        ZmqLogger::Instance()->AppendDebugMethod(
            "Timeline::find_intersecting_clips (Is clip near or intersecting)",
            "requested_frame", requested_frame,
            "min_requested_frame", min_requested_frame,
            "max_requested_frame", max_requested_frame,
            "clip->Position()", clip->Position(),
            "does_clip_intersect", does_clip_intersect);

        // Open (or schedule for closing) this clip, based on if it's intersecting or not
        update_open_clips(clip, does_clip_intersect);

        // Clip is visible
        if (does_clip_intersect && include)
            // Add the intersecting clip
            matching_clips.push_back(clip);

        else if (!does_clip_intersect && !include)
            // Add the non-intersecting clip
            matching_clips.push_back(clip);

    } // end clip loop

    // Return list
    return matching_clips;
}

// Set the cache object used by this reader
void Timeline::SetCache(CacheBase* new_cache) {
    // Get lock (prevent getting frames while this happens)
    const std::lock_guard<std::recursive_mutex> lock(getFrameMutex);

    // Destroy previous cache (if managed by timeline)
    if (managed_cache && final_cache) {
        delete final_cache;
        final_cache = NULL;
        managed_cache = false;
    }

    // Set new cache
    final_cache = new_cache;
}

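// --- Editor example (not part of the original file): a minimal sketch of
// installing a disk-backed cache via SetCache() above. The cache directory
// and CacheDisk arguments (image format, quality, scale) are illustrative.
static void example_use_disk_cache(Timeline& t)
{
    CacheDisk* disk_cache = new CacheDisk("/tmp/openshot-cache", "PNG", 1.0, 0.25);
    t.SetCache(disk_cache);  // the caller owns a cache installed this way
}
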
// Generate JSON string of this object
std::string Timeline::Json() const {

    // Return formatted string
    return JsonValue().toStyledString();
}

// Generate Json::Value for this object
Json::Value Timeline::JsonValue() const {

    // Create root json object
    Json::Value root = ReaderBase::JsonValue(); // get parent properties
    root["type"] = "Timeline";
    root["viewport_scale"] = viewport_scale.JsonValue();
    root["viewport_x"] = viewport_x.JsonValue();
    root["viewport_y"] = viewport_y.JsonValue();
    root["color"] = color.JsonValue();
    root["path"] = path;

    // Add array of clips
    root["clips"] = Json::Value(Json::arrayValue);

    // Loop through clips
    for (const auto existing_clip : clips)
    {
        root["clips"].append(existing_clip->JsonValue());
    }

    // Add array of effects
    root["effects"] = Json::Value(Json::arrayValue);

    // Loop through effects
    for (const auto existing_effect: effects)
    {
        root["effects"].append(existing_effect->JsonValue());
    }

    // Return JsonValue
    return root;
}

// Load JSON string into this object
void Timeline::SetJson(const std::string value) {

    // Get lock (prevent getting frames while this happens)
    const std::lock_guard<std::recursive_mutex> lock(getFrameMutex);

    // Parse JSON string into JSON objects
    try
    {
        const Json::Value root = openshot::stringToJson(value);
        // Set all values that match
        SetJsonValue(root);
    }
    catch (const std::exception& e)
    {
        // Error parsing JSON (or missing keys)
        throw InvalidJSON("JSON is invalid (missing keys or invalid data types)");
    }
}

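// Editor note (illustrative): a minimal JSON document accepted by SetJson().
// The keys mirror the properties serialized by JsonValue() above; the clip's
// "path" value is hypothetical.
//
//   {
//     "width": 1280, "height": 720,
//     "fps": {"num": 30, "den": 1},
//     "clips": [{"path": "video.mp4", "position": 0.0, "layer": 1}],
//     "effects": []
//   }
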
// Load Json::Value into this object
void Timeline::SetJsonValue(const Json::Value root) {

    // Get lock (prevent getting frames while this happens)
    const std::lock_guard<std::recursive_mutex> lock(getFrameMutex);

    // Close timeline before we do anything (this closes all clips)
    bool was_open = is_open;
    Close();

    // Set parent data
    ReaderBase::SetJsonValue(root);

    // Set data from Json (if key is found)
    if (!root["path"].isNull())
        path = root["path"].asString();

    if (!root["clips"].isNull()) {
        // Clear existing clips
        clips.clear();

        // Loop through clips
        for (const Json::Value existing_clip : root["clips"]) {
            // Skip NULL nodes
            if (existing_clip.isNull()) {
                continue;
            }

            // Create Clip
            Clip *c = new Clip();

            // Keep track of allocated clip objects
            allocated_clips.insert(c);

            // When a clip is attached to an object, it searches for the object
            // on its parent timeline. Setting the parent timeline of the clip here
            // allows attaching it to an object when exporting the project (because
            // the exporter script initializes the clip and its effects
            // before setting its parent timeline).
            c->ParentTimeline(this);

            // Load Json into Clip
            c->SetJsonValue(existing_clip);

            // Add Clip to Timeline
            AddClip(c);
        }
    }

    if (!root["effects"].isNull()) {
        // Clear existing effects
        effects.clear();

        // Loop through effects
        for (const Json::Value existing_effect : root["effects"]) {
            // Skip NULL nodes
            if (existing_effect.isNull()) {
                continue;
            }

            // Create Effect
            EffectBase *e = NULL;

            if (!existing_effect["type"].isNull()) {
                // Create instance of effect
                if ( (e = EffectInfo().CreateEffect(existing_effect["type"].asString())) ) {

                    // Keep track of allocated effect objects
                    allocated_effects.insert(e);

                    // Load Json into Effect
                    e->SetJsonValue(existing_effect);

                    // Add Effect to Timeline
                    AddEffect(e);
                }
            }
        }
    }

    if (!root["duration"].isNull()) {
        // Update duration of timeline
        info.duration = root["duration"].asDouble();
        info.video_length = info.fps.ToFloat() * info.duration;
    }

    // Update preview settings
    preview_width = info.width;
    preview_height = info.height;

    // Resort (and recalculate min/max duration)
    sort_clips();
    sort_effects();

    // Re-open if needed
    if (was_open)
        Open();
}

// Apply a special formatted JSON object, which represents a change to the timeline (insert, update, delete)
void Timeline::ApplyJsonDiff(std::string value) {

    // Get lock (prevent getting frames while this happens)
    const std::lock_guard<std::recursive_mutex> lock(getFrameMutex);

    // Parse JSON string into JSON objects
    try
    {
        const Json::Value root = openshot::stringToJson(value);
        // Process the JSON change array, looping through each item
        for (const Json::Value change : root) {
            std::string change_key = change["key"][(uint)0].asString();

            // Process each type of change
            if (change_key == "clips")
                // Apply to CLIPS
                apply_json_to_clips(change);

            else if (change_key == "effects")
                // Apply to EFFECTS
                apply_json_to_effects(change);

            else
                // Apply to TIMELINE
                apply_json_to_timeline(change);

        }
    }
    catch (const std::exception& e)
    {
        // Error parsing JSON (or missing keys)
        throw InvalidJSON("JSON is invalid (missing keys or invalid data types)");
    }
}

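// Editor note (illustrative): ApplyJsonDiff() expects an array of changes,
// each with a "type" (insert/update/delete), a "key" path, and a "value".
// The clip id below is hypothetical.
//
//   [{"type": "update",
//     "key": ["clips", {"id": "CLIP-123"}],
//     "value": {"id": "CLIP-123", "position": 5.0, "start": 0.0, "end": 10.0}}]
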
// Apply JSON diff to clips
void Timeline::apply_json_to_clips(Json::Value change) {

    // Get key and type of change
    std::string change_type = change["type"].asString();
    std::string clip_id = "";
    Clip *existing_clip = NULL;

    // Find id of clip (if any)
    for (auto key_part : change["key"]) {
        // Get each change
        if (key_part.isObject()) {
            // Check for id
            if (!key_part["id"].isNull()) {
                // Set the id
                clip_id = key_part["id"].asString();

                // Find matching clip in timeline (if any)
                for (auto c : clips)
                {
                    if (c->Id() == clip_id) {
                        existing_clip = c;
                        break; // clip found, exit loop
                    }
                }
                break; // id found, exit loop
            }
        }
    }

    // Check for a more specific key (targeting this clip's effects)
    // For example: ["clips", {"id":123}, "effects", {"id":432}]
    if (existing_clip && change["key"].size() == 4 && change["key"][2] == "effects")
    {
        // This change is actually targeting a specific effect under a clip (and not the clip)
        Json::Value key_part = change["key"][3];

        if (key_part.isObject()) {
            // Check for id
            if (!key_part["id"].isNull())
            {
                // Set the id
                std::string effect_id = key_part["id"].asString();

                // Find matching effect in timeline (if any)
                std::list<EffectBase*> effect_list = existing_clip->Effects();
                for (auto e : effect_list)
                {
                    if (e->Id() == effect_id) {
                        // Apply the change to the effect directly
                        apply_json_to_effects(change, e);

                        // Calculate start and end frames that this impacts, and remove those frames from the cache
                        int64_t new_starting_frame = (existing_clip->Position() * info.fps.ToDouble()) + 1;
                        int64_t new_ending_frame = ((existing_clip->Position() + existing_clip->Duration()) * info.fps.ToDouble()) + 1;
                        final_cache->Remove(new_starting_frame - 8, new_ending_frame + 8);

                        return; // effect found, don't update clip
                    }
                }
            }
        }
    }

    // Calculate start and end frames that this impacts, and remove those frames from the cache
    if (!change["value"].isArray() && !change["value"]["position"].isNull()) {
        int64_t new_starting_frame = (change["value"]["position"].asDouble() * info.fps.ToDouble()) + 1;
        int64_t new_ending_frame = ((change["value"]["position"].asDouble() + change["value"]["end"].asDouble() - change["value"]["start"].asDouble()) * info.fps.ToDouble()) + 1;
        final_cache->Remove(new_starting_frame - 8, new_ending_frame + 8);
    }

    // Determine type of change operation
    if (change_type == "insert") {

        // Create clip
        Clip *clip = new Clip();

        // Keep track of allocated clip objects
        allocated_clips.insert(clip);

        // Set properties of clip from JSON
        clip->SetJsonValue(change["value"]);

        // Add clip to timeline
        AddClip(clip);

    } else if (change_type == "update") {

        // Update existing clip
        if (existing_clip) {

            // Calculate start and end frames that this impacts, and remove those frames from the cache
            int64_t old_starting_frame = (existing_clip->Position() * info.fps.ToDouble()) + 1;
            int64_t old_ending_frame = ((existing_clip->Position() + existing_clip->Duration()) * info.fps.ToDouble()) + 1;
            final_cache->Remove(old_starting_frame - 8, old_ending_frame + 8);

            // Remove cache on clip's Reader (if found)
            if (existing_clip->Reader() && existing_clip->Reader()->GetCache())
                existing_clip->Reader()->GetCache()->Remove(old_starting_frame - 8, old_ending_frame + 8);

            // Update clip properties from JSON
            existing_clip->SetJsonValue(change["value"]);

            // Apply framemapper (or update existing framemapper)
            if (auto_map_clips) {
                apply_mapper_to_clip(existing_clip);
            }
        }

    } else if (change_type == "delete") {

        // Remove existing clip
        if (existing_clip) {

            // Calculate start and end frames that this impacts, and remove those frames from the cache
            int64_t old_starting_frame = (existing_clip->Position() * info.fps.ToDouble()) + 1;
            int64_t old_ending_frame = ((existing_clip->Position() + existing_clip->Duration()) * info.fps.ToDouble()) + 1;
            final_cache->Remove(old_starting_frame - 8, old_ending_frame + 8);

            // Remove clip from timeline
            RemoveClip(existing_clip);
        }

    }

    // Re-Sort Clips (since they likely changed)
    sort_clips();
}

// Apply JSON diff to effects
void Timeline::apply_json_to_effects(Json::Value change) {

    // Get key and type of change
    std::string change_type = change["type"].asString();
    EffectBase *existing_effect = NULL;

    // Find id of an effect (if any)
    for (auto key_part : change["key"]) {

        if (key_part.isObject()) {
            // Check for id
            if (!key_part["id"].isNull())
            {
                // Set the id
                std::string effect_id = key_part["id"].asString();

                // Find matching effect in timeline (if any)
                for (auto e : effects)
                {
                    if (e->Id() == effect_id) {
                        existing_effect = e;
                        break; // effect found, exit loop
                    }
                }
                break; // id found, exit loop
            }
        }
    }

    // Now that we found the effect, apply the change to it
    if (existing_effect || change_type == "insert") {
        // Apply change to effect
        apply_json_to_effects(change, existing_effect);
    }
}

// Apply JSON diff to effects (if you already know which effect needs to be updated)
void Timeline::apply_json_to_effects(Json::Value change, EffectBase* existing_effect) {

    // Get key and type of change
    std::string change_type = change["type"].asString();

    // Calculate start and end frames that this impacts, and remove those frames from the cache
    if (!change["value"].isArray() && !change["value"]["position"].isNull()) {
        int64_t new_starting_frame = (change["value"]["position"].asDouble() * info.fps.ToDouble()) + 1;
        int64_t new_ending_frame = ((change["value"]["position"].asDouble() + change["value"]["end"].asDouble() - change["value"]["start"].asDouble()) * info.fps.ToDouble()) + 1;
        final_cache->Remove(new_starting_frame - 8, new_ending_frame + 8);
    }

    // Determine type of change operation
    if (change_type == "insert") {

        // Determine type of effect
        std::string effect_type = change["value"]["type"].asString();

        // Create Effect
        EffectBase *e = NULL;

        // Init the matching effect object
        if ( (e = EffectInfo().CreateEffect(effect_type)) ) {

            // Keep track of allocated effect objects
            allocated_effects.insert(e);

            // Load Json into Effect
            e->SetJsonValue(change["value"]);

            // Add Effect to Timeline
            AddEffect(e);
        }

    } else if (change_type == "update") {

        // Update existing effect
        if (existing_effect) {

            // Calculate start and end frames that this impacts, and remove those frames from the cache
            int64_t old_starting_frame = (existing_effect->Position() * info.fps.ToDouble()) + 1;
            int64_t old_ending_frame = ((existing_effect->Position() + existing_effect->Duration()) * info.fps.ToDouble()) + 1;
            final_cache->Remove(old_starting_frame - 8, old_ending_frame + 8);

            // Update effect properties from JSON
            existing_effect->SetJsonValue(change["value"]);
        }

    } else if (change_type == "delete") {

        // Remove existing effect
        if (existing_effect) {

            // Calculate start and end frames that this impacts, and remove those frames from the cache
            int64_t old_starting_frame = (existing_effect->Position() * info.fps.ToDouble()) + 1;
            int64_t old_ending_frame = ((existing_effect->Position() + existing_effect->Duration()) * info.fps.ToDouble()) + 1;
            final_cache->Remove(old_starting_frame - 8, old_ending_frame + 8);

            // Remove effect from timeline
            RemoveEffect(existing_effect);
        }

    }

    // Re-Sort Effects (since they likely changed)
    sort_effects();
}

// Apply JSON diff to timeline properties
void Timeline::apply_json_to_timeline(Json::Value change) {
    bool cache_dirty = true;

    // Get key and type of change
    std::string change_type = change["type"].asString();
    std::string root_key = change["key"][(uint)0].asString();
    std::string sub_key = "";
    if (change["key"].size() >= 2)
        sub_key = change["key"][(uint)1].asString();

    // Determine type of change operation
    if (change_type == "insert" || change_type == "update") {

        // INSERT / UPDATE
        // Check for valid property
        if (root_key == "color")
            // Set color
            color.SetJsonValue(change["value"]);
        else if (root_key == "viewport_scale")
            // Set viewport scale
            viewport_scale.SetJsonValue(change["value"]);
        else if (root_key == "viewport_x")
            // Set viewport x offset
            viewport_x.SetJsonValue(change["value"]);
        else if (root_key == "viewport_y")
            // Set viewport y offset
            viewport_y.SetJsonValue(change["value"]);
        else if (root_key == "duration") {
            // Update duration of timeline
            info.duration = change["value"].asDouble();
            info.video_length = info.fps.ToFloat() * info.duration;

            // We don't want to clear cache for duration adjustments
            cache_dirty = false;
        }
        else if (root_key == "width") {
            // Set width
            info.width = change["value"].asInt();
            preview_width = info.width;
        }
        else if (root_key == "height") {
            // Set height
            info.height = change["value"].asInt();
            preview_height = info.height;
        }
        else if (root_key == "fps" && sub_key == "" && change["value"].isObject()) {
            // Set fps fraction
            if (!change["value"]["num"].isNull())
                info.fps.num = change["value"]["num"].asInt();
            if (!change["value"]["den"].isNull())
                info.fps.den = change["value"]["den"].asInt();
        }
        else if (root_key == "fps" && sub_key == "num")
            // Set fps.num
            info.fps.num = change["value"].asInt();
        else if (root_key == "fps" && sub_key == "den")
            // Set fps.den
            info.fps.den = change["value"].asInt();
        else if (root_key == "display_ratio" && sub_key == "" && change["value"].isObject()) {
            // Set display_ratio fraction
            if (!change["value"]["num"].isNull())
                info.display_ratio.num = change["value"]["num"].asInt();
            if (!change["value"]["den"].isNull())
                info.display_ratio.den = change["value"]["den"].asInt();
        }
        else if (root_key == "display_ratio" && sub_key == "num")
            // Set display_ratio.num
            info.display_ratio.num = change["value"].asInt();
        else if (root_key == "display_ratio" && sub_key == "den")
            // Set display_ratio.den
            info.display_ratio.den = change["value"].asInt();
        else if (root_key == "pixel_ratio" && sub_key == "" && change["value"].isObject()) {
            // Set pixel_ratio fraction
            if (!change["value"]["num"].isNull())
                info.pixel_ratio.num = change["value"]["num"].asInt();
            if (!change["value"]["den"].isNull())
                info.pixel_ratio.den = change["value"]["den"].asInt();
        }
        else if (root_key == "pixel_ratio" && sub_key == "num")
            // Set pixel_ratio.num
            info.pixel_ratio.num = change["value"].asInt();
        else if (root_key == "pixel_ratio" && sub_key == "den")
            // Set pixel_ratio.den
            info.pixel_ratio.den = change["value"].asInt();

        else if (root_key == "sample_rate")
            // Set sample rate
            info.sample_rate = change["value"].asInt();
        else if (root_key == "channels")
            // Set channels
            info.channels = change["value"].asInt();
        else if (root_key == "channel_layout")
            // Set channel layout
            info.channel_layout = (ChannelLayout) change["value"].asInt();
        else
            // Error parsing JSON (or missing keys)
            throw InvalidJSONKey("JSON change key is invalid", change.toStyledString());

    } else if (change["type"].asString() == "delete") {

        // DELETE / RESET
        // Reset the following properties (since we can't delete them)
        if (root_key == "color") {
            color = Color();
            color.red = Keyframe(0.0);
            color.green = Keyframe(0.0);
            color.blue = Keyframe(0.0);
        }
        else if (root_key == "viewport_scale")
            viewport_scale = Keyframe(1.0);
        else if (root_key == "viewport_x")
            viewport_x = Keyframe(0.0);
        else if (root_key == "viewport_y")
            viewport_y = Keyframe(0.0);
        else
            // Error parsing JSON (or missing keys)
            throw InvalidJSONKey("JSON change key is invalid", change.toStyledString());

    }

    if (cache_dirty) {
        // Clear entire cache
        ClearAllCache();
    }
}

// Clear all caches
void Timeline::ClearAllCache(bool deep) {

    // Clear primary cache
    if (final_cache) {
        final_cache->Clear();
    }

    // Loop through all clips
    try {
        for (const auto clip : clips) {
            // Clear cache on clip's reader
            clip->Reader()->GetCache()->Clear();

            // Clear nested Reader (if deep clear requested)
            if (deep && clip->Reader()->Name() == "FrameMapper") {
                FrameMapper *nested_reader = static_cast<FrameMapper *>(clip->Reader());
                if (nested_reader->Reader() && nested_reader->Reader()->GetCache())
                    nested_reader->Reader()->GetCache()->Clear();
            }

            // Clear clip cache
            clip->GetCache()->Clear();
        }
    } catch (const ReaderClosed & e) {
        // ...
    }
}

1743// Set Max Image Size (used for performance optimization). Convenience function for setting
1744// Settings::Instance()->MAX_WIDTH and Settings::Instance()->MAX_HEIGHT.
1745void Timeline::SetMaxSize(int width, int height) {
1746 // Maintain aspect ratio regardless of what size is passed in
1747 QSize display_ratio_size = QSize(info.width, info.height);
1748 QSize proposed_size = QSize(std::min(width, info.width), std::min(height, info.height));
1749
1750 // Scale QSize up to proposed size
1751 display_ratio_size.scale(proposed_size, Qt::KeepAspectRatio);
1752
1753 // Update preview settings
1754 preview_width = display_ratio_size.width();
1755 preview_height = display_ratio_size.height();
1756}
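A worked example of the clamp-then-scale logic above, assuming a 1920x1080 timeline: the proposed size is first clamped to (min(1280, 1920), min(1280, 1080)) = (1280, 1080), then QSize::scale() with Qt::KeepAspectRatio fits the 16:9 display ratio inside it, yielding a 1280x720 preview.

#include "Timeline.h"
using namespace openshot;

int main() {
    Timeline t(1920, 1080, Fraction(30, 1), 44100, 2, LAYOUT_STEREO);
    t.SetMaxSize(1280, 1280);
    // t.preview_width == 1280, t.preview_height == 720 (aspect ratio preserved)
    return 0;
}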
Header file for CacheBase class.
Header file for CacheDisk class.
Header file for CacheMemory class.
Header file for CrashHandler class.
Header file for all Exception classes.
Header file for the FrameMapper class.
#define OPEN_MP_NUM_PROCESSORS
Header file for Timeline class.
All cache managers in libopenshot are based on this CacheBase class.
Definition: CacheBase.h:35
virtual void Clear()=0
Clear the cache of all frames.
virtual void Remove(int64_t frame_number)=0
Remove a specific frame.
virtual std::shared_ptr< openshot::Frame > GetFrame(int64_t frame_number)=0
Get a frame from the cache.
virtual void Add(std::shared_ptr< openshot::Frame > frame)=0
Add a Frame to the cache.
void SetMaxBytesFromInfo(int64_t number_of_frames, int width, int height, int sample_rate, int channels)
Set maximum bytes to a different amount based on a ReaderInfo struct.
Definition: CacheBase.cpp:30
This class is a memory-based cache manager for Frame objects.
Definition: CacheMemory.h:29
float Start() const
Get start position (in seconds) of clip (trim start of video)
Definition: ClipBase.h:88
float Duration() const
Get the length of this clip (in seconds)
Definition: ClipBase.h:90
virtual std::shared_ptr< openshot::Frame > GetFrame(int64_t frame_number)=0
This method is required for all derived classes of ClipBase, and returns a new openshot::Frame object...
std::string Id() const
Get the Id of this clip object.
Definition: ClipBase.h:85
int Layer() const
Get layer of clip on timeline (lower number is covered by higher numbers)
Definition: ClipBase.h:87
virtual void SetJsonValue(const Json::Value root)=0
Load Json::Value into this object.
Definition: ClipBase.cpp:80
float Position() const
Get position on timeline (in seconds)
Definition: ClipBase.h:86
virtual openshot::TimelineBase * ParentTimeline()
Get the associated Timeline pointer (if any)
Definition: ClipBase.h:91
This class represents a clip (used to arrange readers on the timeline)
Definition: Clip.h:89
openshot::VolumeMixType mixing
What strategy should be followed when mixing audio with other clips.
Definition: Clip.h:171
openshot::Keyframe channel_filter
A number representing an audio channel to filter (clears all other channels)
Definition: Clip.h:337
void SetJsonValue(const Json::Value root) override
Load Json::Value into this object.
Definition: Clip.cpp:924
openshot::Keyframe has_audio
An optional override to determine if this clip has audio (-1=undefined, 0=no, 1=yes)
Definition: Clip.h:341
openshot::TimelineBase * ParentTimeline() override
Get the associated Timeline pointer (if any)
Definition: Clip.h:284
std::list< openshot::EffectBase * > Effects()
Return the list of effects on this clip.
Definition: Clip.h:233
openshot::Keyframe volume
Curve representing the volume (0 to 1)
Definition: Clip.h:321
openshot::Keyframe channel_mapping
A number representing an audio channel to output (only works when filtering a channel)
Definition: Clip.h:338
void Reader(openshot::ReaderBase *new_reader)
Set the current reader.
Definition: Clip.cpp:274
This class represents a color (used on the timeline and clips)
Definition: Color.h:27
std::string GetColorHex(int64_t frame_number)
Get the HEX value of a color at a specific frame.
Definition: Color.cpp:47
openshot::Keyframe blue
Curve representing the blue value (0 - 255)
Definition: Color.h:32
openshot::Keyframe red
Curve representing the red value (0 - 255)
Definition: Color.h:30
openshot::Keyframe green
Curve representing the green value (0 - 255)
Definition: Color.h:31
void SetJsonValue(const Json::Value root)
Load Json::Value into this object.
Definition: Color.cpp:117
Json::Value JsonValue() const
Generate Json::Value for this object.
Definition: Color.cpp:86
static CrashHandler * Instance()
This abstract class is the base class, used by all effects in libopenshot.
Definition: EffectBase.h:54
virtual void SetJsonValue(const Json::Value root)
Load Json::Value into this object.
Definition: EffectBase.cpp:115
This class returns a listing of all effects supported by libopenshot.
Definition: EffectInfo.h:29
EffectBase * CreateEffect(std::string effect_type)
Create an instance of an effect (factory style)
Definition: EffectInfo.cpp:26
This class represents a fraction.
Definition: Fraction.h:30
int num
Numerator for the fraction.
Definition: Fraction.h:32
float ToFloat()
Return this fraction as a float (i.e. 1/2 = 0.5)
Definition: Fraction.cpp:35
double ToDouble() const
Return this fraction as a double (i.e. 1/2 = 0.5)
Definition: Fraction.cpp:40
void Reduce()
Reduce this fraction (i.e. 640/480 = 4/3)
Definition: Fraction.cpp:65
Fraction Reciprocal() const
Return the reciprocal as a Fraction.
Definition: Fraction.cpp:78
int den
Denominator for the fraction.
Definition: Fraction.h:33
This class creates a mapping between 2 different frame rates, applying a specific pull-down technique...
Definition: FrameMapper.h:193
void ChangeMapping(Fraction target_fps, PulldownType pulldown, int target_sample_rate, int target_channels, ChannelLayout target_channel_layout)
Change frame rate or audio mapping details.
ReaderBase * Reader()
Get the current reader.
Definition: FrameMapper.cpp:64
void Close() override
Close the openshot::FrameMapper and internal reader.
int GetSamplesPerFrame(openshot::Fraction fps, int sample_rate, int channels)
Calculate the # of samples per video frame (for the current frame number)
Definition: Frame.cpp:484
Exception for files that cannot be found or opened.
Definition: Exceptions.h:188
Exception for missing JSON Change key.
Definition: Exceptions.h:263
Exception for invalid JSON.
Definition: Exceptions.h:218
A Keyframe is a collection of Point instances, which is used to vary a number or property over time.
Definition: KeyFrame.h:53
int GetInt(int64_t index) const
Get the rounded INT value at a specific index.
Definition: KeyFrame.cpp:282
void SetJsonValue(const Json::Value root)
Load Json::Value into this object.
Definition: KeyFrame.cpp:372
double GetValue(int64_t index) const
Get the value at a specific index.
Definition: KeyFrame.cpp:258
Json::Value JsonValue() const
Generate Json::Value for this object.
Definition: KeyFrame.cpp:339
int64_t GetCount() const
Get the number of points in this keyframe's curve.
Definition: KeyFrame.cpp:424
Exception for frames that are out of bounds.
Definition: Exceptions.h:301
This abstract class is the base class, used by all readers in libopenshot.
Definition: ReaderBase.h:76
openshot::ReaderInfo info
Information about the current media file.
Definition: ReaderBase.h:88
virtual void SetJsonValue(const Json::Value root)=0
Load Json::Value into this object.
Definition: ReaderBase.cpp:162
virtual Json::Value JsonValue() const =0
Generate Json::Value for this object.
Definition: ReaderBase.cpp:107
std::recursive_mutex getFrameMutex
Mutex for multiple threads.
Definition: ReaderBase.h:79
virtual openshot::CacheBase * GetCache()=0
Get the cache object used by this reader (note: not all readers use cache)
openshot::ClipBase * clip
Pointer to the parent clip instance (if any)
Definition: ReaderBase.h:80
Exception when a reader is closed, and a frame is requested.
Definition: Exceptions.h:364
This class contains settings used by libopenshot (and can be safely toggled at any point)
Definition: Settings.h:26
std::string PATH_OPENSHOT_INSTALL
Definition: Settings.h:111
static Settings * Instance()
Create or get an instance of this settings singleton (invoke the class with this method)
Definition: Settings.cpp:23
int preview_height
Optional preview height of timeline image. If your preview window is smaller than the timeline,...
Definition: TimelineBase.h:45
int preview_width
Optional preview width of timeline image. If your preview window is smaller than the timeline,...
Definition: TimelineBase.h:44
This class represents a timeline.
Definition: Timeline.h:148
void AddTrackedObject(std::shared_ptr< openshot::TrackedObjectBase > trackedObject)
Add a pointer to a tracked object (TrackedObjectBBox) to the tracked_objects map.
Definition: Timeline.cpp:223
Json::Value JsonValue() const override
Generate Json::Value for this object.
Definition: Timeline.cpp:1163
openshot::Keyframe viewport_scale
Curve representing the scale of the viewport (0 to 100)
Definition: Timeline.h:318
void ApplyJsonDiff(std::string value)
Apply a specially formatted JSON object, which represents a change to the timeline (add,...
Definition: Timeline.cpp:1316
openshot::EffectBase * GetClipEffect(const std::string &id)
Look up a clip effect by ID.
Definition: Timeline.cpp:431
void AddClip(openshot::Clip *clip)
Add an openshot::Clip to the timeline.
Definition: Timeline.cpp:332
virtual ~Timeline()
Definition: Timeline.cpp:206
std::list< openshot::EffectBase * > ClipEffects() const
Return the list of effects on all clips.
Definition: Timeline.cpp:444
std::list< std::string > GetTrackedObjectsIds() const
Return the ID's of the tracked objects as a list of strings.
Definition: Timeline.cpp:258
std::string Json() const override
Generate JSON string of this object.
Definition: Timeline.cpp:1156
int64_t GetMaxFrame()
Look up the end frame number of the latest element on the timeline.
Definition: Timeline.cpp:469
double GetMinTime()
Look up the position/start time of the first timeline element.
Definition: Timeline.cpp:476
std::shared_ptr< openshot::Frame > GetFrame(int64_t requested_frame) override
Definition: Timeline.cpp:931
void ApplyMapperToClips()
Apply the timeline's framerate and samplerate to all clips.
Definition: Timeline.cpp:515
openshot::Color color
Background color of timeline canvas.
Definition: Timeline.h:323
std::string GetTrackedObjectValues(std::string id, int64_t frame_number) const
Return the trackedObject's properties as a JSON string.
Definition: Timeline.cpp:274
Timeline(int width, int height, openshot::Fraction fps, int sample_rate, int channels, openshot::ChannelLayout channel_layout)
Constructor for the timeline (which configures the default frame properties)
Definition: Timeline.cpp:28
std::shared_ptr< openshot::TrackedObjectBase > GetTrackedObject(std::string id) const
Return a tracked object pointer by its ID.
Definition: Timeline.cpp:241
int64_t GetMinFrame()
Look up the start frame number of the first element on the timeline (first frame is 1)
Definition: Timeline.cpp:482
openshot::EffectBase * GetEffect(const std::string &id)
Look up a timeline effect by ID.
Definition: Timeline.cpp:420
void SetJsonValue(const Json::Value root) override
Load Json::Value into this object.
Definition: Timeline.cpp:1217
openshot::Clip * GetClip(const std::string &id)
Look up a single clip by ID.
Definition: Timeline.cpp:408
void ClearAllCache(bool deep=false)
Definition: Timeline.cpp:1715
void AddEffect(openshot::EffectBase *effect)
Add an effect to the timeline.
Definition: Timeline.cpp:358
void SetCache(openshot::CacheBase *new_cache)
Definition: Timeline.cpp:1140
void Clear()
Clear all clips, effects, and frame mappers from timeline (and free memory)
Definition: Timeline.cpp:851
openshot::Keyframe viewport_x
Curve representing the x coordinate for the viewport.
Definition: Timeline.h:319
void RemoveClip(openshot::Clip *clip)
Remove an openshot::Clip from the timeline.
Definition: Timeline.cpp:388
void SetMaxSize(int width, int height)
Definition: Timeline.cpp:1745
double GetMaxTime()
Look up the end time of the latest timeline element.
Definition: Timeline.cpp:463
void RemoveEffect(openshot::EffectBase *effect)
Remove an effect from the timeline.
Definition: Timeline.cpp:371
std::shared_ptr< openshot::Frame > apply_effects(std::shared_ptr< openshot::Frame > frame, int64_t timeline_frame_number, int layer, TimelineInfoStruct *options)
Apply global/timeline effects to the source frame (if any)
Definition: Timeline.cpp:539
void Open() override
Open the reader (and start consuming resources)
Definition: Timeline.cpp:919
void SetJson(const std::string value) override
Load JSON string into this object.
Definition: Timeline.cpp:1197
openshot::Keyframe viewport_y
Curve representing the y coordinate for the viewport.
Definition: Timeline.h:320
void Close() override
Close the timeline reader (and any resources it was consuming)
Definition: Timeline.cpp:897
void AppendDebugMethod(std::string method_name, std::string arg1_name="", float arg1_value=-1.0, std::string arg2_name="", float arg2_value=-1.0, std::string arg3_name="", float arg3_value=-1.0, std::string arg4_name="", float arg4_value=-1.0, std::string arg5_name="", float arg5_value=-1.0, std::string arg6_name="", float arg6_value=-1.0)
Append debug information.
Definition: ZmqLogger.cpp:178
static ZmqLogger * Instance()
Create or get an instance of this logger singleton (invoke the class with this method)
Definition: ZmqLogger.cpp:35
This namespace is the default namespace for all code in the openshot library.
Definition: Compressor.h:29
@ PULLDOWN_NONE
Do not apply pull-down techniques, just repeat or skip entire frames.
Definition: FrameMapper.h:46
ChannelLayout
This enumeration determines the audio channel layout (such as stereo, mono, 5 point surround,...
@ VOLUME_MIX_AVERAGE
Evenly divide the overlapping clips' volume keyframes, so that the sum does not exceed 100%.
Definition: Enums.h:63
@ VOLUME_MIX_REDUCE
Reduce volume by about 25%, and then mix (louder, but could cause pops if the sum exceeds 100%)
Definition: Enums.h:64
const Json::Value stringToJson(const std::string value)
Definition: Json.cpp:16
This struct holds the information of a bounding-box.
float cy
y-coordinate of the bounding box center
float height
bounding box height
float cx
x-coordinate of the bounding box center
float width
bounding box width
float angle
bounding box rotation angle [degrees]
Like CompareClipEndFrames, but for effects.
Definition: Timeline.h:75
This struct contains info about a media file, such as height, width, frames per second,...
Definition: ReaderBase.h:39
float duration
Length of time (in seconds)
Definition: ReaderBase.h:43
int width
The width of the video (in pixels)
Definition: ReaderBase.h:46
int channels
The number of audio channels used in the audio stream.
Definition: ReaderBase.h:61
openshot::Fraction fps
Frames per second, as a fraction (i.e. 24/1 = 24 fps)
Definition: ReaderBase.h:48
openshot::Fraction display_ratio
The ratio of width to height of the video stream (i.e. 640x480 has a ratio of 4/3)
Definition: ReaderBase.h:51
int height
The height of the video (in pixels)
Definition: ReaderBase.h:45
int64_t video_length
The number of frames in the video stream.
Definition: ReaderBase.h:53
std::string acodec
The name of the audio codec used to encode / decode the audio stream.
Definition: ReaderBase.h:58
std::string vcodec
The name of the video codec used to encode / decode the video stream.
Definition: ReaderBase.h:52
openshot::Fraction pixel_ratio
The pixel ratio of the video stream as a fraction (i.e. some pixels are not square)
Definition: ReaderBase.h:50
openshot::ChannelLayout channel_layout
The channel layout (mono, stereo, 5 point surround, etc...)
Definition: ReaderBase.h:62
bool has_video
Determines if this file has a video stream.
Definition: ReaderBase.h:40
bool has_audio
Determines if this file has an audio stream.
Definition: ReaderBase.h:41
openshot::Fraction video_timebase
The video timebase determines how long each frame stays on the screen.
Definition: ReaderBase.h:55
int sample_rate
The number of audio samples per second (44100 is a common sample rate)
Definition: ReaderBase.h:60
This struct contains info about the current Timeline clip instance.
Definition: TimelineBase.h:33
bool is_before_clip_keyframes
True if this is evaluated before the clip's keyframes are applied.
Definition: TimelineBase.h:35
bool is_top_clip
Is clip on top (if overlapping another clip)
Definition: TimelineBase.h:34