diff --git a/src/monitor/glwidget.cpp b/src/monitor/glwidget.cpp index a6a76188e..c809a6990 100644 --- a/src/monitor/glwidget.cpp +++ b/src/monitor/glwidget.cpp @@ -1,2004 +1,2006 @@ /* * Copyright (c) 2011-2016 Meltytech, LLC * Original author: Dan Dennedy * Modified for Kdenlive: Jean-Baptiste Mardelle * * GL shader based on BSD licensed code from Peter Bengtsson: * http://www.fourcc.org/source/YUV420P-OpenGL-GLSLang.c * * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see . */ #include #include #include #include #include #include #include #include #include #include "core.h" #include "glwidget.h" #include "kdenlivesettings.h" #include "monitorproxy.h" #include "profiles/profilemodel.hpp" #include "qml/qmlaudiothumb.h" #include "timeline2/view/qml/timelineitems.h" #include #ifndef GL_UNPACK_ROW_LENGTH #ifdef GL_UNPACK_ROW_LENGTH_EXT #define GL_UNPACK_ROW_LENGTH GL_UNPACK_ROW_LENGTH_EXT #else #error GL_UNPACK_ROW_LENGTH undefined #endif #endif #ifdef QT_NO_DEBUG #define check_error(fn) \ { \ } #else #define check_error(fn) \ { \ uint err = fn->glGetError(); \ if (err != GL_NO_ERROR) { \ qCCritical(KDENLIVE_LOG) << "GL error" << hex << err << dec << "at" << __FILE__ << ":" << __LINE__; \ } \ } #endif #ifndef GL_TIMEOUT_IGNORED #define GL_TIMEOUT_IGNORED 0xFFFFFFFFFFFFFFFFull #endif using namespace Mlt; GLWidget::GLWidget(int id, QObject *parent) : QQuickView((QWindow *)parent) , sendFrameForAnalysis(false) , m_glslManager(nullptr) , m_consumer(nullptr) , m_producer(nullptr) , m_id(id) , m_rulerHeight(QFontMetrics(QApplication::font()).lineSpacing() * 0.7) , m_shader(nullptr) , m_initSem(0) , m_analyseSem(1) , m_isInitialized(false) , m_threadStartEvent(nullptr) , m_threadStopEvent(nullptr) , m_threadCreateEvent(nullptr) , m_threadJoinEvent(nullptr) , m_displayEvent(nullptr) , m_frameRenderer(nullptr) , m_projectionLocation(0) , m_modelViewLocation(0) , m_vertexLocation(0) , m_texCoordLocation(0) , m_colorspaceLocation(0) , m_zoom(1.0f) , m_sendFrame(false) , m_isZoneMode(false) , m_isLoopMode(false) , m_offset(QPoint(0, 0)) , m_audioWaveDisplayed(false) , m_fbo(nullptr) , m_shareContext(nullptr) , m_openGLSync(false) , m_ClientWaitSync(nullptr) { KDeclarative::KDeclarative kdeclarative; kdeclarative.setDeclarativeEngine(engine()); #if KDECLARATIVE_VERSION >= QT_VERSION_CHECK(5, 45, 0) kdeclarative.setupEngine(engine()); kdeclarative.setupContext(); #else kdeclarative.setupBindings(); #endif m_texture[0] = m_texture[1] = m_texture[2] = 0; qRegisterMetaType("Mlt::Frame"); qRegisterMetaType("SharedFrame"); qmlRegisterType("AudioThumb", 1, 0, "QmlAudioThumb"); setPersistentOpenGLContext(true); setPersistentSceneGraph(true); setClearBeforeRendering(false); setResizeMode(QQuickView::SizeRootObjectToView); m_offscreenSurface.setFormat(QWindow::format()); m_offscreenSurface.create(); m_refreshTimer.setSingleShot(true); m_refreshTimer.setInterval(50); m_blackClip.reset(new Mlt::Producer(pCore->getCurrentProfile()->profile(), "color:black")); 
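    // The "color:black" producer is a placeholder clip: setProducer() falls back to it
    // whenever no real producer is loaded, so the consumer always has something to show.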
m_blackClip->set("kdenlive:id", "black"); m_blackClip->set("out", 3); connect(&m_refreshTimer, &QTimer::timeout, this, &GLWidget::refresh); m_producer = m_blackClip; - + rootContext()->setContextProperty("markersModel", 0); if (!initGPUAccel()) { disableGPUAccel(); } connect(this, &QQuickWindow::sceneGraphInitialized, this, &GLWidget::initializeGL, Qt::DirectConnection); connect(this, &QQuickWindow::beforeRendering, this, &GLWidget::paintGL, Qt::DirectConnection); registerTimelineItems(); m_proxy = new MonitorProxy(this); connect(m_proxy, &MonitorProxy::seekRequestChanged, this, &GLWidget::requestSeek); rootContext()->setContextProperty("controller", m_proxy); } GLWidget::~GLWidget() { // C & D delete m_glslManager; delete m_threadStartEvent; delete m_threadStopEvent; delete m_threadCreateEvent; delete m_threadJoinEvent; delete m_displayEvent; if (m_frameRenderer) { if (m_frameRenderer->isRunning()) { QMetaObject::invokeMethod(m_frameRenderer, "cleanup"); m_frameRenderer->quit(); m_frameRenderer->wait(); m_frameRenderer->deleteLater(); } else { delete m_frameRenderer; } } m_blackClip.reset(); delete m_shareContext; delete m_shader; // delete pCore->getCurrentProfile(); } void GLWidget::updateAudioForAnalysis() { if (m_frameRenderer) { m_frameRenderer->sendAudioForAnalysis = KdenliveSettings::monitor_audio(); } } void GLWidget::initializeGL() { if (m_isInitialized || !isVisible() || (openglContext() == nullptr)) return; openglContext()->makeCurrent(&m_offscreenSurface); initializeOpenGLFunctions(); qCDebug(KDENLIVE_LOG) << "OpenGL vendor: " << QString::fromUtf8((const char *)glGetString(GL_VENDOR)); qCDebug(KDENLIVE_LOG) << "OpenGL renderer: " << QString::fromUtf8((const char *)glGetString(GL_RENDERER)); qCDebug(KDENLIVE_LOG) << "OpenGL Threaded: " << openglContext()->supportsThreadedOpenGL(); qCDebug(KDENLIVE_LOG) << "OpenGL ARG_SYNC: " << openglContext()->hasExtension("GL_ARB_sync"); qCDebug(KDENLIVE_LOG) << "OpenGL OpenGLES: " << openglContext()->isOpenGLES(); // C & D if (onlyGLESGPUAccel()) { disableGPUAccel(); } createShader(); m_openGLSync = initGPUAccelSync(); // C & D if (m_glslManager) { // Create a context sharing with this context for the RenderThread context. // This is needed because openglContext() is active in another thread // at the time that RenderThread is created. // See this Qt bug for more info: https://bugreports.qt.io/browse/QTBUG-44677 // TODO: QTBUG-44677 is closed. still applicable? 
m_shareContext = new QOpenGLContext; m_shareContext->setFormat(openglContext()->format()); m_shareContext->setShareContext(openglContext()); m_shareContext->create(); } m_frameRenderer = new FrameRenderer(openglContext(), &m_offscreenSurface, m_ClientWaitSync); m_frameRenderer->sendAudioForAnalysis = KdenliveSettings::monitor_audio(); openglContext()->makeCurrent(this); // openglContext()->blockSignals(false); connect(m_frameRenderer, &FrameRenderer::frameDisplayed, this, &GLWidget::frameDisplayed, Qt::QueuedConnection); connect(m_frameRenderer, &FrameRenderer::textureReady, this, &GLWidget::updateTexture, Qt::DirectConnection); connect(m_frameRenderer, &FrameRenderer::frameDisplayed, this, &GLWidget::onFrameDisplayed, Qt::QueuedConnection); connect(m_frameRenderer, &FrameRenderer::audioSamplesSignal, this, &GLWidget::audioSamplesSignal, Qt::QueuedConnection); m_initSem.release(); m_isInitialized = true; reconfigure(); } void GLWidget::resizeGL(int width, int height) { int x, y, w, h; height -= m_rulerHeight; double this_aspect = (double)width / height; double video_aspect = pCore->getCurrentProfile()->dar(); // Special case optimization to negate odd effect of sample aspect ratio // not corresponding exactly with image resolution. if ((int)(this_aspect * 1000) == (int)(video_aspect * 1000)) { w = width; h = height; } // Use OpenGL to normalise sample aspect ratio else if (height * video_aspect > width) { w = width; h = width / video_aspect; } else { w = height * video_aspect; h = height; } x = (width - w) / 2; y = (height - h) / 2; m_rect.setRect(x, y, w, h); double scalex = (double)m_rect.width() / pCore->getCurrentProfile()->width() * m_zoom; double scaley = (double)m_rect.width() / ((double)pCore->getCurrentProfile()->height() * pCore->getCurrentProfile()->dar() / pCore->getCurrentProfile()->width()) / pCore->getCurrentProfile()->width() * m_zoom; QPoint center = m_rect.center(); QQuickItem *rootQml = rootObject(); if (rootQml) { rootQml->setProperty("center", center); rootQml->setProperty("scalex", scalex); rootQml->setProperty("scaley", scaley); if (rootQml->objectName() == QLatin1String("rootsplit")) { // Adjust splitter pos rootQml->setProperty("splitterPos", x + (rootQml->property("realpercent").toDouble() * w)); } } emit rectChanged(); } void GLWidget::resizeEvent(QResizeEvent *event) { resizeGL(event->size().width(), event->size().height()); QQuickView::resizeEvent(event); } void GLWidget::createGPUAccelFragmentProg() { m_shader->addShaderFromSourceCode(QOpenGLShader::Fragment, "uniform sampler2D tex;" "varying highp vec2 coordinates;" "void main(void) {" " gl_FragColor = texture2D(tex, coordinates);" "}"); m_shader->link(); m_textureLocation[0] = m_shader->uniformLocation("tex"); } void GLWidget::createShader() { m_shader = new QOpenGLShaderProgram; m_shader->addShaderFromSourceCode(QOpenGLShader::Vertex, "uniform highp mat4 projection;" "uniform highp mat4 modelView;" "attribute highp vec4 vertex;" "attribute highp vec2 texCoord;" "varying highp vec2 coordinates;" "void main(void) {" " gl_Position = projection * modelView * vertex;" " coordinates = texCoord;" "}"); // C & D if (m_glslManager) { createGPUAccelFragmentProg(); } else { // A & B createYUVTextureProjectFragmentProg(); } m_projectionLocation = m_shader->uniformLocation("projection"); m_modelViewLocation = m_shader->uniformLocation("modelView"); m_vertexLocation = m_shader->attributeLocation("vertex"); m_texCoordLocation = m_shader->attributeLocation("texCoord"); } void GLWidget::createYUVTextureProjectFragmentProg() 
{ m_shader->addShaderFromSourceCode(QOpenGLShader::Fragment, "uniform sampler2D Ytex, Utex, Vtex;" "uniform lowp int colorspace;" "varying highp vec2 coordinates;" "void main(void) {" " mediump vec3 texel;" " texel.r = texture2D(Ytex, coordinates).r - 0.0625;" // Y " texel.g = texture2D(Utex, coordinates).r - 0.5;" // U " texel.b = texture2D(Vtex, coordinates).r - 0.5;" // V " mediump mat3 coefficients;" " if (colorspace == 601) {" " coefficients = mat3(" " 1.1643, 1.1643, 1.1643," // column 1 " 0.0, -0.39173, 2.017," // column 2 " 1.5958, -0.8129, 0.0);" // column 3 " } else {" // ITU-R 709 " coefficients = mat3(" " 1.1643, 1.1643, 1.1643," // column 1 " 0.0, -0.213, 2.112," // column 2 " 1.793, -0.533, 0.0);" // column 3 " }" " gl_FragColor = vec4(coefficients * texel, 1.0);" "}"); m_shader->link(); m_textureLocation[0] = m_shader->uniformLocation("Ytex"); m_textureLocation[1] = m_shader->uniformLocation("Utex"); m_textureLocation[2] = m_shader->uniformLocation("Vtex"); m_colorspaceLocation = m_shader->uniformLocation("colorspace"); } static void uploadTextures(QOpenGLContext *context, const SharedFrame &frame, GLuint texture[]) { int width = frame.get_image_width(); int height = frame.get_image_height(); const uint8_t *image = frame.get_image(); QOpenGLFunctions *f = context->functions(); // The planes of pixel data may not be a multiple of the default 4 bytes. f->glPixelStorei(GL_UNPACK_ALIGNMENT, 1); // Upload each plane of YUV to a texture. if (texture[0] != 0u) { f->glDeleteTextures(3, texture); } check_error(f); f->glGenTextures(3, texture); check_error(f); f->glBindTexture(GL_TEXTURE_2D, texture[0]); check_error(f); f->glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR); check_error(f); f->glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR); check_error(f); f->glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE); check_error(f); f->glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE); check_error(f); f->glTexImage2D(GL_TEXTURE_2D, 0, GL_LUMINANCE, width, height, 0, GL_LUMINANCE, GL_UNSIGNED_BYTE, image); check_error(f); f->glBindTexture(GL_TEXTURE_2D, texture[1]); check_error(f); f->glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR); check_error(f); f->glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR); check_error(f); f->glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE); check_error(f); f->glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE); check_error(f); f->glTexImage2D(GL_TEXTURE_2D, 0, GL_LUMINANCE, width / 2, height / 2, 0, GL_LUMINANCE, GL_UNSIGNED_BYTE, image + width * height); check_error(f); f->glBindTexture(GL_TEXTURE_2D, texture[2]); check_error(f); f->glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR); check_error(f); f->glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR); check_error(f); f->glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE); check_error(f); f->glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE); check_error(f); f->glTexImage2D(GL_TEXTURE_2D, 0, GL_LUMINANCE, width / 2, height / 2, 0, GL_LUMINANCE, GL_UNSIGNED_BYTE, image + width * height + width / 2 * height / 2); check_error(f); } void GLWidget::clear() { stopGlsl(); update(); } void GLWidget::releaseAnalyse() { m_analyseSem.release(); } bool GLWidget::acquireSharedFrameTextures() { // A if ((m_glslManager == nullptr) && !openglContext()->supportsThreadedOpenGL()) { QMutexLocker locker(&m_contextSharedAccess); if 
(!m_sharedFrame.is_valid()) { return false; } uploadTextures(openglContext(), m_sharedFrame, m_texture); } else if (m_glslManager) { // C & D m_contextSharedAccess.lock(); if (m_sharedFrame.is_valid()) { m_texture[0] = *((const GLuint *)m_sharedFrame.get_image()); } } if (!m_texture[0]) { // C & D if (m_glslManager) m_contextSharedAccess.unlock(); return false; } return true; } void GLWidget::bindShaderProgram() { m_shader->bind(); // C & D if (m_glslManager) { m_shader->setUniformValue(m_textureLocation[0], 0); } else { // A & B m_shader->setUniformValue(m_textureLocation[0], 0); m_shader->setUniformValue(m_textureLocation[1], 1); m_shader->setUniformValue(m_textureLocation[2], 2); m_shader->setUniformValue(m_colorspaceLocation, pCore->getCurrentProfile()->colorspace()); } } void GLWidget::releaseSharedFrameTextures() { // C & D if (m_glslManager) { glFinish(); m_contextSharedAccess.unlock(); } } bool GLWidget::initGPUAccel() { if (!KdenliveSettings::gpu_accel()) return false; m_glslManager = new Mlt::Filter(pCore->getCurrentProfile()->profile(), "glsl.manager"); return m_glslManager->is_valid(); } // C & D // TODO: insure safe, idempotent on all pipelines. void GLWidget::disableGPUAccel() { delete m_glslManager; m_glslManager = nullptr; KdenliveSettings::setGpu_accel(false); // Need to destroy MLT global reference to prevent filters from trying to use GPU. mlt_properties_set_data(mlt_global_properties(), "glslManager", nullptr, 0, nullptr, nullptr); emit gpuNotSupported(); } bool GLWidget::onlyGLESGPUAccel() const { return (m_glslManager != nullptr) && openglContext()->isOpenGLES(); } #if defined(Q_OS_WIN) bool GLWidget::initGPUAccelSync() { // no-op // TODO: getProcAddress is not working on Windows? return false; } #else bool GLWidget::initGPUAccelSync() { if (!KdenliveSettings::gpu_accel()) return false; if (m_glslManager == nullptr) return false; if (!openglContext()->hasExtension("GL_ARB_sync")) return false; m_ClientWaitSync = (ClientWaitSync_fp)openglContext()->getProcAddress("glClientWaitSync"); if (m_ClientWaitSync) { return true; } else { qCDebug(KDENLIVE_LOG) << " / / // NO GL SYNC, ERROR"; // fallback on A || B // TODO: fallback on A || B || C? disableGPUAccel(); return false; } } #endif void GLWidget::paintGL() { QOpenGLFunctions *f = openglContext()->functions(); int width = this->width() * devicePixelRatio(); int height = this->height() * devicePixelRatio(); f->glDisable(GL_BLEND); f->glDisable(GL_DEPTH_TEST); f->glDepthMask(GL_FALSE); f->glViewport(0, (m_rulerHeight * devicePixelRatio() * 0.5 + 0.5), width, height); check_error(f); QColor color(KdenliveSettings::window_background()); f->glClearColor(color.redF(), color.greenF(), color.blueF(), color.alphaF()); f->glClear(GL_COLOR_BUFFER_BIT); check_error(f); if (!acquireSharedFrameTextures()) return; // Bind textures. for (uint i = 0; i < 3; ++i) { if (m_texture[i] != 0u) { f->glActiveTexture(GL_TEXTURE0 + i); f->glBindTexture(GL_TEXTURE_2D, m_texture[i]); check_error(f); } } bindShaderProgram(); check_error(f); // Setup an orthographic projection. QMatrix4x4 projection; projection.scale(2.0f / (float)width, 2.0f / (float)height); m_shader->setUniformValue(m_projectionLocation, projection); check_error(f); // Set model view. 
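    // Zoom and pan are applied through the model-view matrix only: the view offset
    // translates the video quad and the zoom factor scales it, while the orthographic
    // projection above stays fixed.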
QMatrix4x4 modelView; if (!qFuzzyCompare(m_zoom, 1.0f)) { if ((offset().x() != 0) || (offset().y() != 0)) modelView.translate(-offset().x() * devicePixelRatio(), offset().y() * devicePixelRatio()); modelView.scale(zoom(), zoom()); } m_shader->setUniformValue(m_modelViewLocation, modelView); check_error(f); // Provide vertices of triangle strip. QVector vertices; width = m_rect.width() * devicePixelRatio(); height = m_rect.height() * devicePixelRatio(); vertices << QVector2D(float(-width) / 2.0f, float(-height) / 2.0f); vertices << QVector2D(float(-width) / 2.0f, float(height) / 2.0f); vertices << QVector2D(float(width) / 2.0f, float(-height) / 2.0f); vertices << QVector2D(float(width) / 2.0f, float(height) / 2.0f); m_shader->enableAttributeArray(m_vertexLocation); check_error(f); m_shader->setAttributeArray(m_vertexLocation, vertices.constData()); check_error(f); // Provide texture coordinates. QVector texCoord; texCoord << QVector2D(0.0f, 1.0f); texCoord << QVector2D(0.0f, 0.0f); texCoord << QVector2D(1.0f, 1.0f); texCoord << QVector2D(1.0f, 0.0f); m_shader->enableAttributeArray(m_texCoordLocation); check_error(f); m_shader->setAttributeArray(m_texCoordLocation, texCoord.constData()); check_error(f); // Render glDrawArrays(GL_TRIANGLE_STRIP, 0, vertices.size()); check_error(f); if (m_sendFrame && m_analyseSem.tryAcquire(1)) { // Render RGB frame for analysis int fullWidth = pCore->getCurrentProfile()->width(); int fullHeight = pCore->getCurrentProfile()->height(); if ((m_fbo == nullptr) || m_fbo->size() != QSize(fullWidth, fullHeight)) { delete m_fbo; QOpenGLFramebufferObjectFormat fmt; fmt.setSamples(1); fmt.setInternalTextureFormat(GL_RGB); // GL_RGBA32F); // which one is the fastest ? m_fbo = new QOpenGLFramebufferObject(fullWidth, fullHeight, fmt); // GL_TEXTURE_2D); } m_fbo->bind(); glViewport(0, 0, fullWidth, fullHeight); QMatrix4x4 projection2; projection2.scale(2.0f / (float)width, 2.0f / (float)height); m_shader->setUniformValue(m_projectionLocation, projection2); glDrawArrays(GL_TRIANGLE_STRIP, 0, vertices.size()); check_error(f); m_fbo->release(); emit analyseFrame(m_fbo->toImage()); m_sendFrame = false; } // Cleanup m_shader->disableAttributeArray(m_vertexLocation); m_shader->disableAttributeArray(m_texCoordLocation); m_shader->release(); for (uint i = 0; i < 3; ++i) { if (m_texture[i] != 0u) { f->glActiveTexture(GL_TEXTURE0 + i); f->glBindTexture(GL_TEXTURE_2D, 0); check_error(f); } } glActiveTexture(GL_TEXTURE0); check_error(f); releaseSharedFrameTextures(); check_error(f); } void GLWidget::slotZoom(bool zoomIn) { if (zoomIn) { if (qFuzzyCompare(m_zoom, 1.0f)) { setZoom(2.0f); } else if (qFuzzyCompare(m_zoom, 2.0f)) { setZoom(3.0f); } else if (m_zoom < 1.0f) { setZoom(m_zoom * 2); } } else { if (qFuzzyCompare(m_zoom, 3.0f)) { setZoom(2.0); } else if (qFuzzyCompare(m_zoom, 2.0f)) { setZoom(1.0); } else if (m_zoom > 0.2) { setZoom(m_zoom / 2); } } } void GLWidget::wheelEvent(QWheelEvent *event) { if (((event->modifiers() & Qt::ControlModifier) != 0u) && ((event->modifiers() & Qt::ShiftModifier) != 0u)) { slotZoom(event->delta() > 0); return; } emit mouseSeek(event->delta(), (uint)event->modifiers()); event->accept(); } void GLWidget::requestSeek() { if (!m_producer) { return; } if (m_proxy->seeking()) { m_producer->seek(m_proxy->seekPosition()); if (!qFuzzyIsNull(m_producer->get_speed())) { m_consumer->purge(); } if (m_consumer->is_stopped()) { m_consumer->start(); } m_consumer->set("refresh", 1); } } void GLWidget::seek(int pos) { if (!m_proxy->seeking()) { 
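        // Not seeking yet: push the position straight to the producer and make sure the
        // consumer is running (or purged and refreshed) so the displayed frame updates.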
m_proxy->setSeekPosition(pos); m_producer->seek(pos); if (m_consumer->is_stopped()) { m_consumer->start(); } else { m_consumer->purge(); m_consumer->set("refresh", 1); } } else { m_proxy->setSeekPosition(pos); } } void GLWidget::requestRefresh() { if (m_proxy->seeking()) { return; } if (m_producer && qFuzzyIsNull(m_producer->get_speed())) { m_refreshTimer.start(); } } QString GLWidget::frameToTime(int frames) const { return m_consumer ? m_consumer->frames_to_time(frames, mlt_time_smpte_df) : QStringLiteral("-"); } void GLWidget::refresh() { m_refreshTimer.stop(); if (m_proxy->seeking()) { return; } QMutexLocker locker(&m_mltMutex); if (m_consumer->is_stopped()) { m_consumer->start(); } m_consumer->set("refresh", 1); } bool GLWidget::checkFrameNumber(int pos, int offset) { emit consumerPosition(pos); if (!m_proxy->setPosition(pos)) { emit seekPosition(m_proxy->seekOrCurrentPosition()); } const double speed = m_producer->get_speed(); if (m_proxy->seeking()) { m_producer->set_speed(0); m_producer->seek(m_proxy->seekPosition()); if (qFuzzyIsNull(speed)) { m_consumer->set("refresh", 1); } else { m_producer->set_speed(speed); } return true; } int maxPos = m_producer->get_int("out"); if (m_isLoopMode || m_isZoneMode) { if (qFuzzyIsNull(speed) && pos >= maxPos) { m_consumer->purge(); if (!m_isLoopMode) { return false; } m_producer->seek(m_proxy->zoneIn()); m_producer->set_speed(1.0); m_consumer->set("refresh", 1); return true; } return true; } else if (!qFuzzyIsNull(speed)) { maxPos -= offset; if (pos >= (maxPos - 1) && speed > 0.) { // Playing past last clip, pause m_producer->set_speed(0); m_consumer->set("refresh", 0); m_consumer->purge(); m_producer->seek(qMax(0, maxPos)); return false; } else if (pos <= 0 && speed < 0.) { // rewinding reached 0, pause m_producer->set_speed(0); m_consumer->set("refresh", 0); m_consumer->purge(); m_producer->seek(0); return false; } } return true; } void GLWidget::mousePressEvent(QMouseEvent *event) { if ((rootObject() != nullptr) && rootObject()->objectName() != QLatin1String("root") && !(event->modifiers() & Qt::ControlModifier) && !(event->buttons() & Qt::MiddleButton)) { event->ignore(); QQuickView::mousePressEvent(event); return; } if ((event->button() & Qt::LeftButton) != 0u) { if ((event->modifiers() & Qt::ControlModifier) != 0u) { // Pan view m_panStart = event->pos(); setCursor(Qt::ClosedHandCursor); } else { m_dragStart = event->pos(); } } else if ((event->button() & Qt::RightButton) != 0u) { emit showContextMenu(event->globalPos()); } else if ((event->button() & Qt::MiddleButton) != 0u) { m_panStart = event->pos(); setCursor(Qt::ClosedHandCursor); } event->accept(); QQuickView::mousePressEvent(event); } void GLWidget::mouseMoveEvent(QMouseEvent *event) { if ((rootObject() != nullptr) && rootObject()->objectName() != QLatin1String("root") && !(event->modifiers() & Qt::ControlModifier) && !(event->buttons() & Qt::MiddleButton)) { event->ignore(); QQuickView::mouseMoveEvent(event); return; } /* if (event->modifiers() == Qt::ShiftModifier && m_producer) { emit seekTo(m_producer->get_length() * event->x() / width()); return; }*/ QQuickView::mouseMoveEvent(event); if (!m_panStart.isNull()) { emit panView(m_panStart - event->pos()); m_panStart = event->pos(); event->accept(); QQuickView::mouseMoveEvent(event); return; } if (!(event->buttons() & Qt::LeftButton)) { QQuickView::mouseMoveEvent(event); return; } if (!event->isAccepted() && !m_dragStart.isNull() && (event->pos() - m_dragStart).manhattanLength() >= QApplication::startDragDistance()) { m_dragStart 
= QPoint();
        emit startDrag();
    }
}

void GLWidget::keyPressEvent(QKeyEvent *event)
{
    QQuickView::keyPressEvent(event);
    if (!event->isAccepted()) {
        emit passKeyEvent(event);
    }
}

void GLWidget::createThread(RenderThread **thread, thread_function_t function, void *data)
{
#ifdef Q_OS_WIN
    // On Windows, MLT event consumer-thread-create is fired from the Qt main thread.
    while (!m_isInitialized) {
        qApp->processEvents();
    }
#else
    if (!m_isInitialized) {
        m_initSem.acquire();
    }
#endif
    (*thread) = new RenderThread(function, data, m_shareContext, &m_offscreenSurface);
    (*thread)->start();
}

static void onThreadCreate(mlt_properties owner, GLWidget *self, RenderThread **thread, int *priority, thread_function_t function, void *data)
{
    Q_UNUSED(owner)
    Q_UNUSED(priority)
    // self->clearFrameRenderer();
    self->createThread(thread, function, data);
    self->lockMonitor();
}

static void onThreadJoin(mlt_properties owner, GLWidget *self, RenderThread *thread)
{
    Q_UNUSED(owner)
    if (thread) {
        thread->quit();
        thread->wait();
        delete thread;
        // self->clearFrameRenderer();
        self->releaseMonitor();
    }
}

void GLWidget::startGlsl()
{
    // C & D
    if (m_glslManager) {
        // clearFrameRenderer();
        m_glslManager->fire_event("init glsl");
        if (m_glslManager->get_int("glsl_supported") == 0) {
            disableGPUAccel();
        } else {
            emit started();
        }
    }
}

static void onThreadStarted(mlt_properties owner, GLWidget *self)
{
    Q_UNUSED(owner)
    self->startGlsl();
}

void GLWidget::releaseMonitor()
{
    emit lockMonitor(false);
}

void GLWidget::lockMonitor()
{
    emit lockMonitor(true);
}

void GLWidget::stopGlsl()
{
    if (m_consumer) {
        m_consumer->purge();
    }
    // C & D
    // TODO This is commented out for now because it is causing crashes.
    // Technically, this should be the correct thing to do, but it appears
    // some changes have created regression (see shotcut)
    // with respect to restarting the consumer in GPU mode.
    // m_glslManager->fire_event("close glsl");
    m_texture[0] = 0;
}

static void onThreadStopped(mlt_properties owner, GLWidget *self)
{
    Q_UNUSED(owner)
    self->stopGlsl();
}

void GLWidget::slotSwitchAudioOverlay(bool enable)
{
    KdenliveSettings::setDisplayAudioOverlay(enable);
    if (m_audioWaveDisplayed && !enable) {
        if (m_producer && m_producer->get_int("video_index") != -1) {
            // We have a video producer, disable filter
            removeAudioOverlay();
        }
    }
    if (enable && !m_audioWaveDisplayed && m_producer) {
        createAudioOverlay(m_producer->get_int("video_index") == -1);
    }
}

int GLWidget::setProducer(const std::shared_ptr<Mlt::Producer> &producer, bool isActive, int position)
{
    int error = 0;
    QString currentId;
    int consumerPosition = 0;
    currentId = m_producer->parent().get("kdenlive:id");
    if (producer) {
        m_producer = producer;
    } else {
        if (currentId == QLatin1String("black")) {
            return 0;
        }
        if (m_audioWaveDisplayed) {
            removeAudioOverlay();
        }
        m_producer = m_blackClip;
+        // Reset markersModel
+        rootContext()->setContextProperty("markersModel", 0);
    }
    // redundant check. postcondition of above is m_producer != null
    if (m_producer) {
        m_producer->set_speed(0);
        if (m_consumer) {
            consumerPosition = m_consumer->position();
            m_consumer->stop();
            if (!m_consumer->is_stopped()) {
                m_consumer->stop();
            }
        }
        error = reconfigure();
        if (error == 0) {
            // The profile display aspect ratio may have changed.
            resizeGL(width(), height());
        }
    } else {
        return error;
    }
    if (!m_consumer) {
        return error;
    }
    consumerPosition = m_consumer->position();
    if (m_producer->get_int("video_index") == -1) {
        // This is an audio only clip, attach visualization filter.
Currently, the filter crashes MLT when Movit accel is used if (!m_audioWaveDisplayed) { createAudioOverlay(true); } else if (m_consumer) { if (KdenliveSettings::gpu_accel()) { removeAudioOverlay(); } else { adjustAudioOverlay(true); } } } else if (m_audioWaveDisplayed && (m_consumer != nullptr)) { // This is not an audio clip, hide wave if (KdenliveSettings::displayAudioOverlay()) { adjustAudioOverlay(m_producer->get_int("video_index") == -1); } else { removeAudioOverlay(); } } else if (KdenliveSettings::displayAudioOverlay()) { createAudioOverlay(false); } if (position == -1 && m_producer->parent().get("kdenlive:id") == currentId) { position = consumerPosition; } if (isActive) { startConsumer(); } m_proxy->requestSeekPosition(position > 0 ? position : m_producer->position()); return error; } int GLWidget::droppedFrames() const { return (m_consumer ? m_consumer->get_int("drop_count") : 0); } void GLWidget::resetDrops() { if (m_consumer) { m_consumer->set("drop_count", 0); } } void GLWidget::createAudioOverlay(bool isAudio) { if (!m_consumer) { return; } if (isAudio && KdenliveSettings::gpu_accel()) { // Audiowaveform filter crashes on Movit + audio clips) return; } Mlt::Filter f(pCore->getCurrentProfile()->profile(), "audiowaveform"); if (f.is_valid()) { // f.set("show_channel", 1); f.set("color.1", "0xffff0099"); f.set("fill", 1); if (isAudio) { // Fill screen f.set("rect", "0,0,100%,100%"); } else { // Overlay on lower part of the screen f.set("rect", "0,80%,100%,20%"); } m_consumer->attach(f); m_audioWaveDisplayed = true; } } void GLWidget::removeAudioOverlay() { Mlt::Service sourceService(m_consumer->get_service()); // move all effects to the correct producer int ct = 0; Mlt::Filter *filter = sourceService.filter(ct); while (filter != nullptr) { QString srv = filter->get("mlt_service"); if (srv == QLatin1String("audiowaveform")) { sourceService.detach(*filter); delete filter; break; } else { ct++; } filter = sourceService.filter(ct); } m_audioWaveDisplayed = false; } void GLWidget::adjustAudioOverlay(bool isAudio) { Mlt::Service sourceService(m_consumer->get_service()); // move all effects to the correct producer int ct = 0; Mlt::Filter *filter = sourceService.filter(ct); while (filter != nullptr) { QString srv = filter->get("mlt_service"); if (srv == QLatin1String("audiowaveform")) { if (isAudio) { filter->set("rect", "0,0,100%,100%"); } else { filter->set("rect", "0,80%,100%,20%"); } break; } else { ct++; } filter = sourceService.filter(ct); } } void GLWidget::stopCapture() { if (strcmp(m_consumer->get("mlt_service"), "multi") == 0) { m_consumer->set("refresh", 0); m_consumer->purge(); m_consumer->stop(); } } int GLWidget::reconfigureMulti(const QString ¶ms, const QString &path, Mlt::Profile *profile) { Q_UNUSED(params); Q_UNUSED(path); Q_UNUSED(profile); // TODO Fix or delete /* QString serviceName = property("mlt_service").toString(); if ((m_consumer == nullptr) || !m_consumer->is_valid() || strcmp(m_consumer->get("mlt_service"), "multi") != 0) { if (m_consumer) { m_consumer->purge(); m_consumer->stop(); m_consumer.reset(); } m_consumer.reset(new Mlt::FilteredConsumer(*profile, "multi")); delete m_threadStartEvent; m_threadStartEvent = nullptr; delete m_threadStopEvent; m_threadStopEvent = nullptr; delete m_threadCreateEvent; delete m_threadJoinEvent; if (m_consumer) { m_threadCreateEvent = m_consumer->listen("consumer-thread-create", this, (mlt_listener)onThreadCreate); m_threadJoinEvent = m_consumer->listen("consumer-thread-join", this, (mlt_listener)onThreadJoin); } } if 
(m_consumer->is_valid()) { // build sub consumers // m_consumer->set("mlt_image_format", "yuv422"); reloadProfile(); int volume = KdenliveSettings::volume(); m_consumer->set("0", serviceName.toUtf8().constData()); m_consumer->set("0.mlt_image_format", "yuv422"); m_consumer->set("0.terminate_on_pause", 0); // m_consumer->set("0.preview_off", 1); m_consumer->set("0.real_time", 0); m_consumer->set("0.volume", (double)volume / 100); if (serviceName.startsWith(QLatin1String("sdl_audio"))) { #ifdef Q_OS_WIN m_consumer->set("0.audio_buffer", 2048); #else m_consumer->set("0.audio_buffer", 512); #endif QString audioDevice = KdenliveSettings::audiodevicename(); if (!audioDevice.isEmpty()) { m_consumer->set("audio_device", audioDevice.toUtf8().constData()); } QString audioDriver = KdenliveSettings::audiodrivername(); if (!audioDriver.isEmpty()) { m_consumer->set("audio_driver", audioDriver.toUtf8().constData()); } } m_consumer->set("1", "avformat"); m_consumer->set("1.target", path.toUtf8().constData()); // m_consumer->set("1.real_time", -KdenliveSettings::mltthreads()); m_consumer->set("terminate_on_pause", 0); m_consumer->set("1.terminate_on_pause", 0); // m_consumer->set("1.terminate_on_pause", 0);// was commented out. restoring it fixes mantis#3415 - FFmpeg recording freezes QStringList paramList = params.split(' ', QString::SkipEmptyParts); for (int i = 0; i < paramList.count(); ++i) { QString key = "1." + paramList.at(i).section(QLatin1Char('='), 0, 0); QString value = paramList.at(i).section(QLatin1Char('='), 1, 1); if (value == QLatin1String("%threads")) { value = QString::number(QThread::idealThreadCount()); } m_consumer->set(key.toUtf8().constData(), value.toUtf8().constData()); } // Connect the producer to the consumer - tell it to "run" later delete m_displayEvent; // C & D if (m_glslManager) { // D if (m_openGLSync) { m_displayEvent = m_consumer->listen("consumer-frame-show", this, (mlt_listener)on_gl_frame_show); } else { // C m_displayEvent = m_consumer->listen("consumer-frame-show", this, (mlt_listener)on_gl_nosync_frame_show); } } else { // A & B m_displayEvent = m_consumer->listen("consumer-frame-show", this, (mlt_listener)on_frame_show); } m_consumer->connect(*m_producer.get()); m_consumer->start(); return 0; } */ return -1; } int GLWidget::reconfigure(bool reload) { int error = 0; // use SDL for audio, OpenGL for video QString serviceName = property("mlt_service").toString(); if (reload) { m_blackClip.reset(new Mlt::Producer(pCore->getCurrentProfile()->profile(), "color:black")); m_blackClip->set("kdenlive:id", "black"); reloadProfile(); return error; } if ((m_consumer == nullptr) || !m_consumer->is_valid() || strcmp(m_consumer->get("mlt_service"), "multi") == 0) { if (m_consumer) { m_consumer->purge(); m_consumer->stop(); m_consumer.reset(); } QString audioBackend = (KdenliveSettings::external_display()) ? 
QString("decklink:%1").arg(KdenliveSettings::blackmagic_output_device()) : KdenliveSettings::audiobackend(); if (m_consumer == nullptr || serviceName.isEmpty() || serviceName != audioBackend) { m_consumer.reset(new Mlt::FilteredConsumer(pCore->getCurrentProfile()->profile(), audioBackend.toLatin1().constData())); if (m_consumer->is_valid()) { serviceName = audioBackend; setProperty("mlt_service", serviceName); if (KdenliveSettings::external_display()) { m_consumer->set("terminate_on_pause", 0); } } else { // Warning, audio backend unavailable on system m_consumer.reset(); QStringList backends = {"sdl2_audio", "sdl_audio", "rtaudio"}; for (const QString &bk : backends) { if (bk == audioBackend) { // Already tested continue; } m_consumer.reset(new Mlt::FilteredConsumer(pCore->getCurrentProfile()->profile(), bk.toLatin1().constData())); if (m_consumer->is_valid()) { if (audioBackend == KdenliveSettings::sdlAudioBackend()) { // switch sdl audio backend KdenliveSettings::setSdlAudioBackend(bk); } qDebug() << "++++++++\nSwitching audio backend to: " << bk << "\n++++++++++"; KdenliveSettings::setAudiobackend(bk); serviceName = bk; setProperty("mlt_service", serviceName); break; } else { m_consumer.reset(); } } if (!m_consumer) { qWarning() << "WARNING, NO AUDIO BACKEND FOUND"; return -1; } } } delete m_threadStartEvent; m_threadStartEvent = nullptr; delete m_threadStopEvent; m_threadStopEvent = nullptr; delete m_threadCreateEvent; delete m_threadJoinEvent; if (m_consumer) { m_threadCreateEvent = m_consumer->listen("consumer-thread-create", this, (mlt_listener)onThreadCreate); m_threadJoinEvent = m_consumer->listen("consumer-thread-join", this, (mlt_listener)onThreadJoin); } } if (m_consumer->is_valid()) { // Connect the producer to the consumer - tell it to "run" later if (m_producer) { m_consumer->connect(*m_producer.get()); // m_producer->set_speed(0.0); } int dropFrames = realTime(); if (!KdenliveSettings::monitor_dropframes()) { dropFrames = -dropFrames; } m_consumer->set("real_time", dropFrames); // C & D if (m_glslManager) { if (!m_threadStartEvent) { m_threadStartEvent = m_consumer->listen("consumer-thread-started", this, (mlt_listener)onThreadStarted); } if (!m_threadStopEvent) { m_threadStopEvent = m_consumer->listen("consumer-thread-stopped", this, (mlt_listener)onThreadStopped); } if (!serviceName.startsWith(QLatin1String("decklink"))) { m_consumer->set("mlt_image_format", "glsl"); } } else { // A & B m_consumer->set("mlt_image_format", "yuv422"); } delete m_displayEvent; // C & D if (m_glslManager) { m_displayEvent = m_consumer->listen("consumer-frame-show", this, (mlt_listener)on_gl_frame_show); } else { // A & B m_displayEvent = m_consumer->listen("consumer-frame-show", this, (mlt_listener)on_frame_show); } int volume = KdenliveSettings::volume(); if (serviceName.startsWith(QLatin1String("sdl_audio"))) { QString audioDevice = KdenliveSettings::audiodevicename(); if (!audioDevice.isEmpty()) { m_consumer->set("audio_device", audioDevice.toUtf8().constData()); } QString audioDriver = KdenliveSettings::audiodrivername(); if (!audioDriver.isEmpty()) { m_consumer->set("audio_driver", audioDriver.toUtf8().constData()); } } /*if (!pCore->getCurrentProfile()->progressive()) m_consumer->set("progressive", property("progressive").toBool());*/ m_consumer->set("volume", volume / 100.0); // m_consumer->set("progressive", 1); m_consumer->set("rescale", KdenliveSettings::mltinterpolation().toUtf8().constData()); m_consumer->set("deinterlace_method", 
KdenliveSettings::mltdeinterlacer().toUtf8().constData()); /* #ifdef Q_OS_WIN m_consumer->set("audio_buffer", 2048); #else m_consumer->set("audio_buffer", 512); #endif */ m_consumer->set("buffer", 25); m_consumer->set("prefill", 1); m_consumer->set("scrub_audio", 1); if (KdenliveSettings::monitor_gamma() == 0) { m_consumer->set("color_trc", "iec61966_2_1"); } else { m_consumer->set("color_trc", "bt709"); } } else { // Cleanup on error error = 2; } return error; } float GLWidget::zoom() const { return m_zoom; } float GLWidget::scale() const { return (double)m_rect.width() / pCore->getCurrentProfile()->width() * m_zoom; } void GLWidget::reloadProfile() { // The profile display aspect ratio may have changed. if (m_consumer) { // Make sure to delete and rebuild consumer to match profile m_consumer->purge(); m_consumer->stop(); m_consumer.reset(); reconfigure(); } resizeGL(width(), height()); refreshSceneLayout(); } QSize GLWidget::profileSize() const { return {pCore->getCurrentProfile()->width(), pCore->getCurrentProfile()->height()}; } QRect GLWidget::displayRect() const { return m_rect; } QPoint GLWidget::offset() const { return {m_offset.x() - ((int)((float)pCore->getCurrentProfile()->width() * m_zoom) - width()) / 2, m_offset.y() - ((int)((float)pCore->getCurrentProfile()->height() * m_zoom) - height()) / 2}; } void GLWidget::setZoom(float zoom) { double zoomRatio = zoom / m_zoom; m_zoom = zoom; emit zoomChanged(); if (rootObject()) { rootObject()->setProperty("zoom", m_zoom); double scalex = rootObject()->property("scalex").toDouble() * zoomRatio; rootObject()->setProperty("scalex", scalex); double scaley = rootObject()->property("scaley").toDouble() * zoomRatio; rootObject()->setProperty("scaley", scaley); } update(); } void GLWidget::onFrameDisplayed(const SharedFrame &frame) { m_contextSharedAccess.lock(); m_sharedFrame = frame; m_sendFrame = sendFrameForAnalysis; m_contextSharedAccess.unlock(); update(); } void GLWidget::mouseReleaseEvent(QMouseEvent *event) { QQuickView::mouseReleaseEvent(event); if (m_dragStart.isNull() && m_panStart.isNull() && (rootObject() != nullptr) && rootObject()->objectName() != QLatin1String("root") && !(event->modifiers() & Qt::ControlModifier)) { event->ignore(); return; } if (!m_dragStart.isNull() && m_panStart.isNull() && ((event->button() & Qt::LeftButton) != 0u) && !event->isAccepted()) { emit monitorPlay(); } m_dragStart = QPoint(); m_panStart = QPoint(); setCursor(Qt::ArrowCursor); } void GLWidget::mouseDoubleClickEvent(QMouseEvent *event) { QQuickView::mouseDoubleClickEvent(event); if (event->isAccepted()) { return; } if ((rootObject() == nullptr) || rootObject()->objectName() != QLatin1String("rooteffectscene")) { emit switchFullScreen(); } event->accept(); } void GLWidget::setOffsetX(int x, int max) { m_offset.setX(x); emit offsetChanged(); if (rootObject()) { rootObject()->setProperty("offsetx", m_zoom > 1.0f ? x - max / 2.0 - 10 : 0); } update(); } void GLWidget::setOffsetY(int y, int max) { m_offset.setY(y); if (rootObject()) { rootObject()->setProperty("offsety", m_zoom > 1.0f ? 
y - max / 2.0 - 10 : 0); } update(); } int GLWidget::realTime() const { // C & D if (m_glslManager) { return 1; } return KdenliveSettings::mltthreads(); } std::shared_ptr GLWidget::consumer() { return m_consumer; } void GLWidget::updateGamma() { reconfigure(); } void GLWidget::resetConsumer(bool fullReset) { if (fullReset && m_consumer) { m_consumer->purge(); m_consumer->stop(); m_consumer.reset(); } reconfigure(); } const QString GLWidget::sceneList(const QString &root, const QString &fullPath) { QString playlist; qCDebug(KDENLIVE_LOG) << " * * *Setting document xml root: " << root; Mlt::Consumer xmlConsumer(pCore->getCurrentProfile()->profile(), "xml", fullPath.isEmpty() ? "kdenlive_playlist" : fullPath.toUtf8().constData()); if (!root.isEmpty()) { xmlConsumer.set("root", root.toUtf8().constData()); } if (!xmlConsumer.is_valid()) { return QString(); } m_producer->optimise(); xmlConsumer.set("terminate_on_pause", 1); xmlConsumer.set("store", "kdenlive"); xmlConsumer.set("time_format", "clock"); // Disabling meta creates cleaner files, but then we don't have access to metadata on the fly (meta channels, etc) // And we must use "avformat" instead of "avformat-novalidate" on project loading which causes a big delay on project opening // xmlConsumer.set("no_meta", 1); Mlt::Producer prod(m_producer->get_producer()); if (!prod.is_valid()) { return QString(); } xmlConsumer.connect(prod); xmlConsumer.run(); playlist = fullPath.isEmpty() ? QString::fromUtf8(xmlConsumer.get("kdenlive_playlist")) : fullPath; return playlist; } void GLWidget::updateTexture(GLuint yName, GLuint uName, GLuint vName) { m_texture[0] = yName; m_texture[1] = uName; m_texture[2] = vName; m_sendFrame = sendFrameForAnalysis; // update(); } // MLT consumer-frame-show event handler void GLWidget::on_frame_show(mlt_consumer, void *self, mlt_frame frame_ptr) { Mlt::Frame frame(frame_ptr); if (frame.get_int("rendered") != 0) { auto *widget = static_cast(self); int timeout = (widget->consumer()->get_int("real_time") > 0) ? 0 : 1000; if ((widget->m_frameRenderer != nullptr) && widget->m_frameRenderer->semaphore()->tryAcquire(1, timeout)) { QMetaObject::invokeMethod(widget->m_frameRenderer, "showFrame", Qt::QueuedConnection, Q_ARG(Mlt::Frame, frame)); } } } void GLWidget::on_gl_nosync_frame_show(mlt_consumer, void *self, mlt_frame frame_ptr) { Mlt::Frame frame(frame_ptr); if (frame.get_int("rendered") != 0) { auto *widget = static_cast(self); int timeout = (widget->consumer()->get_int("real_time") > 0) ? 0 : 1000; if ((widget->m_frameRenderer != nullptr) && widget->m_frameRenderer->semaphore()->tryAcquire(1, timeout)) { QMetaObject::invokeMethod(widget->m_frameRenderer, "showGLNoSyncFrame", Qt::QueuedConnection, Q_ARG(Mlt::Frame, frame)); } } } void GLWidget::on_gl_frame_show(mlt_consumer, void *self, mlt_frame frame_ptr) { Mlt::Frame frame(frame_ptr); if (frame.get_int("rendered") != 0) { auto *widget = static_cast(self); int timeout = (widget->consumer()->get_int("real_time") > 0) ? 
0 : 1000; if ((widget->m_frameRenderer != nullptr) && widget->m_frameRenderer->semaphore()->tryAcquire(1, timeout)) { QMetaObject::invokeMethod(widget->m_frameRenderer, "showGLFrame", Qt::QueuedConnection, Q_ARG(Mlt::Frame, frame)); } } } RenderThread::RenderThread(thread_function_t function, void *data, QOpenGLContext *context, QSurface *surface) : QThread(nullptr) , m_function(function) , m_data(data) , m_context(nullptr) , m_surface(surface) { if (context) { m_context = new QOpenGLContext; m_context->setFormat(context->format()); m_context->setShareContext(context); m_context->create(); m_context->moveToThread(this); } } RenderThread::~RenderThread() { // would otherwise leak if RenderThread is allocated with a context but not run. // safe post-run delete m_context; } // TODO: missing some exception handling? void RenderThread::run() { if (m_context) { m_context->makeCurrent(m_surface); } m_function(m_data); if (m_context) { m_context->doneCurrent(); delete m_context; m_context = nullptr; } } FrameRenderer::FrameRenderer(QOpenGLContext *shareContext, QSurface *surface, GLWidget::ClientWaitSync_fp clientWaitSync) : QThread(nullptr) , m_semaphore(3) , m_context(nullptr) , m_surface(surface) , m_ClientWaitSync(clientWaitSync) , m_gl32(nullptr) , sendAudioForAnalysis(false) { Q_ASSERT(shareContext); m_renderTexture[0] = m_renderTexture[1] = m_renderTexture[2] = 0; m_displayTexture[0] = m_displayTexture[1] = m_displayTexture[2] = 0; // B & C & D if (KdenliveSettings::gpu_accel() || shareContext->supportsThreadedOpenGL()) { m_context = new QOpenGLContext; m_context->setFormat(shareContext->format()); m_context->setShareContext(shareContext); m_context->create(); m_context->moveToThread(this); } setObjectName(QStringLiteral("FrameRenderer")); moveToThread(this); start(); } FrameRenderer::~FrameRenderer() { delete m_context; delete m_gl32; } void FrameRenderer::showFrame(Mlt::Frame frame) { int width = 0; int height = 0; mlt_image_format format = mlt_image_yuv420p; frame.get_image(format, width, height); // Save this frame for future use and to keep a reference to the GL Texture. m_displayFrame = SharedFrame(frame); if ((m_context != nullptr) && m_context->isValid()) { m_context->makeCurrent(m_surface); // Upload each plane of YUV to a texture. QOpenGLFunctions *f = m_context->functions(); uploadTextures(m_context, m_displayFrame, m_renderTexture); f->glBindTexture(GL_TEXTURE_2D, 0); check_error(f); f->glFinish(); for (int i = 0; i < 3; ++i) { std::swap(m_renderTexture[i], m_displayTexture[i]); } emit textureReady(m_displayTexture[0], m_displayTexture[1], m_displayTexture[2]); m_context->doneCurrent(); } // The frame is now done being modified and can be shared with the rest // of the application. emit frameDisplayed(m_displayFrame); m_semaphore.release(); } void FrameRenderer::showGLFrame(Mlt::Frame frame) { if ((m_context != nullptr) && m_context->isValid()) { int width = 0; int height = 0; frame.set("movit.convert.use_texture", 1); mlt_image_format format = mlt_image_glsl_texture; frame.get_image(format, width, height); m_context->makeCurrent(m_surface); pipelineSyncToFrame(frame); m_context->functions()->glFinish(); m_context->doneCurrent(); // Save this frame for future use and to keep a reference to the GL Texture. m_displayFrame = SharedFrame(frame); } // The frame is now done being modified and can be shared with the rest // of the application. 
emit frameDisplayed(m_displayFrame); m_semaphore.release(); } void FrameRenderer::showGLNoSyncFrame(Mlt::Frame frame) { if ((m_context != nullptr) && m_context->isValid()) { int width = 0; int height = 0; frame.set("movit.convert.use_texture", 1); mlt_image_format format = mlt_image_glsl_texture; frame.get_image(format, width, height); m_context->makeCurrent(m_surface); m_context->functions()->glFinish(); m_context->doneCurrent(); // Save this frame for future use and to keep a reference to the GL Texture. m_displayFrame = SharedFrame(frame); } // The frame is now done being modified and can be shared with the rest // of the application. emit frameDisplayed(m_displayFrame); m_semaphore.release(); } void FrameRenderer::cleanup() { if ((m_renderTexture[0] != 0u) && (m_renderTexture[1] != 0u) && (m_renderTexture[2] != 0u)) { m_context->makeCurrent(m_surface); m_context->functions()->glDeleteTextures(3, m_renderTexture); if ((m_displayTexture[0] != 0u) && (m_displayTexture[1] != 0u) && (m_displayTexture[2] != 0u)) { m_context->functions()->glDeleteTextures(3, m_displayTexture); } m_context->doneCurrent(); m_renderTexture[0] = m_renderTexture[1] = m_renderTexture[2] = 0; m_displayTexture[0] = m_displayTexture[1] = m_displayTexture[2] = 0; } } // D void FrameRenderer::pipelineSyncToFrame(Mlt::Frame &frame) { auto sync = (GLsync)frame.get_data("movit.convert.fence"); if (!sync) return; #ifdef Q_OS_WIN // On Windows, use QOpenGLFunctions_3_2_Core instead of getProcAddress. // TODO: move to initialization of m_ClientWaitSync if (!m_gl32) { m_gl32 = m_context->versionFunctions(); if (m_gl32) { m_gl32->initializeOpenGLFunctions(); } } if (m_gl32) { m_gl32->glClientWaitSync(sync, 0, GL_TIMEOUT_IGNORED); check_error(m_context->functions()); } #else if (m_ClientWaitSync) { m_ClientWaitSync(sync, 0, GL_TIMEOUT_IGNORED); check_error(m_context->functions()); } #endif // Q_OS_WIN } void GLWidget::setAudioThumb(int channels, const QList &audioCache) { if (!rootObject()) return; auto *audioThumbDisplay = rootObject()->findChild(QStringLiteral("audiothumb")); if (!audioThumbDisplay) return; QImage img(width(), height() / 6, QImage::Format_ARGB32_Premultiplied); img.fill(Qt::transparent); if (!audioCache.isEmpty() && channels > 0) { int audioLevelCount = audioCache.count() - 1; // simplified audio QPainter painter(&img); QRectF mappedRect(0, 0, img.width(), img.height()); int channelHeight = mappedRect.height(); double value; double scale = (double)width() / (audioLevelCount / channels); if (scale < 1) { painter.setPen(QColor(80, 80, 150, 200)); for (int i = 0; i < img.width(); i++) { int framePos = i / scale; value = audioCache.at(qMin(framePos * channels, audioLevelCount)) / 256; for (int channel = 1; channel < channels; channel++) { value = qMax(value, audioCache.at(qMin(framePos * channels + channel, audioLevelCount)) / 256); } painter.drawLine(i, mappedRect.bottom() - (value * channelHeight), i, mappedRect.bottom()); } } else { QPainterPath positiveChannelPath; positiveChannelPath.moveTo(0, mappedRect.bottom()); for (int i = 0; i < audioLevelCount / channels; i++) { value = audioCache.at(qMin(i * channels, audioLevelCount)) / 256; for (int channel = 1; channel < channels; channel++) { value = qMax(value, audioCache.at(qMin(i * channels + channel, audioLevelCount)) / 256); } positiveChannelPath.lineTo(i * scale, mappedRect.bottom() - (value * channelHeight)); } positiveChannelPath.lineTo(mappedRect.right(), mappedRect.bottom()); painter.setPen(Qt::NoPen); painter.setBrush(QBrush(QColor(80, 80, 150, 200))); 
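            // The path follows the peak level per point (maximum across channels, values
            // divided by 256) and is filled as a single polygon to draw the waveform.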
painter.drawPath(positiveChannelPath); } painter.end(); } audioThumbDisplay->setImage(img); } void GLWidget::refreshSceneLayout() { if (!rootObject()) { return; } rootObject()->setProperty("profile", QPoint(pCore->getCurrentProfile()->width(), pCore->getCurrentProfile()->height())); rootObject()->setProperty("scalex", (double)m_rect.width() / pCore->getCurrentProfile()->width() * m_zoom); rootObject()->setProperty("scaley", (double)m_rect.width() / (((double)pCore->getCurrentProfile()->height() * pCore->getCurrentProfile()->dar() / pCore->getCurrentProfile()->width())) / pCore->getCurrentProfile()->width() * m_zoom); } void GLWidget::switchPlay(bool play, double speed) { m_proxy->setSeekPosition(-1); if (!m_producer || !m_consumer) { return; } if (m_isZoneMode) { resetZoneMode(); } if (play) { if (m_id == Kdenlive::ClipMonitor && m_consumer->position() == m_producer->get_out()) { m_producer->seek(0); } m_producer->set_speed(speed); m_consumer->start(); m_consumer->set("refresh", 1); } else { m_producer->set_speed(0); m_producer->seek(m_consumer->position() + 1); m_consumer->purge(); m_consumer->start(); } } bool GLWidget::playZone(bool loop) { if (!m_producer || m_proxy->zoneOut() <= m_proxy->zoneIn()) { pCore->displayMessage(i18n("Select a zone to play"), InformationMessage, 500); return false; } m_proxy->setSeekPosition(-1); m_producer->seek(m_proxy->zoneIn()); m_producer->set_speed(0); m_consumer->purge(); m_producer->set("out", m_proxy->zoneOut()); m_producer->set_speed(1.0); if (m_consumer->is_stopped()) { m_consumer->start(); } m_consumer->set("refresh", 1); m_isZoneMode = true; m_isLoopMode = loop; return true; } bool GLWidget::loopClip() { if (!m_producer || m_proxy->zoneOut() <= m_proxy->zoneIn()) { pCore->displayMessage(i18n("Select a zone to play"), InformationMessage, 500); return false; } m_proxy->setSeekPosition(-1); m_producer->seek(0); m_producer->set_speed(0); m_consumer->purge(); m_producer->set("out", m_producer->get_playtime()); m_producer->set_speed(1.0); if (m_consumer->is_stopped()) { m_consumer->start(); } m_consumer->set("refresh", 1); m_isZoneMode = true; m_isLoopMode = true; return true; } void GLWidget::resetZoneMode() { if (!m_isZoneMode && !m_isLoopMode) { return; } m_producer->set("out", m_producer->get_length()); m_isZoneMode = false; m_isLoopMode = false; } MonitorProxy *GLWidget::getControllerProxy() { return m_proxy; } int GLWidget::getCurrentPos() const { return m_proxy->seeking() ? m_proxy->seekPosition() : m_consumer->position(); } void GLWidget::setRulerInfo(int duration, const std::shared_ptr &model) { rootObject()->setProperty("duration", duration); if (model != nullptr) { // we are resetting marker/snap model, reset zone rootContext()->setContextProperty("markersModel", model.get()); } } void GLWidget::startConsumer() { if (m_consumer == nullptr) { return; } if (m_consumer->is_stopped() && m_consumer->start() == -1) { // ARGH CONSUMER BROKEN!!!! KMessageBox::error( qApp->activeWindow(), i18n("Could not create the video preview window.\nThere is something wrong with your Kdenlive install or your driver settings, please fix it.")); if (m_displayEvent) { delete m_displayEvent; } m_displayEvent = nullptr; m_consumer.reset(); return; } m_consumer->set("refresh", 1); } void GLWidget::stop() { m_refreshTimer.stop(); m_proxy->setSeekPosition(-1); // why this lock? 
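    // Presumably m_mltMutex serializes producer/consumer access with the MLT consumer
    // thread, so stopping here cannot race with a refresh that is still in flight.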
QMutexLocker locker(&m_mltMutex); if (m_producer) { if (m_isZoneMode) { resetZoneMode(); } m_producer->set_speed(0.0); } if (m_consumer) { m_consumer->purge(); if (!m_consumer->is_stopped()) { m_consumer->stop(); } } } double GLWidget::playSpeed() const { if (m_producer) { return m_producer->get_speed(); } return 0.0; } void GLWidget::setDropFrames(bool drop) { // why this lock? QMutexLocker locker(&m_mltMutex); if (m_consumer) { int dropFrames = realTime(); if (!drop) { dropFrames = -dropFrames; } m_consumer->stop(); m_consumer->set("real_time", dropFrames); if (m_consumer->start() == -1) { qCWarning(KDENLIVE_LOG) << "ERROR, Cannot start monitor"; } } } int GLWidget::volume() const { if ((!m_consumer) || (!m_producer)) { return -1; } if (m_consumer->get("mlt_service") == QStringLiteral("multi")) { return ((int)100 * m_consumer->get_double("0.volume")); } return ((int)100 * m_consumer->get_double("volume")); } void GLWidget::setVolume(double volume) { if (m_consumer) { if (m_consumer->get("mlt_service") == QStringLiteral("multi")) { m_consumer->set("0.volume", volume); } else { m_consumer->set("volume", volume); } } } int GLWidget::duration() const { if (!m_producer) { return 0; } return m_producer->get_playtime(); } void GLWidget::setConsumerProperty(const QString &name, const QString &value) { QMutexLocker locker(&m_mltMutex); if (m_consumer) { m_consumer->set(name.toUtf8().constData(), value.toUtf8().constData()); if (m_consumer->start() == -1) { qCWarning(KDENLIVE_LOG) << "ERROR, Cannot start monitor"; } } } diff --git a/src/monitor/monitor.cpp b/src/monitor/monitor.cpp index 65705bf38..9e6a8440d 100644 --- a/src/monitor/monitor.cpp +++ b/src/monitor/monitor.cpp @@ -1,2120 +1,2121 @@ /*************************************************************************** * Copyright (C) 2007 by Jean-Baptiste Mardelle (jb@kdenlive.org) * * * * This program is free software; you can redistribute it and/or modify * * it under the terms of the GNU General Public License as published by * * the Free Software Foundation; either version 2 of the License, or * * (at your option) any later version. * * * * This program is distributed in the hope that it will be useful, * * but WITHOUT ANY WARRANTY; without even the implied warranty of * * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * * GNU General Public License for more details. 
* * * * You should have received a copy of the GNU General Public License * * along with this program; if not, write to the * * Free Software Foundation, Inc., * * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA * ***************************************************************************/ #include "monitor.h" #include "bin/bin.h" #include "bin/projectclip.h" #include "core.h" #include "dialogs/profilesdialog.h" #include "doc/kdenlivedoc.h" #include "doc/kthumb.h" #include "glwidget.h" #include "kdenlivesettings.h" #include "lib/audio/audioStreamInfo.h" #include "mainwindow.h" #include "mltcontroller/clipcontroller.h" #include "monitorproxy.h" #include "profiles/profilemodel.hpp" #include "project/projectmanager.h" #include "qmlmanager.h" #include "recmanager.h" #include "scopes/monitoraudiolevel.h" #include "timeline2/model/snapmodel.hpp" #include "transitions/transitionsrepository.hpp" #include "klocalizedstring.h" #include #include #include #include #include #include #include #include #include "kdenlive_debug.h" #include #include #include #include #include #include #include #include #include #include #include #include #include #include #define SEEK_INACTIVE (-1) QuickEventEater::QuickEventEater(QObject *parent) : QObject(parent) { } bool QuickEventEater::eventFilter(QObject *obj, QEvent *event) { switch (event->type()) { case QEvent::DragEnter: { auto *ev = reinterpret_cast(event); if (ev->mimeData()->hasFormat(QStringLiteral("kdenlive/effect"))) { ev->acceptProposedAction(); return true; } break; } case QEvent::DragMove: { auto *ev = reinterpret_cast(event); if (ev->mimeData()->hasFormat(QStringLiteral("kdenlive/effect"))) { ev->acceptProposedAction(); return true; } break; } case QEvent::Drop: { auto *ev = static_cast(event); if (ev) { QStringList effectData; effectData << QString::fromUtf8(ev->mimeData()->data(QStringLiteral("kdenlive/effect"))); QStringList source = QString::fromUtf8(ev->mimeData()->data(QStringLiteral("kdenlive/effectsource"))).split(QLatin1Char('-')); effectData << source; emit addEffect(effectData); ev->accept(); return true; } break; } default: break; } return QObject::eventFilter(obj, event); } QuickMonitorEventEater::QuickMonitorEventEater(QWidget *parent) : QObject(parent) { } bool QuickMonitorEventEater::eventFilter(QObject *obj, QEvent *event) { if (event->type() == QEvent::KeyPress) { auto *ev = static_cast(event); if (ev) { emit doKeyPressEvent(ev); return true; } } return QObject::eventFilter(obj, event); } Monitor::Monitor(Kdenlive::MonitorId id, MonitorManager *manager, QWidget *parent) : AbstractMonitor(id, manager, parent) , m_controller(nullptr) , m_glMonitor(nullptr) , m_snaps(new SnapModel()) , m_splitEffect(nullptr) , m_splitProducer(nullptr) , m_dragStarted(false) , m_recManager(nullptr) , m_loopClipAction(nullptr) , m_sceneVisibilityAction(nullptr) , m_multitrackView(nullptr) , m_contextMenu(nullptr) , m_loopClipTransition(true) , m_editMarker(nullptr) , m_forceSizeFactor(0) , m_lastMonitorSceneType(MonitorSceneDefault) { auto *layout = new QVBoxLayout; layout->setContentsMargins(0, 0, 0, 0); layout->setSpacing(0); // Create container widget m_glWidget = new QWidget; auto *glayout = new QGridLayout(m_glWidget); glayout->setSpacing(0); glayout->setContentsMargins(0, 0, 0, 0); // Create QML OpenGL widget m_glMonitor = new GLWidget((int)id); connect(m_glMonitor, &GLWidget::passKeyEvent, this, &Monitor::doKeyPressEvent); connect(m_glMonitor, &GLWidget::panView, this, &Monitor::panView); connect(m_glMonitor, &GLWidget::seekPosition, this, 
&Monitor::seekPosition, Qt::DirectConnection); connect(m_glMonitor, &GLWidget::consumerPosition, this, &Monitor::slotSeekPosition, Qt::DirectConnection); connect(m_glMonitor, &GLWidget::activateMonitor, this, &AbstractMonitor::slotActivateMonitor, Qt::DirectConnection); m_videoWidget = QWidget::createWindowContainer(qobject_cast(m_glMonitor)); m_videoWidget->setAcceptDrops(true); auto *leventEater = new QuickEventEater(this); m_videoWidget->installEventFilter(leventEater); connect(leventEater, &QuickEventEater::addEffect, this, &Monitor::slotAddEffect); m_qmlManager = new QmlManager(m_glMonitor); connect(m_qmlManager, &QmlManager::effectChanged, this, &Monitor::effectChanged); connect(m_qmlManager, &QmlManager::effectPointsChanged, this, &Monitor::effectPointsChanged); auto *monitorEventEater = new QuickMonitorEventEater(this); m_glWidget->installEventFilter(monitorEventEater); connect(monitorEventEater, &QuickMonitorEventEater::doKeyPressEvent, this, &Monitor::doKeyPressEvent); glayout->addWidget(m_videoWidget, 0, 0); m_verticalScroll = new QScrollBar(Qt::Vertical); glayout->addWidget(m_verticalScroll, 0, 1); m_verticalScroll->hide(); m_horizontalScroll = new QScrollBar(Qt::Horizontal); glayout->addWidget(m_horizontalScroll, 1, 0); m_horizontalScroll->hide(); connect(m_horizontalScroll, &QAbstractSlider::valueChanged, this, &Monitor::setOffsetX); connect(m_verticalScroll, &QAbstractSlider::valueChanged, this, &Monitor::setOffsetY); connect(m_glMonitor, &GLWidget::frameDisplayed, this, &Monitor::onFrameDisplayed); connect(m_glMonitor, &GLWidget::mouseSeek, this, &Monitor::slotMouseSeek); connect(m_glMonitor, &GLWidget::monitorPlay, this, &Monitor::slotPlay); connect(m_glMonitor, &GLWidget::startDrag, this, &Monitor::slotStartDrag); connect(m_glMonitor, &GLWidget::switchFullScreen, this, &Monitor::slotSwitchFullScreen); connect(m_glMonitor, &GLWidget::zoomChanged, this, &Monitor::setZoom); connect(m_glMonitor, SIGNAL(lockMonitor(bool)), this, SLOT(slotLockMonitor(bool)), Qt::DirectConnection); connect(m_glMonitor, &GLWidget::showContextMenu, this, &Monitor::slotShowMenu); connect(m_glMonitor, &GLWidget::gpuNotSupported, this, &Monitor::gpuError); m_glWidget->setMinimumSize(QSize(320, 180)); layout->addWidget(m_glWidget, 10); layout->addStretch(); // Tool bar buttons m_toolbar = new QToolBar(this); QWidget *sp1 = new QWidget(this); sp1->setSizePolicy(QSizePolicy::MinimumExpanding, QSizePolicy::Preferred); m_toolbar->addWidget(sp1); if (id == Kdenlive::ClipMonitor) { // Add options for recording m_recManager = new RecManager(this); connect(m_recManager, &RecManager::warningMessage, this, &Monitor::warningMessage); connect(m_recManager, &RecManager::addClipToProject, this, &Monitor::addClipToProject); m_toolbar->addAction(manager->getAction(QStringLiteral("insert_project_tree"))); m_toolbar->setToolTip(i18n("Insert Zone to Project Bin")); m_toolbar->addSeparator(); } if (id != Kdenlive::DvdMonitor) { QAction *markIn = new QAction(QIcon::fromTheme(QStringLiteral("zone-in")), i18n("Set Zone In"), this); QAction *markOut = new QAction(QIcon::fromTheme(QStringLiteral("zone-out")), i18n("Set Zone Out"), this); m_toolbar->addAction(markIn); m_toolbar->addAction(markOut); connect(markIn, &QAction::triggered, [&, manager]() { m_monitorManager->activateMonitor(m_id); manager->getAction(QStringLiteral("mark_in"))->trigger(); }); connect(markOut, &QAction::triggered, [&, manager]() { m_monitorManager->activateMonitor(m_id); manager->getAction(QStringLiteral("mark_out"))->trigger(); }); } 
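    // The QML monitor above is a QQuickView (a QWindow), so it cannot be placed in a QWidget layout
    // directly; QWidget::createWindowContainer() wraps it, and the QuickEventEater /
    // QuickMonitorEventEater filters are installed so drag-and-drop and key events are re-emitted
    // as signals instead of being consumed by the view. A minimal sketch of that hosting pattern,
    // with illustrative names only (hostWidget, hostLayout, dropFilter are not Kdenlive symbols):
    //
    //     QQuickView *view = new QQuickView;
    //     view->setResizeMode(QQuickView::SizeRootObjectToView);
    //     view->setSource(QUrl(QStringLiteral("qrc:/qml/kdenlivemonitor.qml")));
    //     QWidget *container = QWidget::createWindowContainer(view, hostWidget);
    //     container->setAcceptDrops(true);           // let drags reach the container at all
    //     container->installEventFilter(dropFilter); // e.g. a QuickEventEater instance
    //     hostLayout->addWidget(container);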
m_toolbar->addAction(manager->getAction(QStringLiteral("monitor_seek_backward"))); auto *playButton = new QToolButton(m_toolbar); m_playMenu = new QMenu(i18n("Play..."), this); QAction *originalPlayAction = static_cast(manager->getAction(QStringLiteral("monitor_play"))); m_playAction = new KDualAction(i18n("Play"), i18n("Pause"), this); m_playAction->setInactiveIcon(QIcon::fromTheme(QStringLiteral("media-playback-start"))); m_playAction->setActiveIcon(QIcon::fromTheme(QStringLiteral("media-playback-pause"))); QString strippedTooltip = m_playAction->toolTip().remove(QRegExp(QStringLiteral("\\s\\(.*\\)"))); // append shortcut if it exists for action if (originalPlayAction->shortcut() == QKeySequence(0)) { m_playAction->setToolTip(strippedTooltip); } else { m_playAction->setToolTip(strippedTooltip + QStringLiteral(" (") + originalPlayAction->shortcut().toString() + QLatin1Char(')')); } m_playMenu->addAction(m_playAction); connect(m_playAction, &QAction::triggered, this, &Monitor::slotSwitchPlay); playButton->setMenu(m_playMenu); playButton->setPopupMode(QToolButton::MenuButtonPopup); m_toolbar->addWidget(playButton); m_toolbar->addAction(manager->getAction(QStringLiteral("monitor_seek_forward"))); playButton->setDefaultAction(m_playAction); m_configMenu = new QMenu(i18n("Misc..."), this); if (id != Kdenlive::DvdMonitor) { if (id == Kdenlive::ClipMonitor) { m_markerMenu = new QMenu(i18n("Go to marker..."), this); } else { m_markerMenu = new QMenu(i18n("Go to guide..."), this); } m_markerMenu->setEnabled(false); m_configMenu->addMenu(m_markerMenu); connect(m_markerMenu, &QMenu::triggered, this, &Monitor::slotGoToMarker); m_forceSize = new KSelectAction(QIcon::fromTheme(QStringLiteral("transform-scale")), i18n("Force Monitor Size"), this); QAction *fullAction = m_forceSize->addAction(QIcon(), i18n("Force 100%")); fullAction->setData(100); QAction *halfAction = m_forceSize->addAction(QIcon(), i18n("Force 50%")); halfAction->setData(50); QAction *freeAction = m_forceSize->addAction(QIcon(), i18n("Free Resize")); freeAction->setData(0); m_configMenu->addAction(m_forceSize); m_forceSize->setCurrentAction(freeAction); connect(m_forceSize, static_cast(&KSelectAction::triggered), this, &Monitor::slotForceSize); } // Create Volume slider popup m_audioSlider = new QSlider(Qt::Vertical); m_audioSlider->setRange(0, 100); m_audioSlider->setValue(KdenliveSettings::volume()); connect(m_audioSlider, &QSlider::valueChanged, this, &Monitor::slotSetVolume); auto *widgetslider = new QWidgetAction(this); widgetslider->setText(i18n("Audio volume")); widgetslider->setDefaultWidget(m_audioSlider); auto *menu = new QMenu(i18n("Volume"), this); menu->setIcon(QIcon::fromTheme(QStringLiteral("audio-volume-medium"))); menu->addAction(widgetslider); m_configMenu->addMenu(menu); /*QIcon icon; if (KdenliveSettings::volume() == 0) { icon = QIcon::fromTheme(QStringLiteral("audio-volume-muted")); } else { icon = QIcon::fromTheme(QStringLiteral("audio-volume-medium")); } m_audioButton->setIcon(icon);*/ setSizePolicy(QSizePolicy::Expanding, QSizePolicy::Expanding); setLayout(layout); setMinimumHeight(200); connect(this, &Monitor::scopesClear, m_glMonitor, &GLWidget::releaseAnalyse, Qt::DirectConnection); connect(m_glMonitor, &GLWidget::analyseFrame, this, &Monitor::frameUpdated); connect(m_glMonitor, &GLWidget::audioSamplesSignal, this, &Monitor::audioSamplesSignal); if (id != Kdenlive::ClipMonitor) { // TODO: reimplement // connect(render, &Render::durationChanged, this, &Monitor::durationChanged); 
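    // Note: MonitorProxy::saveZone is routed differently per monitor type below - the project
    // monitor forwards the new zone to the timeline (updateTimelineClipZone), while the clip
    // monitor stores it on the current clip (updateClipZone).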
connect(m_glMonitor->getControllerProxy(), &MonitorProxy::saveZone, this, &Monitor::updateTimelineClipZone); } else { connect(m_glMonitor->getControllerProxy(), &MonitorProxy::saveZone, this, &Monitor::updateClipZone); } connect(m_glMonitor->getControllerProxy(), &MonitorProxy::triggerAction, pCore.get(), &Core::triggerAction); connect(m_glMonitor->getControllerProxy(), &MonitorProxy::seekNextKeyframe, this, &Monitor::seekToNextKeyframe); connect(m_glMonitor->getControllerProxy(), &MonitorProxy::seekPreviousKeyframe, this, &Monitor::seekToPreviousKeyframe); connect(m_glMonitor->getControllerProxy(), &MonitorProxy::addRemoveKeyframe, this, &Monitor::addRemoveKeyframe); connect(m_glMonitor->getControllerProxy(), &MonitorProxy::seekToKeyframe, this, &Monitor::slotSeekToKeyFrame); m_sceneVisibilityAction = new QAction(QIcon::fromTheme(QStringLiteral("transform-crop")), i18n("Show/Hide edit mode"), this); m_sceneVisibilityAction->setCheckable(true); m_sceneVisibilityAction->setChecked(KdenliveSettings::showOnMonitorScene()); connect(m_sceneVisibilityAction, &QAction::triggered, this, &Monitor::slotEnableEffectScene); m_toolbar->addAction(m_sceneVisibilityAction); m_toolbar->addSeparator(); m_timePos = new TimecodeDisplay(m_monitorManager->timecode(), this); m_toolbar->addWidget(m_timePos); auto *configButton = new QToolButton(m_toolbar); configButton->setIcon(QIcon::fromTheme(QStringLiteral("kdenlive-menu"))); configButton->setToolTip(i18n("Options")); configButton->setMenu(m_configMenu); configButton->setPopupMode(QToolButton::InstantPopup); m_toolbar->addWidget(configButton); /*QWidget *spacer = new QWidget(this); spacer->setSizePolicy(QSizePolicy::MinimumExpanding, QSizePolicy::Preferred); m_toolbar->addWidget(spacer);*/ m_toolbar->addSeparator(); int tm = 0; int bm = 0; m_toolbar->getContentsMargins(nullptr, &tm, nullptr, &bm); m_audioMeterWidget = new MonitorAudioLevel(m_toolbar->height() - tm - bm, this); m_toolbar->addWidget(m_audioMeterWidget); if (!m_audioMeterWidget->isValid) { KdenliveSettings::setMonitoraudio(0x01); m_audioMeterWidget->setVisibility(false); } else { m_audioMeterWidget->setVisibility((KdenliveSettings::monitoraudio() & m_id) != 0); } connect(m_timePos, SIGNAL(timeCodeEditingFinished()), this, SLOT(slotSeek())); layout->addWidget(m_toolbar); if (m_recManager) { layout->addWidget(m_recManager->toolbar()); } // Load monitor overlay qml loadQmlScene(MonitorSceneDefault); // Info message widget m_infoMessage = new KMessageWidget(this); layout->addWidget(m_infoMessage); m_infoMessage->hide(); } Monitor::~Monitor() { delete m_splitEffect; delete m_audioMeterWidget; delete m_glMonitor; delete m_videoWidget; delete m_glWidget; delete m_timePos; } void Monitor::setOffsetX(int x) { m_glMonitor->setOffsetX(x, m_horizontalScroll->maximum()); } void Monitor::setOffsetY(int y) { m_glMonitor->setOffsetY(y, m_verticalScroll->maximum()); } void Monitor::slotGetCurrentImage(bool request) { m_glMonitor->sendFrameForAnalysis = request; m_monitorManager->activateMonitor(m_id); refreshMonitorIfActive(); if (request) { // Update analysis state QTimer::singleShot(500, m_monitorManager, &MonitorManager::checkScopes); } else { m_glMonitor->releaseAnalyse(); } } void Monitor::slotAddEffect(const QStringList &effect) { if (m_id == Kdenlive::ClipMonitor) { if (m_controller) { emit addMasterEffect(m_controller->AbstractProjectItem::clipId(), effect); } } else { emit addEffect(effect); } } void Monitor::refreshIcons() { QList allMenus = this->findChildren(); for (int i = 0; i < allMenus.count(); 
i++) {
        QAction *m = allMenus.at(i);
        QIcon ic = m->icon();
        if (ic.isNull() || ic.name().isEmpty()) {
            continue;
        }
        QIcon newIcon = QIcon::fromTheme(ic.name());
        m->setIcon(newIcon);
    }
    QList<KDualAction *> allButtons = this->findChildren<KDualAction *>();
    for (int i = 0; i < allButtons.count(); i++) {
        KDualAction *m = allButtons.at(i);
        QIcon ic = m->activeIcon();
        if (ic.isNull() || ic.name().isEmpty()) {
            continue;
        }
        QIcon newIcon = QIcon::fromTheme(ic.name());
        m->setActiveIcon(newIcon);
        ic = m->inactiveIcon();
        if (ic.isNull() || ic.name().isEmpty()) {
            continue;
        }
        newIcon = QIcon::fromTheme(ic.name());
        m->setInactiveIcon(newIcon);
    }
}

QAction *Monitor::recAction()
{
    if (m_recManager) {
        return m_recManager->recAction();
    }
    return nullptr;
}

void Monitor::slotLockMonitor(bool lock)
{
    m_monitorManager->lockMonitor(m_id, lock);
}

void Monitor::setupMenu(QMenu *goMenu, QMenu *overlayMenu, QAction *playZone, QAction *loopZone, QMenu *markerMenu, QAction *loopClip)
{
    delete m_contextMenu;
    m_contextMenu = new QMenu(this);
    m_contextMenu->addMenu(m_playMenu);
    if (goMenu) {
        m_contextMenu->addMenu(goMenu);
    }
    if (markerMenu) {
        m_contextMenu->addMenu(markerMenu);
        QList<QAction *> list = markerMenu->actions();
        for (int i = 0; i < list.count(); ++i) {
            if (list.at(i)->data().toString() == QLatin1String("edit_marker")) {
                m_editMarker = list.at(i);
                break;
            }
        }
    }
    m_playMenu->addAction(playZone);
    m_playMenu->addAction(loopZone);
    if (loopClip) {
        m_loopClipAction = loopClip;
        m_playMenu->addAction(loopClip);
    }
    // TODO: add save zone to timeline monitor when fixed
    m_contextMenu->addMenu(m_markerMenu);
    if (m_id == Kdenlive::ClipMonitor) {
        m_contextMenu->addAction(QIcon::fromTheme(QStringLiteral("document-save")), i18n("Save zone"), this, SLOT(slotSaveZone()));
        QAction *extractZone =
            m_configMenu->addAction(QIcon::fromTheme(QStringLiteral("document-new")), i18n("Extract Zone"), this, SLOT(slotExtractCurrentZone()));
        m_contextMenu->addAction(extractZone);
    }
    m_contextMenu->addAction(m_monitorManager->getAction(QStringLiteral("extract_frame")));
    m_contextMenu->addAction(m_monitorManager->getAction(QStringLiteral("extract_frame_to_project")));
    if (m_id == Kdenlive::ProjectMonitor) {
        m_multitrackView =
            m_contextMenu->addAction(QIcon::fromTheme(QStringLiteral("view-split-left-right")), i18n("Multitrack view"), this, SIGNAL(multitrackView(bool)));
        m_multitrackView->setCheckable(true);
        m_configMenu->addAction(m_multitrackView);
    } else if (m_id == Kdenlive::ClipMonitor) {
        QAction *setThumbFrame =
            m_contextMenu->addAction(QIcon::fromTheme(QStringLiteral("document-new")), i18n("Set current image as thumbnail"), this, SLOT(slotSetThumbFrame()));
        m_configMenu->addAction(setThumbFrame);
    }
    if (overlayMenu) {
        m_contextMenu->addMenu(overlayMenu);
    }
    QAction *overlayAudio = m_contextMenu->addAction(QIcon(), i18n("Overlay audio waveform"));
    overlayAudio->setCheckable(true);
    connect(overlayAudio, &QAction::toggled, m_glMonitor, &GLWidget::slotSwitchAudioOverlay);
    overlayAudio->setChecked(KdenliveSettings::displayAudioOverlay());
    m_configMenu->addAction(overlayAudio);
    QAction *switchAudioMonitor = m_configMenu->addAction(i18n("Show Audio Levels"), this, SLOT(slotSwitchAudioMonitor()));
    switchAudioMonitor->setCheckable(true);
    switchAudioMonitor->setChecked((KdenliveSettings::monitoraudio() & m_id) != 0);
    // For some reason, the frame in QAbstractSpinBox (base class of TimeCodeDisplay) needs to be displayed once, then hidden
    // or it will never appear (supposed to appear on hover).
m_timePos->setFrame(false); } void Monitor::slotGoToMarker(QAction *action) { int pos = action->data().toInt(); slotSeek(pos); } void Monitor::slotForceSize(QAction *a) { int resizeType = a->data().toInt(); int profileWidth = 320; int profileHeight = 200; if (resizeType > 0) { // calculate size QRect r = QApplication::desktop()->screenGeometry(); profileHeight = m_glMonitor->profileSize().height() * resizeType / 100; profileWidth = pCore->getCurrentProfile()->dar() * profileHeight; if (profileWidth > r.width() * 0.8 || profileHeight > r.height() * 0.7) { // reset action to free resize const QList list = m_forceSize->actions(); for (QAction *ac : list) { if (ac->data().toInt() == m_forceSizeFactor) { m_forceSize->setCurrentAction(ac); break; } } warningMessage(i18n("Your screen resolution is not sufficient for this action")); return; } } switch (resizeType) { case 100: case 50: // resize full size setSizePolicy(QSizePolicy::MinimumExpanding, QSizePolicy::MinimumExpanding); m_videoWidget->setMinimumSize(profileWidth, profileHeight); m_videoWidget->setMaximumSize(profileWidth, profileHeight); setMinimumSize(QSize(profileWidth, profileHeight + m_toolbar->height() + m_glMonitor->getControllerProxy()->rulerHeight())); break; default: // Free resize m_videoWidget->setMinimumSize(profileWidth, profileHeight); m_videoWidget->setMaximumSize(QWIDGETSIZE_MAX, QWIDGETSIZE_MAX); setMinimumSize(QSize(profileWidth, profileHeight + m_toolbar->height() + m_glMonitor->getControllerProxy()->rulerHeight())); setSizePolicy(QSizePolicy::Expanding, QSizePolicy::Expanding); break; } m_forceSizeFactor = resizeType; updateGeometry(); } QString Monitor::getTimecodeFromFrames(int pos) { return m_monitorManager->timecode().getTimecodeFromFrames(pos); } double Monitor::fps() const { return m_monitorManager->timecode().fps(); } Timecode Monitor::timecode() const { return m_monitorManager->timecode(); } void Monitor::updateMarkers() { if (m_controller) { m_markerMenu->clear(); QList markers = m_controller->getMarkerModel()->getAllMarkers(); if (!markers.isEmpty()) { for (int i = 0; i < markers.count(); ++i) { int pos = (int)markers.at(i).time().frames(m_monitorManager->timecode().fps()); QString position = m_monitorManager->timecode().getTimecode(markers.at(i).time()) + QLatin1Char(' ') + markers.at(i).comment(); QAction *go = m_markerMenu->addAction(position); go->setData(pos); } } m_markerMenu->setEnabled(!m_markerMenu->isEmpty()); } } void Monitor::setGuides(const QMap &guides) { // TODO: load guides model m_markerMenu->clear(); QMapIterator i(guides); QList guidesList; while (i.hasNext()) { i.next(); CommentedTime timeGuide(GenTime(i.key()), i.value()); guidesList << timeGuide; int pos = (int)timeGuide.time().frames(m_monitorManager->timecode().fps()); QString position = m_monitorManager->timecode().getTimecode(timeGuide.time()) + QLatin1Char(' ') + timeGuide.comment(); QAction *go = m_markerMenu->addAction(position); go->setData(pos); } // m_ruler->setMarkers(guidesList); m_markerMenu->setEnabled(!m_markerMenu->isEmpty()); checkOverlay(); } void Monitor::slotSeekToPreviousSnap() { if (m_controller) { m_glMonitor->seek(getSnapForPos(true).frames(m_monitorManager->timecode().fps())); } } void Monitor::slotSeekToNextSnap() { if (m_controller) { m_glMonitor->seek(getSnapForPos(false).frames(m_monitorManager->timecode().fps())); } } int Monitor::position() { return m_glMonitor->getCurrentPos(); } GenTime Monitor::getSnapForPos(bool previous) { int frame = previous ? 
m_snaps->getPreviousPoint(m_glMonitor->getCurrentPos()) : m_snaps->getNextPoint(m_glMonitor->getCurrentPos()); return {frame, pCore->getCurrentFps()}; } void Monitor::slotLoadClipZone(const QPoint &zone) { m_glMonitor->getControllerProxy()->setZone(zone.x(), zone.y()); checkOverlay(); } void Monitor::slotSetZoneStart() { m_glMonitor->getControllerProxy()->setZoneIn(m_glMonitor->getCurrentPos()); if (m_controller) { m_controller->setZone(m_glMonitor->getControllerProxy()->zone()); } else { // timeline emit timelineZoneChanged(); } checkOverlay(); } void Monitor::slotSetZoneEnd(bool discardLastFrame) { Q_UNUSED(discardLastFrame); int pos = m_glMonitor->getCurrentPos(); if (m_controller) { if (pos < (int)m_controller->frameDuration() - 1) { pos++; } } else pos++; m_glMonitor->getControllerProxy()->setZoneOut(pos); if (m_controller) { m_controller->setZone(m_glMonitor->getControllerProxy()->zone()); } checkOverlay(); } // virtual void Monitor::mousePressEvent(QMouseEvent *event) { m_monitorManager->activateMonitor(m_id); if ((event->button() & Qt::RightButton) == 0u) { if (m_glWidget->geometry().contains(event->pos())) { m_DragStartPosition = event->pos(); event->accept(); } } else if (m_contextMenu) { slotActivateMonitor(); m_contextMenu->popup(event->globalPos()); event->accept(); } QWidget::mousePressEvent(event); } void Monitor::slotShowMenu(const QPoint pos) { slotActivateMonitor(); if (m_contextMenu) { m_contextMenu->popup(pos); } } void Monitor::resizeEvent(QResizeEvent *event) { Q_UNUSED(event) if (m_glMonitor->zoom() > 0.0f) { float horizontal = float(m_horizontalScroll->value()) / float(m_horizontalScroll->maximum()); float vertical = float(m_verticalScroll->value()) / float(m_verticalScroll->maximum()); adjustScrollBars(horizontal, vertical); } else { m_horizontalScroll->hide(); m_verticalScroll->hide(); } } void Monitor::adjustScrollBars(float horizontal, float vertical) { if (m_glMonitor->zoom() > 1.0f) { m_horizontalScroll->setPageStep(m_glWidget->width()); m_horizontalScroll->setMaximum((int)((float)m_glMonitor->profileSize().width() * m_glMonitor->zoom()) - m_horizontalScroll->pageStep()); m_horizontalScroll->setValue(qRound(horizontal * float(m_horizontalScroll->maximum()))); emit m_horizontalScroll->valueChanged(m_horizontalScroll->value()); m_horizontalScroll->show(); } else { int max = (int)((float)m_glMonitor->profileSize().width() * m_glMonitor->zoom()) - m_glWidget->width(); emit m_horizontalScroll->valueChanged(qRound(0.5 * max)); m_horizontalScroll->hide(); } if (m_glMonitor->zoom() > 1.0f) { m_verticalScroll->setPageStep(m_glWidget->height()); m_verticalScroll->setMaximum((int)((float)m_glMonitor->profileSize().height() * m_glMonitor->zoom()) - m_verticalScroll->pageStep()); m_verticalScroll->setValue((int)((float)m_verticalScroll->maximum() * vertical)); emit m_verticalScroll->valueChanged(m_verticalScroll->value()); m_verticalScroll->show(); } else { int max = (int)((float)m_glMonitor->profileSize().height() * m_glMonitor->zoom()) - m_glWidget->height(); emit m_verticalScroll->valueChanged(qRound(0.5 * max)); m_verticalScroll->hide(); } } void Monitor::setZoom() { if (qFuzzyCompare(m_glMonitor->zoom(), 1.0f)) { m_horizontalScroll->hide(); m_verticalScroll->hide(); m_glMonitor->setOffsetX(m_horizontalScroll->value(), m_horizontalScroll->maximum()); m_glMonitor->setOffsetY(m_verticalScroll->value(), m_verticalScroll->maximum()); } else { adjustScrollBars(0.5f, 0.5f); } } void Monitor::slotSwitchFullScreen(bool minimizeOnly) { // TODO: disable screensaver? 
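    // On a multi-monitor setup the code below moves the video widget to a screen other than the
    // one showing the main window, so the application remains usable while the monitor is full
    // screen. A compact sketch of the screen selection it performs (mainWindow is a placeholder
    // for the widget whose screen should be avoided):
    //
    //     int target = -1;
    //     QDesktopWidget *desk = QApplication::desktop();
    //     for (int i = 0; i < desk->screenCount() && target == -1; ++i) {
    //         if (i != desk->screenNumber(mainWindow)) {
    //             target = i; // first screen that does not host the main window
    //         }
    //     }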
if (!m_glWidget->isFullScreen() && !minimizeOnly) { // Check if we have a multiple monitor setup int monitors = QApplication::desktop()->screenCount(); int screen = -1; if (monitors > 1) { QRect screenres; // Move monitor widget to the second screen (one screen for Kdenlive, the other one for the Monitor widget // int currentScreen = QApplication::desktop()->screenNumber(this); for (int i = 0; screen == -1 && i < QApplication::desktop()->screenCount(); i++) { if (i != QApplication::desktop()->screenNumber(this->parentWidget()->parentWidget())) { screen = i; } } } m_qmlManager->enableAudioThumbs(false); m_glWidget->setParent(QApplication::desktop()->screen(screen)); m_glWidget->move(QApplication::desktop()->screenGeometry(screen).bottomLeft()); m_glWidget->showFullScreen(); } else { m_glWidget->showNormal(); m_qmlManager->enableAudioThumbs(true); auto *lay = (QVBoxLayout *)layout(); lay->insertWidget(0, m_glWidget, 10); } } void Monitor::reparent() { m_glWidget->setParent(nullptr); m_glWidget->showMinimized(); m_glWidget->showNormal(); auto *lay = (QVBoxLayout *)layout(); lay->insertWidget(0, m_glWidget, 10); } // virtual void Monitor::mouseReleaseEvent(QMouseEvent *event) { if (m_dragStarted) { event->ignore(); return; } if (event->button() != Qt::RightButton) { if (m_glMonitor->geometry().contains(event->pos())) { if (isActive()) { slotPlay(); } else { slotActivateMonitor(); } } // else event->ignore(); //QWidget::mouseReleaseEvent(event); } m_dragStarted = false; event->accept(); QWidget::mouseReleaseEvent(event); } void Monitor::slotStartDrag() { if (m_id == Kdenlive::ProjectMonitor || m_controller == nullptr) { // dragging is only allowed for clip monitor return; } auto *drag = new QDrag(this); auto *mimeData = new QMimeData; // Get drag state QQuickItem *root = m_glMonitor->rootObject(); int dragType = 0; if (root) { dragType = root->property("dragType").toInt(); root->setProperty("dragType", 0); } QByteArray prodData; QPoint p = m_glMonitor->getControllerProxy()->zone(); if (p.x() == -1 || p.y() == -1) { prodData = m_controller->AbstractProjectItem::clipId().toUtf8(); } else { QStringList list; list.append(m_controller->AbstractProjectItem::clipId()); list.append(QString::number(p.x())); list.append(QString::number(p.y() - 1)); prodData.append(list.join(QLatin1Char('/')).toUtf8()); } switch (dragType) { case 1: // Audio only drag prodData.prepend('A'); break; case 2: // Audio only drag prodData.prepend('V'); break; default: break; } mimeData->setData(QStringLiteral("kdenlive/producerslist"), prodData); drag->setMimeData(mimeData); /*QPixmap pix = m_currentClip->thumbnail(); drag->setPixmap(pix); drag->setHotSpot(QPoint(0, 50));*/ drag->start(Qt::MoveAction); } void Monitor::enterEvent(QEvent *event) { m_qmlManager->enableAudioThumbs(true); QWidget::enterEvent(event); } void Monitor::leaveEvent(QEvent *event) { m_qmlManager->enableAudioThumbs(false); QWidget::leaveEvent(event); } // virtual void Monitor::mouseMoveEvent(QMouseEvent *event) { if (m_dragStarted || m_controller == nullptr) { return; } if ((event->pos() - m_DragStartPosition).manhattanLength() < QApplication::startDragDistance()) { return; } { auto *drag = new QDrag(this); auto *mimeData = new QMimeData; m_dragStarted = true; QStringList list; list.append(m_controller->AbstractProjectItem::clipId()); QPoint p = m_glMonitor->getControllerProxy()->zone(); list.append(QString::number(p.x())); list.append(QString::number(p.y())); QByteArray clipData; clipData.append(list.join(QLatin1Char(';')).toUtf8()); 
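    // Drag payload formats as assembled in this file: slotStartDrag() above fills
    // "kdenlive/producerslist" with either the bare clip id or "clipId/zoneIn/zoneOut-1",
    // optionally prefixed with 'A' or 'V' depending on the QML dragType (audio-only or video-only
    // drag); this mouseMoveEvent path fills "kdenlive/clip" with "clipId;zoneIn;zoneOut". A
    // hypothetical receiver would split on the same separators, e.g.:
    //
    //     const QStringList fields = QString::fromUtf8(mime->data(QStringLiteral("kdenlive/clip"))).split(QLatin1Char(';'));
    //     if (fields.size() == 3) {
    //         const QString clipId = fields.at(0);
    //         const int zoneIn = fields.at(1).toInt();
    //         const int zoneOut = fields.at(2).toInt();
    //     }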
mimeData->setData(QStringLiteral("kdenlive/clip"), clipData); drag->setMimeData(mimeData); drag->start(Qt::MoveAction); } event->accept(); } /*void Monitor::dragMoveEvent(QDragMoveEvent * event) { event->setDropAction(Qt::IgnoreAction); event->setDropAction(Qt::MoveAction); if (event->mimeData()->hasText()) { event->acceptProposedAction(); } } Qt::DropActions Monitor::supportedDropActions() const { // returns what actions are supported when dropping return Qt::MoveAction; }*/ QStringList Monitor::mimeTypes() const { QStringList qstrList; // list of accepted MIME types for drop qstrList.append(QStringLiteral("kdenlive/clip")); return qstrList; } // virtual void Monitor::wheelEvent(QWheelEvent *event) { slotMouseSeek(event->delta(), event->modifiers()); event->accept(); } void Monitor::mouseDoubleClickEvent(QMouseEvent *event) { slotSwitchFullScreen(); event->accept(); } void Monitor::keyPressEvent(QKeyEvent *event) { if (event->key() == Qt::Key_Escape) { slotSwitchFullScreen(); event->accept(); return; } if (m_glWidget->isFullScreen()) { event->ignore(); emit passKeyPress(event); return; } QWidget::keyPressEvent(event); } void Monitor::slotMouseSeek(int eventDelta, uint modifiers) { if ((modifiers & Qt::ControlModifier) != 0u) { int delta = m_monitorManager->timecode().fps(); if (eventDelta > 0) { delta = 0 - delta; } m_glMonitor->seek(m_glMonitor->getCurrentPos() - delta); } else if ((modifiers & Qt::AltModifier) != 0u) { if (eventDelta >= 0) { emit seekToPreviousSnap(); } else { emit seekToNextSnap(); } } else { if (eventDelta >= 0) { slotRewindOneFrame(); } else { slotForwardOneFrame(); } } } void Monitor::slotSetThumbFrame() { if (m_controller == nullptr) { return; } m_controller->setProducerProperty(QStringLiteral("kdenlive:thumbnailFrame"), m_glMonitor->getCurrentPos()); emit refreshClipThumbnail(m_controller->AbstractProjectItem::clipId()); } void Monitor::slotExtractCurrentZone() { if (m_controller == nullptr) { return; } emit extractZone(m_controller->AbstractProjectItem::clipId()); } std::shared_ptr Monitor::currentController() const { return m_controller; } void Monitor::slotExtractCurrentFrame(QString frameName, bool addToProject) { if (QFileInfo(frameName).fileName().isEmpty()) { // convenience: when extracting an image to be added to the project, // suggest a suitable image file name. In the project monitor, this // suggestion bases on the project file name; in the clip monitor, // the suggestion bases on the clip file name currently shown. // Finally, the frame number is added to this suggestion, prefixed // with "-f", so we get something like clip-f#.png. QString suggestedImageName = QFileInfo(currentController() ? currentController()->clipName() : pCore->currentDoc()->url().isValid() ? pCore->currentDoc()->url().fileName() : i18n("untitled")) .completeBaseName() + QStringLiteral("-f") + QString::number(m_glMonitor->getCurrentPos()).rightJustified(6, QLatin1Char('0')) + QStringLiteral(".png"); frameName = QFileInfo(frameName, suggestedImageName).fileName(); } QString framesFolder = KRecentDirs::dir(QStringLiteral(":KdenliveFramesFolder")); if (framesFolder.isEmpty()) { framesFolder = QDir::homePath(); } QScopedPointer dlg(new QDialog(this)); QScopedPointer fileWidget(new KFileWidget(QUrl::fromLocalFile(framesFolder), dlg.data())); dlg->setWindowTitle(addToProject ? 
i18n("Save Image") : i18n("Save Image to Project")); auto *layout = new QVBoxLayout; layout->addWidget(fileWidget.data()); QCheckBox *b = nullptr; if (m_id == Kdenlive::ClipMonitor) { b = new QCheckBox(i18n("Export image using source resolution"), dlg.data()); b->setChecked(KdenliveSettings::exportframe_usingsourceres()); fileWidget->setCustomWidget(b); } fileWidget->setConfirmOverwrite(true); fileWidget->okButton()->show(); fileWidget->cancelButton()->show(); QObject::connect(fileWidget->okButton(), &QPushButton::clicked, fileWidget.data(), &KFileWidget::slotOk); QObject::connect(fileWidget.data(), &KFileWidget::accepted, fileWidget.data(), &KFileWidget::accept); QObject::connect(fileWidget.data(), &KFileWidget::accepted, dlg.data(), &QDialog::accept); QObject::connect(fileWidget->cancelButton(), &QPushButton::clicked, dlg.data(), &QDialog::reject); dlg->setLayout(layout); fileWidget->setMimeFilter(QStringList() << QStringLiteral("image/png")); fileWidget->setMode(KFile::File | KFile::LocalOnly); fileWidget->setOperationMode(KFileWidget::Saving); QUrl relativeUrl; relativeUrl.setPath(frameName); #if KIO_VERSION >= QT_VERSION_CHECK(5, 33, 0) fileWidget->setSelectedUrl(relativeUrl); #else fileWidget->setSelection(relativeUrl.toString()); #endif KSharedConfig::Ptr conf = KSharedConfig::openConfig(); QWindow *handle = dlg->windowHandle(); if ((handle != nullptr) && conf->hasGroup("FileDialogSize")) { KWindowConfig::restoreWindowSize(handle, conf->group("FileDialogSize")); dlg->resize(handle->size()); } if (dlg->exec() == QDialog::Accepted) { QString selectedFile = fileWidget->selectedFile(); if (!selectedFile.isEmpty()) { // Create Qimage with frame QImage frame; // check if we are using a proxy if ((m_controller != nullptr) && !m_controller->getProducerProperty(QStringLiteral("kdenlive:proxy")).isEmpty() && m_controller->getProducerProperty(QStringLiteral("kdenlive:proxy")) != QLatin1String("-")) { // using proxy, use original clip url to get frame frame = m_glMonitor->getControllerProxy()->extractFrame(m_glMonitor->getCurrentPos(), m_controller->getProducerProperty(QStringLiteral("kdenlive:originalurl")), -1, -1, b != nullptr ? b->isChecked() : false); } else { frame = m_glMonitor->getControllerProxy()->extractFrame(m_glMonitor->getCurrentPos(), QString(), -1, -1, b != nullptr ? 
b->isChecked() : false); } frame.save(selectedFile); if (b != nullptr) { KdenliveSettings::setExportframe_usingsourceres(b->isChecked()); } KRecentDirs::add(QStringLiteral(":KdenliveFramesFolder"), QUrl::fromLocalFile(selectedFile).adjusted(QUrl::RemoveFilename).toLocalFile()); if (addToProject) { QStringList folderInfo = pCore->bin()->getFolderInfo(); pCore->bin()->droppedUrls(QList() << QUrl::fromLocalFile(selectedFile), folderInfo); } } } } void Monitor::setTimePos(const QString &pos) { m_timePos->setValue(pos); slotSeek(); } void Monitor::slotSeek() { slotSeek(m_timePos->getValue()); } void Monitor::slotSeek(int pos) { slotActivateMonitor(); m_glMonitor->seek(pos); } void Monitor::checkOverlay(int pos) { if (m_qmlManager->sceneType() != MonitorSceneDefault) { // we are not in main view, ignore return; } QString overlayText; if (pos == -1) { pos = m_timePos->getValue(); } QPoint zone = m_glMonitor->getControllerProxy()->zone(); std::shared_ptr model; if (m_id == Kdenlive::ClipMonitor && m_controller) { model = m_controller->getMarkerModel(); } else if (m_id == Kdenlive::ProjectMonitor && pCore->currentDoc()) { model = pCore->currentDoc()->getGuideModel(); } if (model) { bool found = false; CommentedTime marker = model->getMarker(GenTime(pos, m_monitorManager->timecode().fps()), &found); if (!found) { if (pos == zone.x()) { overlayText = i18n("In Point"); } else if (pos == zone.y() - 1) { overlayText = i18n("Out Point"); } } else { overlayText = marker.comment(); } } m_glMonitor->getControllerProxy()->setMarkerComment(overlayText); } int Monitor::getZoneStart() { return m_glMonitor->getControllerProxy()->zoneIn(); } int Monitor::getZoneEnd() { return m_glMonitor->getControllerProxy()->zoneOut(); } void Monitor::slotZoneStart() { slotActivateMonitor(); m_glMonitor->getControllerProxy()->pauseAndSeek(m_glMonitor->getControllerProxy()->zoneIn()); } void Monitor::slotZoneEnd() { slotActivateMonitor(); m_glMonitor->getControllerProxy()->pauseAndSeek(m_glMonitor->getControllerProxy()->zoneOut() - 1); } void Monitor::slotRewind(double speed) { slotActivateMonitor(); if (qFuzzyIsNull(speed)) { double currentspeed = m_glMonitor->playSpeed(); if (currentspeed > -1) { speed = -1; } else { speed = currentspeed * 1.5; } } m_glMonitor->switchPlay(true, speed); m_playAction->setActive(true); } void Monitor::slotForward(double speed) { slotActivateMonitor(); if (qFuzzyIsNull(speed)) { double currentspeed = m_glMonitor->playSpeed(); if (currentspeed < 1) { speed = 1; } else { speed = currentspeed * 1.2; } } m_glMonitor->switchPlay(true, speed); m_playAction->setActive(true); } void Monitor::slotRewindOneFrame(int diff) { slotActivateMonitor(); m_glMonitor->seek(m_glMonitor->getCurrentPos() - diff); } void Monitor::slotForwardOneFrame(int diff) { slotActivateMonitor(); m_glMonitor->seek(m_glMonitor->getCurrentPos() + diff); } void Monitor::seekCursor(int pos) { Q_UNUSED(pos) // Deprecated should not be used, instead requestSeek /*if (m_ruler->slotNewValue(pos)) { m_timePos->setValue(pos); checkOverlay(pos); if (m_id != Kdenlive::ClipMonitor) { emit renderPosition(pos); } }*/ } void Monitor::adjustRulerSize(int length, const std::shared_ptr &markerModel) { if (m_controller != nullptr) { m_glMonitor->setRulerInfo(length); } else { m_glMonitor->setRulerInfo(length, markerModel); } m_timePos->setRange(0, length); if (markerModel) { connect(markerModel.get(), SIGNAL(dataChanged(const QModelIndex &, const QModelIndex &, const QVector &)), this, SLOT(checkOverlay())); connect(markerModel.get(), 
SIGNAL(rowsInserted(const QModelIndex &, int, int)), this, SLOT(checkOverlay())); connect(markerModel.get(), SIGNAL(rowsRemoved(const QModelIndex &, int, int)), this, SLOT(checkOverlay())); } } void Monitor::stop() { m_playAction->setActive(false); m_glMonitor->stop(); } void Monitor::mute(bool mute, bool updateIconOnly) { // TODO: we should set the "audio_off" property to 1 to mute the consumer instead of changing volume QIcon icon; if (mute || KdenliveSettings::volume() == 0) { icon = QIcon::fromTheme(QStringLiteral("audio-volume-muted")); } else { icon = QIcon::fromTheme(QStringLiteral("audio-volume-medium")); } if (!updateIconOnly) { m_glMonitor->setVolume(mute ? 0 : (double)KdenliveSettings::volume() / 100.0); } } void Monitor::start() { if (!isVisible() || !isActive()) { return; } m_glMonitor->startConsumer(); } void Monitor::slotRefreshMonitor(bool visible) { if (visible) { if (slotActivateMonitor()) { start(); } } } void Monitor::refreshMonitorIfActive(bool directUpdate) { if (isActive()) { if (directUpdate) { m_glMonitor->refresh(); } else { m_glMonitor->requestRefresh(); } } } void Monitor::pause() { if (!m_playAction->isActive()) { return; } slotActivateMonitor(); m_glMonitor->switchPlay(false); m_playAction->setActive(false); } void Monitor::switchPlay(bool play) { m_playAction->setActive(play); m_glMonitor->switchPlay(play); } void Monitor::slotSwitchPlay() { slotActivateMonitor(); m_glMonitor->switchPlay(m_playAction->isActive()); } void Monitor::slotPlay() { m_playAction->trigger(); } void Monitor::slotPlayZone() { slotActivateMonitor(); bool ok = m_glMonitor->playZone(); if (ok) { m_playAction->setActive(true); } } void Monitor::slotLoopZone() { slotActivateMonitor(); bool ok = m_glMonitor->playZone(true); if (ok) { m_playAction->setActive(true); } } void Monitor::slotLoopClip() { slotActivateMonitor(); bool ok = m_glMonitor->loopClip(); if (ok) { m_playAction->setActive(true); } } void Monitor::updateClipProducer(const std::shared_ptr &prod) { if (m_glMonitor->setProducer(prod, isActive(), -1)) { prod->set_speed(1.0); } } void Monitor::updateClipProducer(const QString &playlist) { Q_UNUSED(playlist) // TODO // Mlt::Producer *prod = new Mlt::Producer(*m_glMonitor->profile(), playlist.toUtf8().constData()); // m_glMonitor->setProducer(prod, isActive(), render->seekFramePosition()); m_glMonitor->switchPlay(true); } void Monitor::slotOpenClip(const std::shared_ptr &controller, int in, int out) { if (m_controller) { disconnect(m_controller->getMarkerModel().get(), SIGNAL(dataChanged(const QModelIndex &, const QModelIndex &, const QVector &)), this, SLOT(checkOverlay())); disconnect(m_controller->getMarkerModel().get(), SIGNAL(rowsInserted(const QModelIndex &, int, int)), this, SLOT(checkOverlay())); disconnect(m_controller->getMarkerModel().get(), SIGNAL(rowsRemoved(const QModelIndex &, int, int)), this, SLOT(checkOverlay())); } m_controller = controller; - loadQmlScene(MonitorSceneDefault); m_snaps.reset(new SnapModel()); m_glMonitor->getControllerProxy()->resetZone(); if (controller) { connect(m_controller->getMarkerModel().get(), SIGNAL(dataChanged(const QModelIndex &, const QModelIndex &, const QVector &)), this, SLOT(checkOverlay())); connect(m_controller->getMarkerModel().get(), SIGNAL(rowsInserted(const QModelIndex &, int, int)), this, SLOT(checkOverlay())); connect(m_controller->getMarkerModel().get(), SIGNAL(rowsRemoved(const QModelIndex &, int, int)), this, SLOT(checkOverlay())); if (m_recManager->toolbar()->isVisible()) { // we are in record mode, don't display clip 
return; } m_glMonitor->setRulerInfo((int)m_controller->frameDuration(), controller->getMarkerModel()); + loadQmlScene(MonitorSceneDefault); m_timePos->setRange(0, (int)m_controller->frameDuration()); updateMarkers(); connect(m_glMonitor->getControllerProxy(), &MonitorProxy::addSnap, this, &Monitor::addSnapPoint, Qt::DirectConnection); connect(m_glMonitor->getControllerProxy(), &MonitorProxy::removeSnap, this, &Monitor::removeSnapPoint, Qt::DirectConnection); if (out == -1) { m_glMonitor->getControllerProxy()->setZone(m_controller->zone(), false); qDebug() << m_controller->zone(); } else { m_glMonitor->getControllerProxy()->setZone(in, out, false); } m_snaps->addPoint((int)m_controller->frameDuration()); // Loading new clip / zone, stop if playing if (m_playAction->isActive()) { m_playAction->setActive(false); } m_glMonitor->setProducer(m_controller->originalProducer(), isActive(), in); m_audioMeterWidget->audioChannels = controller->audioInfo() ? controller->audioInfo()->channels() : 0; m_glMonitor->setAudioThumb(controller->audioChannels(), controller->audioFrameCache); m_controller->getMarkerModel()->registerSnapModel(m_snaps); m_glMonitor->getControllerProxy()->setClipHasAV(controller->hasAudioAndVideo()); // hasEffects = controller->hasEffects(); } else { + loadQmlScene(MonitorSceneDefault); m_glMonitor->setProducer(nullptr, isActive()); m_glMonitor->setAudioThumb(); m_audioMeterWidget->audioChannels = 0; m_glMonitor->getControllerProxy()->setClipHasAV(false); } if (slotActivateMonitor()) { start(); } checkOverlay(); } const QString Monitor::activeClipId() { if (m_controller) { return m_controller->AbstractProjectItem::clipId(); } return QString(); } void Monitor::slotOpenDvdFile(const QString &file) { // TODO Q_UNUSED(file) m_glMonitor->initializeGL(); // render->loadUrl(file); } void Monitor::slotSaveZone() { // TODO? 
or deprecate // render->saveZone(pCore->currentDoc()->projectDataFolder(), m_ruler->zone()); } void Monitor::setCustomProfile(const QString &profile, const Timecode &tc) { // TODO or deprecate Q_UNUSED(profile) m_timePos->updateTimeCode(tc); if (/* DISABLES CODE */ (true)) { return; } slotActivateMonitor(); // render->prepareProfileReset(tc.fps()); if (m_multitrackView) { m_multitrackView->setChecked(false); } // TODO: this is a temporary profile for DVD preview, it should not alter project profile // pCore->setCurrentProfile(profile); m_glMonitor->reloadProfile(); } void Monitor::resetProfile() { m_timePos->updateTimeCode(m_monitorManager->timecode()); m_glMonitor->reloadProfile(); m_glMonitor->rootObject()->setProperty("framesize", QRect(0, 0, m_glMonitor->profileSize().width(), m_glMonitor->profileSize().height())); double fps = m_monitorManager->timecode().fps(); // Update drop frame info m_qmlManager->setProperty(QStringLiteral("dropped"), false); m_qmlManager->setProperty(QStringLiteral("fps"), QString::number(fps, 'g', 2)); } void Monitor::resetConsumer(bool fullReset) { m_glMonitor->resetConsumer(fullReset); } const QString Monitor::sceneList(const QString &root, const QString &fullPath) { return m_glMonitor->sceneList(root, fullPath); } void Monitor::updateClipZone() { if (m_controller == nullptr) { return; } m_controller->setZone(m_glMonitor->getControllerProxy()->zone()); } void Monitor::updateTimelineClipZone() { emit zoneUpdated(m_glMonitor->getControllerProxy()->zone()); } void Monitor::switchDropFrames(bool drop) { m_glMonitor->setDropFrames(drop); } void Monitor::switchMonitorInfo(int code) { int currentOverlay; if (m_id == Kdenlive::ClipMonitor) { currentOverlay = KdenliveSettings::displayClipMonitorInfo(); currentOverlay ^= code; KdenliveSettings::setDisplayClipMonitorInfo(currentOverlay); } else { currentOverlay = KdenliveSettings::displayProjectMonitorInfo(); currentOverlay ^= code; KdenliveSettings::setDisplayProjectMonitorInfo(currentOverlay); } updateQmlDisplay(currentOverlay); } void Monitor::updateMonitorGamma() { if (isActive()) { stop(); m_glMonitor->updateGamma(); start(); } else { m_glMonitor->updateGamma(); } } void Monitor::slotEditMarker() { if (m_editMarker) { m_editMarker->trigger(); } } void Monitor::updateTimecodeFormat() { m_timePos->slotUpdateTimeCodeFormat(); m_glMonitor->rootObject()->setProperty("timecode", m_timePos->displayText()); } QPoint Monitor::getZoneInfo() const { if (m_controller == nullptr) { return {}; } return m_controller->zone(); } void Monitor::slotEnableEffectScene(bool enable) { KdenliveSettings::setShowOnMonitorScene(enable); MonitorSceneType sceneType = enable ? 
m_lastMonitorSceneType : MonitorSceneDefault; slotShowEffectScene(sceneType, true); if (enable) { emit updateScene(); } } void Monitor::slotShowEffectScene(MonitorSceneType sceneType, bool temporary) { if (sceneType == MonitorSceneNone) { // We just want to revert to normal scene if (m_qmlManager->sceneType() == MonitorSceneSplit || m_qmlManager->sceneType() == MonitorSceneDefault) { // Ok, nothing to do return; } sceneType = MonitorSceneDefault; } if (!temporary) { m_lastMonitorSceneType = sceneType; } loadQmlScene(sceneType); } void Monitor::slotSeekToKeyFrame() { if (m_qmlManager->sceneType() == MonitorSceneGeometry) { // Adjust splitter pos int kfr = m_glMonitor->rootObject()->property("requestedKeyFrame").toInt(); emit seekToKeyframe(kfr); } } void Monitor::setUpEffectGeometry(const QRect &r, const QVariantList &list, const QVariantList &types) { QQuickItem *root = m_glMonitor->rootObject(); if (!root) { return; } if (!list.isEmpty()) { root->setProperty("centerPointsTypes", types); root->setProperty("centerPoints", list); } if (!r.isEmpty()) { root->setProperty("framesize", r); } } void Monitor::setEffectSceneProperty(const QString &name, const QVariant &value) { QQuickItem *root = m_glMonitor->rootObject(); if (!root) { return; } root->setProperty(name.toUtf8().constData(), value); } QRect Monitor::effectRect() const { QQuickItem *root = m_glMonitor->rootObject(); if (!root) { return {}; } return root->property("framesize").toRect(); } QVariantList Monitor::effectPolygon() const { QQuickItem *root = m_glMonitor->rootObject(); if (!root) { return QVariantList(); } return root->property("centerPoints").toList(); } QVariantList Monitor::effectRoto() const { QQuickItem *root = m_glMonitor->rootObject(); if (!root) { return QVariantList(); } QVariantList points = root->property("centerPoints").toList(); QVariantList controlPoints = root->property("centerPointsTypes").toList(); // rotoscoping effect needs a list of QVariantList mix; mix.reserve(points.count() * 3); for (int i = 0; i < points.count(); i++) { mix << controlPoints.at(2 * i); mix << points.at(i); mix << controlPoints.at(2 * i + 1); } return mix; } void Monitor::setEffectKeyframe(bool enable) { QQuickItem *root = m_glMonitor->rootObject(); if (root) { root->setProperty("iskeyframe", enable); } } bool Monitor::effectSceneDisplayed(MonitorSceneType effectType) { return m_qmlManager->sceneType() == effectType; } void Monitor::slotSetVolume(int volume) { KdenliveSettings::setVolume(volume); QIcon icon; double renderVolume = m_glMonitor->volume(); m_glMonitor->setVolume((double)volume / 100.0); if (renderVolume > 0 && volume > 0) { return; } /*if (volume == 0) { icon = QIcon::fromTheme(QStringLiteral("audio-volume-muted")); } else { icon = QIcon::fromTheme(QStringLiteral("audio-volume-medium")); }*/ } void Monitor::sendFrameForAnalysis(bool analyse) { m_glMonitor->sendFrameForAnalysis = analyse; } void Monitor::updateAudioForAnalysis() { m_glMonitor->updateAudioForAnalysis(); } void Monitor::onFrameDisplayed(const SharedFrame &frame) { m_monitorManager->frameDisplayed(frame); if (!m_glMonitor->checkFrameNumber(frame.get_position(), m_id == Kdenlive::ClipMonitor ? 
0 : TimelineModel::seekDuration + 1)) { m_playAction->setActive(false); } checkDrops(m_glMonitor->droppedFrames()); } void Monitor::checkDrops(int dropped) { if (m_droppedTimer.isValid()) { if (m_droppedTimer.hasExpired(1000)) { m_droppedTimer.invalidate(); double fps = m_monitorManager->timecode().fps(); if (dropped == 0) { // No dropped frames since last check m_qmlManager->setProperty(QStringLiteral("dropped"), false); m_qmlManager->setProperty(QStringLiteral("fps"), QString::number(fps, 'g', 2)); } else { m_glMonitor->resetDrops(); fps -= dropped; m_qmlManager->setProperty(QStringLiteral("dropped"), true); m_qmlManager->setProperty(QStringLiteral("fps"), QString::number(fps, 'g', 2)); m_droppedTimer.start(); } } } else if (dropped > 0) { // Start m_dropTimer m_glMonitor->resetDrops(); m_droppedTimer.start(); } } void Monitor::reloadProducer(const QString &id) { if (!m_controller) { return; } if (m_controller->AbstractProjectItem::clipId() == id) { slotOpenClip(m_controller); } } QString Monitor::getMarkerThumb(GenTime pos) { if (!m_controller) { return QString(); } if (!m_controller->getClipHash().isEmpty()) { QString url = m_monitorManager->getCacheFolder(CacheThumbs) .absoluteFilePath(m_controller->getClipHash() + QLatin1Char('#') + QString::number((int)pos.frames(m_monitorManager->timecode().fps())) + QStringLiteral(".png")); if (QFile::exists(url)) { return url; } } return QString(); } const QString Monitor::projectFolder() const { return m_monitorManager->getProjectFolder(); } void Monitor::setPalette(const QPalette &p) { QWidget::setPalette(p); QList allButtons = this->findChildren(); for (int i = 0; i < allButtons.count(); i++) { QToolButton *m = allButtons.at(i); QIcon ic = m->icon(); if (ic.isNull() || ic.name().isEmpty()) { continue; } QIcon newIcon = QIcon::fromTheme(ic.name()); m->setIcon(newIcon); } QQuickItem *root = m_glMonitor->rootObject(); if (root) { QMetaObject::invokeMethod(root, "updatePalette"); } m_audioMeterWidget->refreshPixmap(); } void Monitor::gpuError() { qCWarning(KDENLIVE_LOG) << " + + + + Error initializing Movit GLSL manager"; warningMessage(i18n("Cannot initialize Movit's GLSL manager, please disable Movit"), -1); } void Monitor::warningMessage(const QString &text, int timeout, const QList &actions) { m_infoMessage->setMessageType(KMessageWidget::Warning); m_infoMessage->setText(text); for (QAction *action : actions) { m_infoMessage->addAction(action); } m_infoMessage->setCloseButtonVisible(true); m_infoMessage->animatedShow(); if (timeout > 0) { QTimer::singleShot(timeout, m_infoMessage, &KMessageWidget::animatedHide); } } void Monitor::activateSplit() { loadQmlScene(MonitorSceneSplit); if (isActive()) { m_glMonitor->requestRefresh(); } else if (slotActivateMonitor()) { start(); } } void Monitor::slotSwitchCompare(bool enable) { if (m_id == Kdenlive::ProjectMonitor) { if (enable) { if (m_qmlManager->sceneType() == MonitorSceneSplit) { // Split scene is already active return; } m_splitEffect = new Mlt::Filter(pCore->getCurrentProfile()->profile(), "frei0r.alphagrad"); if ((m_splitEffect != nullptr) && m_splitEffect->is_valid()) { m_splitEffect->set("0", 0.5); // 0 is the Clip left parameter m_splitEffect->set("1", 0); // 1 is gradient width m_splitEffect->set("2", -0.747); // 2 is tilt } else { // frei0r.scal0tilt is not available warningMessage(i18n("The alphagrad filter is required for that feature, please install frei0r and restart Kdenlive")); return; } emit createSplitOverlay(m_splitEffect); return; } // Delete temp track emit 
removeSplitOverlay(); delete m_splitEffect; m_splitEffect = nullptr; loadQmlScene(MonitorSceneDefault); if (isActive()) { m_glMonitor->requestRefresh(); } else if (slotActivateMonitor()) { start(); } return; } if (m_controller == nullptr || !m_controller->hasEffects()) { // disable split effect if (m_controller) { pCore->displayMessage(i18n("Clip has no effects"), InformationMessage); } else { pCore->displayMessage(i18n("Select a clip in project bin to compare effect"), InformationMessage); } return; } if (enable) { if (m_qmlManager->sceneType() == MonitorSceneSplit) { // Split scene is already active qDebug() << " . . . .. ALREADY ACTIVE"; return; } buildSplitEffect(m_controller->masterProducer()); } else if (m_splitEffect) { // TODO m_glMonitor->setProducer(m_controller->originalProducer(), isActive(), position()); delete m_splitEffect; m_splitProducer.reset(); m_splitEffect = nullptr; loadQmlScene(MonitorSceneDefault); } slotActivateMonitor(); } void Monitor::buildSplitEffect(Mlt::Producer *original) { m_splitEffect = new Mlt::Filter(pCore->getCurrentProfile()->profile(), "frei0r.alphagrad"); if ((m_splitEffect != nullptr) && m_splitEffect->is_valid()) { m_splitEffect->set("0", 0.5); // 0 is the Clip left parameter m_splitEffect->set("1", 0); // 1 is gradient width m_splitEffect->set("2", -0.747); // 2 is tilt } else { // frei0r.scal0tilt is not available pCore->displayMessage(i18n("The alphagrad filter is required for that feature, please install frei0r and restart Kdenlive"), ErrorMessage); return; } QString splitTransition = TransitionsRepository::get()->getCompositingTransition(); Mlt::Transition t(pCore->getCurrentProfile()->profile(), splitTransition.toUtf8().constData()); if (!t.is_valid()) { delete m_splitEffect; pCore->displayMessage(i18n("The cairoblend transition is required for that feature, please install frei0r and restart Kdenlive"), ErrorMessage); return; } Mlt::Tractor trac(pCore->getCurrentProfile()->profile()); std::shared_ptr clone = ProjectClip::cloneProducer(std::make_shared(original)); // Delete all effects int ct = 0; Mlt::Filter *filter = clone->filter(ct); while (filter != nullptr) { QString ix = QString::fromLatin1(filter->get("kdenlive_id")); if (!ix.isEmpty()) { if (clone->detach(*filter) == 0) { } else { ct++; } } else { ct++; } delete filter; filter = clone->filter(ct); } trac.set_track(*original, 0); trac.set_track(*clone.get(), 1); clone.get()->attach(*m_splitEffect); t.set("always_active", 1); trac.plant_transition(t, 0, 1); delete original; m_splitProducer = std::make_shared(trac.get_producer()); m_glMonitor->setProducer(m_splitProducer, isActive(), position()); m_glMonitor->setRulerInfo((int)m_controller->frameDuration(), m_controller->getMarkerModel()); loadQmlScene(MonitorSceneSplit); } QSize Monitor::profileSize() const { return m_glMonitor->profileSize(); } void Monitor::loadQmlScene(MonitorSceneType type) { if (m_id == Kdenlive::DvdMonitor || type == m_qmlManager->sceneType()) { return; } bool sceneWithEdit = type == MonitorSceneGeometry || type == MonitorSceneCorners || type == MonitorSceneRoto; if ((m_sceneVisibilityAction != nullptr) && !m_sceneVisibilityAction->isChecked() && sceneWithEdit) { // User doesn't want effect scenes pCore->displayMessage(i18n("Enable edit mode in monitor to edit effect"), InformationMessage, 500); type = MonitorSceneDefault; } double ratio = (double)m_glMonitor->profileSize().width() / (int)(m_glMonitor->profileSize().height() * pCore->getCurrentProfile()->dar() + 0.5); m_qmlManager->setScene(m_id, type, 
m_glMonitor->profileSize(), ratio, m_glMonitor->displayRect(), m_glMonitor->zoom(), m_timePos->maximum()); QQuickItem *root = m_glMonitor->rootObject(); switch (type) { case MonitorSceneSplit: QObject::connect(root, SIGNAL(qmlMoveSplit()), this, SLOT(slotAdjustEffectCompare()), Qt::UniqueConnection); break; case MonitorSceneGeometry: case MonitorSceneCorners: case MonitorSceneRoto: break; case MonitorSceneRipple: QObject::connect(root, SIGNAL(doAcceptRipple(bool)), this, SIGNAL(acceptRipple(bool)), Qt::UniqueConnection); QObject::connect(root, SIGNAL(switchTrimMode(int)), this, SIGNAL(switchTrimMode(int)), Qt::UniqueConnection); break; case MonitorSceneDefault: QObject::connect(root, SIGNAL(editCurrentMarker()), this, SLOT(slotEditInlineMarker()), Qt::UniqueConnection); m_qmlManager->setProperty(QStringLiteral("timecode"), m_timePos->displayText()); if (m_id == Kdenlive::ClipMonitor) { updateQmlDisplay(KdenliveSettings::displayClipMonitorInfo()); } else if (m_id == Kdenlive::ProjectMonitor) { updateQmlDisplay(KdenliveSettings::displayProjectMonitorInfo()); } break; default: break; } m_qmlManager->setProperty(QStringLiteral("fps"), QString::number(m_monitorManager->timecode().fps(), 'g', 2)); } void Monitor::setQmlProperty(const QString &name, const QVariant &value) { m_qmlManager->setProperty(name, value); } void Monitor::slotAdjustEffectCompare() { QRect r = m_glMonitor->rect(); double percent = 0.5; if (m_qmlManager->sceneType() == MonitorSceneSplit) { // Adjust splitter pos QQuickItem *root = m_glMonitor->rootObject(); percent = 0.5 - ((root->property("splitterPos").toInt() - r.left() - r.width() / 2.0) / (double)r.width() / 2.0) / 0.75; // Store real frame percentage for resize events root->setProperty("realpercent", percent); } if (m_splitEffect) { m_splitEffect->set("0", percent); } m_glMonitor->refresh(); } void Monitor::slotSwitchRec(bool enable) { if (!m_recManager) { return; } if (enable) { m_toolbar->setVisible(false); m_recManager->toolbar()->setVisible(true); } else if (m_recManager->toolbar()->isVisible()) { m_recManager->stop(); m_toolbar->setVisible(true); emit refreshCurrentClip(); } } void Monitor::doKeyPressEvent(QKeyEvent *ev) { keyPressEvent(ev); } void Monitor::slotEditInlineMarker() { QQuickItem *root = m_glMonitor->rootObject(); if (root) { std::shared_ptr model; if (m_controller) { // We are editing a clip marker model = m_controller->getMarkerModel(); } else { model = pCore->currentDoc()->getGuideModel(); } QString newComment = root->property("markerText").toString(); bool found = false; CommentedTime oldMarker = model->getMarker(m_timePos->gentime(), &found); if (!found || newComment == oldMarker.comment()) { // No change return; } oldMarker.setComment(newComment); model->addMarker(oldMarker.time(), oldMarker.comment(), oldMarker.markerType()); } } void Monitor::prepareAudioThumb(int channels, const QList &audioCache) { m_glMonitor->setAudioThumb(channels, audioCache); } void Monitor::slotSwitchAudioMonitor() { if (!m_audioMeterWidget->isValid) { KdenliveSettings::setMonitoraudio(0x01); m_audioMeterWidget->setVisibility(false); return; } int currentOverlay = KdenliveSettings::monitoraudio(); currentOverlay ^= m_id; KdenliveSettings::setMonitoraudio(currentOverlay); if ((KdenliveSettings::monitoraudio() & m_id) != 0) { // We want to enable this audio monitor, so make monitor active slotActivateMonitor(); } displayAudioMonitor(isActive()); } void Monitor::displayAudioMonitor(bool isActive) { bool enable = isActive && ((KdenliveSettings::monitoraudio() & m_id) != 
0); if (enable) { connect(m_monitorManager, &MonitorManager::frameDisplayed, m_audioMeterWidget, &ScopeWidget::onNewFrame, Qt::UniqueConnection); } else { disconnect(m_monitorManager, &MonitorManager::frameDisplayed, m_audioMeterWidget, &ScopeWidget::onNewFrame); } m_audioMeterWidget->setVisibility((KdenliveSettings::monitoraudio() & m_id) != 0); } void Monitor::updateQmlDisplay(int currentOverlay) { m_glMonitor->rootObject()->setVisible((currentOverlay & 0x01) != 0); m_glMonitor->rootObject()->setProperty("showMarkers", currentOverlay & 0x04); m_glMonitor->rootObject()->setProperty("showFps", currentOverlay & 0x20); m_glMonitor->rootObject()->setProperty("showTimecode", currentOverlay & 0x02); m_glMonitor->rootObject()->setProperty("showAudiothumb", currentOverlay & 0x10); } void Monitor::clearDisplay() { m_glMonitor->clear(); } void Monitor::panView(QPoint diff) { // Only pan if scrollbars are visible if (m_horizontalScroll->isVisible()) { m_horizontalScroll->setValue(m_horizontalScroll->value() + diff.x()); } if (m_verticalScroll->isVisible()) { m_verticalScroll->setValue(m_verticalScroll->value() + diff.y()); } } void Monitor::requestSeek(int pos) { m_glMonitor->seek(pos); } void Monitor::setProducer(std::shared_ptr producer, int pos) { m_glMonitor->setProducer(std::move(producer), isActive(), pos); } void Monitor::reconfigure() { m_glMonitor->reconfigure(); } void Monitor::slotSeekPosition(int pos) { m_timePos->setValue(pos); checkOverlay(); } void Monitor::slotStart() { slotActivateMonitor(); m_glMonitor->switchPlay(false); m_glMonitor->seek(0); } void Monitor::slotEnd() { slotActivateMonitor(); m_glMonitor->switchPlay(false); if (m_id == Kdenlive::ClipMonitor) { m_glMonitor->seek(m_glMonitor->duration()); } else { m_glMonitor->seek(pCore->projectDuration()); } } void Monitor::addSnapPoint(int pos) { m_snaps->addPoint(pos); } void Monitor::removeSnapPoint(int pos) { m_snaps->removePoint(pos); } void Monitor::slotZoomIn() { m_glMonitor->slotZoom(true); } void Monitor::slotZoomOut() { m_glMonitor->slotZoom(false); } void Monitor::setConsumerProperty(const QString &name, const QString &value) { m_glMonitor->setConsumerProperty(name, value); } diff --git a/src/monitor/qmlmanager.cpp b/src/monitor/qmlmanager.cpp index 803f8f952..2aa3ad8db 100644 --- a/src/monitor/qmlmanager.cpp +++ b/src/monitor/qmlmanager.cpp @@ -1,159 +1,159 @@ /*************************************************************************** * Copyright (C) 2016 by Jean-Baptiste Mardelle (jb@kdenlive.org) * * This file is part of Kdenlive. See www.kdenlive.org. * * * * This program is free software; you can redistribute it and/or * * modify it under the terms of the GNU General Public License as * * published by the Free Software Foundation; either version 2 of * * the License or (at your option) version 3 or any later version * * accepted by the membership of KDE e.V. (or its successor approved * * by the membership of KDE e.V.), which shall act as a proxy * * defined in Section 14 of version 3 of the license. * * * * This program is distributed in the hope that it will be useful, * * but WITHOUT ANY WARRANTY; without even the implied warranty of * * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * * GNU General Public License for more details. * * * * You should have received a copy of the GNU General Public License * * along with this program. If not, see . 
* ***************************************************************************/ #include "qmlmanager.h" #include "qml/qmlaudiothumb.h" #include #include #include QmlManager::QmlManager(QQuickView *view) : QObject(view) , m_view(view) , m_sceneType(MonitorSceneNone) { } void QmlManager::enableAudioThumbs(bool enable) { auto *audioThumbDisplay = m_view->rootObject()->findChild(QStringLiteral("audiothumb")); if (audioThumbDisplay) { audioThumbDisplay->setProperty("stateVisible", enable); } } MonitorSceneType QmlManager::sceneType() const { return m_sceneType; } void QmlManager::setProperty(const QString &name, const QVariant &value) { m_view->rootObject()->setProperty(name.toUtf8().constData(), value); } void QmlManager::setScene(Kdenlive::MonitorId id, MonitorSceneType type, QSize profile, double profileStretch, QRect displayRect, double zoom, int duration) { if (type == m_sceneType) { // Scene type already active return; } if (id == Kdenlive::DvdMonitor) { return; } m_sceneType = type; QQuickItem *root = nullptr; + const QFont ft = QFontDatabase::systemFont(QFontDatabase::FixedFont); + m_view->rootContext()->setContextProperty("fixedFont", ft); switch (type) { case MonitorSceneGeometry: m_view->setSource(QUrl(QStringLiteral("qrc:/qml/kdenlivemonitoreffectscene.qml"))); root = m_view->rootObject(); QObject::connect(root, SIGNAL(effectChanged()), this, SLOT(effectRectChanged()), Qt::UniqueConnection); QObject::connect(root, SIGNAL(centersChanged()), this, SLOT(effectPolygonChanged()), Qt::UniqueConnection); root->setProperty("profile", QPoint(profile.width(), profile.height())); root->setProperty("framesize", QRect(0, 0, profile.width(), profile.height())); root->setProperty("scalex", (double)displayRect.width() / profile.width() * zoom); root->setProperty("scaley", (double)displayRect.width() / profileStretch / profile.width() * zoom); root->setProperty("center", displayRect.center()); break; case MonitorSceneCorners: qDebug() << "/// LOADING CORNERS SCENE\n\n+++++++++++++++++++++++++\n------------------\n+++++++++++++++++"; m_view->setSource(QUrl(QStringLiteral("qrc:/qml/kdenlivemonitorcornerscene.qml"))); root = m_view->rootObject(); QObject::connect(root, SIGNAL(effectPolygonChanged()), this, SLOT(effectPolygonChanged()), Qt::UniqueConnection); root->setProperty("profile", QPoint(profile.width(), profile.height())); root->setProperty("framesize", QRect(0, 0, profile.width(), profile.height())); root->setProperty("scalex", (double)displayRect.width() / profile.width() * zoom); root->setProperty("scaley", (double)displayRect.width() / profileStretch / profile.width() * zoom); root->setProperty("stretch", profileStretch); root->setProperty("center", displayRect.center()); break; case MonitorSceneRoto: m_view->setSource(QUrl(QStringLiteral("qrc:/qml/kdenlivemonitorrotoscene.qml"))); root = m_view->rootObject(); QObject::connect(root, SIGNAL(effectPolygonChanged()), this, SLOT(effectRotoChanged()), Qt::UniqueConnection); root->setProperty("profile", QPoint(profile.width(), profile.height())); root->setProperty("framesize", QRect(0, 0, profile.width(), profile.height())); root->setProperty("scalex", (double)displayRect.width() / profile.width() * zoom); root->setProperty("scaley", (double)displayRect.width() / profileStretch / profile.width() * zoom); root->setProperty("stretch", profileStretch); root->setProperty("center", displayRect.center()); break; case MonitorSceneSplit: m_view->setSource(QUrl(QStringLiteral("qrc:/qml/kdenlivemonitorsplit.qml"))); root = m_view->rootObject(); break; case 
MonitorSceneRipple: m_view->setSource(QUrl(QStringLiteral("qrc:/qml/kdenlivemonitorripple.qml"))); root = m_view->rootObject(); break; default: m_view->setSource( QUrl(id == Kdenlive::ClipMonitor ? QStringLiteral("qrc:/qml/kdenliveclipmonitor.qml") : QStringLiteral("qrc:/qml/kdenlivemonitor.qml"))); root = m_view->rootObject(); root->setProperty("profile", QPoint(profile.width(), profile.height())); root->setProperty("scalex", (double)displayRect.width() / profile.width() * zoom); root->setProperty("scaley", (double)displayRect.width() / profileStretch / profile.width() * zoom); break; } if (root && duration > 0) { root->setProperty("duration", duration); } - const QFont ft = QFontDatabase::systemFont(QFontDatabase::FixedFont); - m_view->rootContext()->setContextProperty("fixedFont", ft); } void QmlManager::effectRectChanged() { if (!m_view->rootObject()) { return; } const QRect rect = m_view->rootObject()->property("framesize").toRect(); emit effectChanged(rect); } void QmlManager::effectPolygonChanged() { if (!m_view->rootObject()) { return; } QVariantList points = m_view->rootObject()->property("centerPoints").toList(); qDebug() << "// GOT NEW POLYGON FROM QML: " << points; emit effectPointsChanged(points); } void QmlManager::effectRotoChanged() { if (!m_view->rootObject()) { return; } QVariantList points = m_view->rootObject()->property("centerPoints").toList(); QVariantList controlPoints = m_view->rootObject()->property("centerPointsTypes").toList(); // rotoscoping effect needs a list of QVariantList mix; mix.reserve(points.count()); for (int i = 0; i < points.count(); i++) { mix << controlPoints.at(2 * i); mix << points.at(i); mix << controlPoints.at(2 * i + 1); } emit effectPointsChanged(mix); } diff --git a/src/timeline2/model/clipmodel.cpp b/src/timeline2/model/clipmodel.cpp index b91041982..2fdb4cc83 100644 --- a/src/timeline2/model/clipmodel.cpp +++ b/src/timeline2/model/clipmodel.cpp @@ -1,728 +1,749 @@ /*************************************************************************** * Copyright (C) 2017 by Nicolas Carion * * This file is part of Kdenlive. See www.kdenlive.org. * * * * This program is free software; you can redistribute it and/or modify * * it under the terms of the GNU General Public License as published by * * the Free Software Foundation; either version 2 of the License, or * * (at your option) version 3 or any later version accepted by the * * membership of KDE e.V. (or its successor approved by the membership * * of KDE e.V.), which shall act as a proxy defined in Section 14 of * * version 3 of the license. * * * * This program is distributed in the hope that it will be useful, * * but WITHOUT ANY WARRANTY; without even the implied warranty of * * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * * GNU General Public License for more details. * * * * You should have received a copy of the GNU General Public License * * along with this program. If not, see . 
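// Illustrative sketch (not from the Kdenlive sources) of the list layout built by
// QmlManager::effectRotoChanged() above: for every centre point the preceding
// control handle, the point itself and the following handle are appended, so the
// rotoscoping effect receives a flat list h0a, p0, h0b, h1a, p1, h1b, ...
// Whether index 2*i is the incoming or outgoing handle is an assumption here;
// the concrete values are invented to show the shape of the data.
#include <QPointF>
#include <QVariant>
void buildRotoList()
{
    QVariantList points = {QPointF(10, 10), QPointF(50, 10)};   // centre points reported by QML
    QVariantList handles = {QPointF(5, 12), QPointF(15, 8),     // two handles per centre point
                            QPointF(45, 8), QPointF(55, 12)};
    QVariantList mix;
    mix.reserve(points.count());
    for (int i = 0; i < points.count(); i++) {
        mix << handles.at(2 * i) << points.at(i) << handles.at(2 * i + 1);
    }
    // mix now holds 6 entries: handle, point, handle for each of the two centre points.
}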
* ***************************************************************************/ #include "clipmodel.hpp" #include "bin/projectclip.h" #include "bin/projectitemmodel.h" #include "clipsnapmodel.hpp" #include "core.h" #include "effects/effectstack/model/effectstackmodel.hpp" #include "logger.hpp" #include "macros.hpp" #include "timelinemodel.hpp" #include "trackmodel.hpp" #include #include #include #include ClipModel::ClipModel(const std::shared_ptr &parent, std::shared_ptr prod, const QString &binClipId, int id, PlaylistState::ClipState state, double speed) : MoveableItem(parent, id) , m_producer(std::move(prod)) , m_effectStack(EffectStackModel::construct(m_producer, {ObjectType::TimelineClip, m_id}, parent->m_undoStack)) , m_clipMarkerModel(new ClipSnapModel()) , m_binClipId(binClipId) , forceThumbReload(false) , m_currentState(state) , m_speed(speed) , m_fakeTrack(-1) + , m_positionOffset(0) { m_producer->set("kdenlive:id", binClipId.toUtf8().constData()); m_producer->set("_kdenlive_cid", m_id); std::shared_ptr binClip = pCore->projectItemModel()->getClipByBinID(m_binClipId); m_canBeVideo = binClip->hasVideo(); m_canBeAudio = binClip->hasAudio(); m_clipType = binClip->clipType(); if (binClip) { m_endlessResize = !binClip->hasLimitedDuration(); } else { m_endlessResize = false; } QObject::connect(m_effectStack.get(), &EffectStackModel::dataChanged, [&](const QModelIndex &, const QModelIndex &, QVector roles) { qDebug() << "// GOT CLIP STACK DATA CHANGE: " << roles; if (m_currentTrackId != -1) { if (auto ptr = m_parent.lock()) { QModelIndex ix = ptr->makeClipIndexFromID(m_id); qDebug() << "// GOT CLIP STACK DATA CHANGE DONE: " << ix << " = " << roles; ptr->dataChanged(ix, ix, roles); } } }); } int ClipModel::construct(const std::shared_ptr &parent, const QString &binClipId, int id, PlaylistState::ClipState state, double speed) { id = (id == -1 ? 
TimelineModel::getNextId() : id); std::shared_ptr binClip = pCore->projectItemModel()->getClipByBinID(binClipId); // We refine the state according to what the clip can actually produce std::pair videoAudio = stateToBool(state); videoAudio.first = videoAudio.first && binClip->hasVideo(); videoAudio.second = videoAudio.second && binClip->hasAudio(); state = stateFromBool(videoAudio); std::shared_ptr cutProducer = binClip->getTimelineProducer(-1, id, state, speed); std::shared_ptr clip(new ClipModel(parent, cutProducer, binClipId, id, state, speed)); TRACE_CONSTR(clip.get(), parent, binClipId, id, state, speed); clip->setClipState_lambda(state)(); parent->registerClip(clip); clip->m_clipMarkerModel->setReferenceModel(binClip->getMarkerModel()); return id; } int ClipModel::construct(const std::shared_ptr &parent, const QString &binClipId, const std::shared_ptr &producer, PlaylistState::ClipState state) { // we hand the producer to the bin clip, and in return we get a cut to a good master producer // We might not be able to use directly the producer that we receive as an argument, because it cannot share the same master producer with any other // clipModel (due to a mlt limitation, see ProjectClip doc) int id = TimelineModel::getNextId(); std::shared_ptr binClip = pCore->projectItemModel()->getClipByBinID(binClipId); // We refine the state according to what the clip can actually produce std::pair videoAudio = stateToBool(state); videoAudio.first = videoAudio.first && binClip->hasVideo(); videoAudio.second = videoAudio.second && binClip->hasAudio(); state = stateFromBool(videoAudio); double speed = 1.0; if (QString::fromUtf8(producer->parent().get("mlt_service")) == QLatin1String("timewarp")) { speed = producer->parent().get_double("warp_speed"); } auto result = binClip->giveMasterAndGetTimelineProducer(id, producer, state); std::shared_ptr clip(new ClipModel(parent, result.first, binClipId, id, state, speed)); clip->setClipState_lambda(state)(); clip->m_effectStack->importEffects(producer, state, result.second); parent->registerClip(clip); clip->m_clipMarkerModel->setReferenceModel(binClip->getMarkerModel()); return id; } void ClipModel::registerClipToBin(std::shared_ptr service, bool registerProducer) { std::shared_ptr binClip = pCore->projectItemModel()->getClipByBinID(m_binClipId); if (!binClip) { qDebug() << "Error : Bin clip for id: " << m_binClipId << " NOT AVAILABLE!!!"; } qDebug() << "REGISTRATION " << m_id << "ptr count" << m_parent.use_count(); binClip->registerService(m_parent, m_id, std::move(service), registerProducer); } void ClipModel::deregisterClipToBin() { std::shared_ptr binClip = pCore->projectItemModel()->getClipByBinID(m_binClipId); binClip->deregisterTimelineClip(m_id); } ClipModel::~ClipModel() = default; bool ClipModel::requestResize(int size, bool right, Fun &undo, Fun &redo, bool logUndo) { QWriteLocker locker(&m_lock); // qDebug() << "RESIZE CLIP" << m_id << "target size=" << size << "right=" << right << "endless=" << m_endlessResize << "length" << // m_producer->get_length(); if (!m_endlessResize && (size <= 0 || size > m_producer->get_length())) { return false; } int delta = getPlaytime() - size; if (delta == 0) { return true; } int in = m_producer->get_in(); int out = m_producer->get_out(); int old_in = in, old_out = out; // check if there is enough space on the chosen side if (!right && in + delta < 0 && !m_endlessResize) { return false; } if (!m_endlessResize && right && (out - delta >= m_producer->get_length())) { return false; } if (right) { out -= delta; } 
else { in += delta; } // qDebug() << "Resize facts delta =" << delta << "old in" << old_in << "old_out" << old_out << "in" << in << "out" << out; std::function track_operation = []() { return true; }; std::function track_reverse = []() { return true; }; int outPoint = out; int inPoint = in; int offset = 0; if (m_endlessResize) { offset = inPoint; outPoint = out - in; inPoint = 0; } if (m_currentTrackId != -1) { if (auto ptr = m_parent.lock()) { track_operation = ptr->getTrackById(m_currentTrackId)->requestClipResize_lambda(m_id, inPoint, outPoint, right); } else { qDebug() << "Error : Moving clip failed because parent timeline is not available anymore"; Q_ASSERT(false); } } else { // Ensure producer is long enough if (m_endlessResize && outPoint > m_producer->parent().get_length()) { m_producer->set("length", outPoint + 1); } } Fun operation = [this, inPoint, outPoint, track_operation]() { if (track_operation()) { setInOut(inPoint, outPoint); return true; } return false; }; if (operation()) { // Now, we are in the state in which the timeline should be when we try to revert current action. So we can build the reverse action from here if (m_currentTrackId != -1) { QVector roles{TimelineModel::DurationRole}; if (!right) { roles.push_back(TimelineModel::StartRole); roles.push_back(TimelineModel::InPointRole); } else { roles.push_back(TimelineModel::OutPointRole); } if (auto ptr = m_parent.lock()) { QModelIndex ix = ptr->makeClipIndexFromID(m_id); // TODO: integrate in undo ptr->dataChanged(ix, ix, roles); track_reverse = ptr->getTrackById(m_currentTrackId)->requestClipResize_lambda(m_id, old_in, old_out, right); } } Fun reverse = [this, old_in, old_out, track_reverse]() { if (track_reverse()) { setInOut(old_in, old_out); return true; } return false; }; qDebug() << "----------\n-----------\n// ADJUSTING EFFECT LENGTH, LOGUNDO " << logUndo << ", " << old_in << "/" << inPoint << ", " << m_producer->get_playtime(); if (logUndo) { adjustEffectLength(right, old_in, inPoint, old_out - old_in, m_producer->get_playtime(), offset, reverse, operation, logUndo); } UPDATE_UNDO_REDO(operation, reverse, undo, redo); return true; } return false; } const QString ClipModel::getProperty(const QString &name) const { READ_LOCK(); if (service()->parent().is_valid()) { return QString::fromUtf8(service()->parent().get(name.toUtf8().constData())); } return QString::fromUtf8(service()->get(name.toUtf8().constData())); } int ClipModel::getIntProperty(const QString &name) const { READ_LOCK(); if (service()->parent().is_valid()) { return service()->parent().get_int(name.toUtf8().constData()); } return service()->get_int(name.toUtf8().constData()); } QSize ClipModel::getFrameSize() const { READ_LOCK(); if (service()->parent().is_valid()) { return QSize(service()->parent().get_int("meta.media.width"), service()->parent().get_int("meta.media.height")); } return {service()->get_int("meta.media.width"), service()->get_int("meta.media.height")}; } double ClipModel::getDoubleProperty(const QString &name) const { READ_LOCK(); if (service()->parent().is_valid()) { return service()->parent().get_double(name.toUtf8().constData()); } return service()->get_double(name.toUtf8().constData()); } Mlt::Producer *ClipModel::service() const { READ_LOCK(); return m_producer.get(); } std::shared_ptr ClipModel::getProducer() { READ_LOCK(); return m_producer; } int ClipModel::getPlaytime() const { READ_LOCK(); return m_producer->get_playtime(); } void ClipModel::setTimelineEffectsEnabled(bool enabled) { QWriteLocker locker(&m_lock); 
m_effectStack->setEffectStackEnabled(enabled); } bool ClipModel::addEffect(const QString &effectId) { QWriteLocker locker(&m_lock); if (EffectsRepository::get()->getType(effectId) == EffectType::Audio) { if (m_currentState == PlaylistState::VideoOnly) { return false; } } else if (m_currentState == PlaylistState::AudioOnly) { return false; } m_effectStack->appendEffect(effectId); return true; } bool ClipModel::copyEffect(const std::shared_ptr &stackModel, int rowId) { QWriteLocker locker(&m_lock); m_effectStack->copyEffect(stackModel->getEffectStackRow(rowId), m_currentState); return true; } bool ClipModel::importEffects(std::shared_ptr stackModel) { QWriteLocker locker(&m_lock); m_effectStack->importEffects(std::move(stackModel), m_currentState); return true; } bool ClipModel::importEffects(std::weak_ptr service) { QWriteLocker locker(&m_lock); m_effectStack->importEffects(std::move(service), m_currentState); return true; } bool ClipModel::removeFade(bool fromStart) { QWriteLocker locker(&m_lock); m_effectStack->removeFade(fromStart); return true; } bool ClipModel::adjustEffectLength(bool adjustFromEnd, int oldIn, int newIn, int oldDuration, int duration, int offset, Fun &undo, Fun &redo, bool logUndo) { QWriteLocker locker(&m_lock); return m_effectStack->adjustStackLength(adjustFromEnd, oldIn, oldDuration, newIn, duration, offset, undo, redo, logUndo); } bool ClipModel::adjustEffectLength(const QString &effectName, int duration, int originalDuration, Fun &undo, Fun &redo) { QWriteLocker locker(&m_lock); qDebug() << ".... ADJUSTING FADE LENGTH: " << duration << " / " << effectName; Fun operation = [this, duration, effectName, originalDuration]() { return m_effectStack->adjustFadeLength(duration, effectName == QLatin1String("fadein") || effectName == QLatin1String("fade_to_black"), audioEnabled(), !isAudioOnly(), originalDuration > 0); }; if (operation() && originalDuration > 0) { Fun reverse = [this, originalDuration, effectName]() { return m_effectStack->adjustFadeLength(originalDuration, effectName == QLatin1String("fadein") || effectName == QLatin1String("fade_to_black"), audioEnabled(), !isAudioOnly(), true); }; UPDATE_UNDO_REDO(operation, reverse, undo, redo); } return true; } bool ClipModel::audioEnabled() const { READ_LOCK(); return stateToBool(m_currentState).second; } bool ClipModel::isAudioOnly() const { READ_LOCK(); return m_currentState == PlaylistState::AudioOnly; } void ClipModel::refreshProducerFromBin(PlaylistState::ClipState state, double speed) { // We require that the producer is not in the track when we refresh the producer, because otherwise the modification will not be propagated. Remove the clip // first, refresh, and then replant. 
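// A stripped-down sketch of the undo/redo convention used by requestResize(),
// adjustEffectLength() and setClipState() earlier in this file: build an
// `operation` closure and its `reverse`, apply `operation()` right away, and only
// on success fold both into the caller's undo/redo functors. It assumes Fun is
// std::function<bool()> (as in undohelper.hpp); the chaining below is one possible
// equivalent of the UPDATE_UNDO_REDO macro, not its literal definition.
#include <functional>
using Fun = std::function<bool()>;
static bool applyAndRecord(int &value, int newValue, Fun &undo, Fun &redo)
{
    const int oldValue = value;
    Fun operation = [&value, newValue]() { value = newValue; return true; };
    Fun reverse = [&value, oldValue]() { value = oldValue; return true; };
    if (!operation()) {
        return false;
    }
    // Chain the new step after the existing redo, and revert it before the existing undo.
    Fun previousRedo = redo, previousUndo = undo;
    redo = [previousRedo, operation]() { return previousRedo() && operation(); };
    undo = [previousUndo, reverse]() { return reverse() && previousUndo(); };
    return true;
}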
QWriteLocker locker(&m_lock); int in = getIn(); int out = getOut(); qDebug() << "refresh " << speed << m_speed << in << out; if (!qFuzzyCompare(speed, m_speed) && !qFuzzyCompare(speed, 0.)) { in = in * std::abs(m_speed / speed); out = in + getPlaytime() - 1; // prevent going out of the clip's range out = std::min(out, int(double(m_producer->get_length()) * std::abs(m_speed / speed)) - 1); m_speed = speed; qDebug() << "changing speed" << in << out << m_speed; } std::shared_ptr binClip = pCore->projectItemModel()->getClipByBinID(m_binClipId); std::shared_ptr binProducer = binClip->getTimelineProducer(m_currentTrackId, m_id, state, m_speed); m_producer = std::move(binProducer); m_producer->set_in_and_out(in, out); // replant effect stack in updated service m_effectStack->resetService(m_producer); m_producer->set("kdenlive:id", binClip->clipId().toUtf8().constData()); m_producer->set("_kdenlive_cid", m_id); m_endlessResize = !binClip->hasLimitedDuration(); } void ClipModel::refreshProducerFromBin() { refreshProducerFromBin(m_currentState); } bool ClipModel::useTimewarpProducer(double speed, Fun &undo, Fun &redo) { if (m_endlessResize) { // no timewarp for endless producers return false; } if (qFuzzyCompare(speed, m_speed)) { // nothing to do return true; } std::function local_undo = []() { return true; }; std::function local_redo = []() { return true; }; double previousSpeed = getSpeed(); int oldDuration = getPlaytime(); int newDuration = int(double(oldDuration) * std::abs(previousSpeed / speed)); int oldOut = getOut(); int oldIn = getIn(); auto operation = useTimewarpProducer_lambda(speed); auto reverse = useTimewarpProducer_lambda(previousSpeed); if (oldOut >= newDuration) { // in that case, we are going to shrink the clip when changing the producer. 
We must undo that when reloading the old producer reverse = [reverse, oldIn, oldOut, this]() { bool res = reverse(); if (res) { setInOut(oldIn, oldOut); } return res; }; } if (operation()) { UPDATE_UNDO_REDO(operation, reverse, local_undo, local_redo); bool res = requestResize(newDuration, true, local_undo, local_redo, true); if (!res) { local_undo(); return false; } UPDATE_UNDO_REDO(local_redo, local_undo, undo, redo); return true; } qDebug() << "tw: operation fail"; return false; } Fun ClipModel::useTimewarpProducer_lambda(double speed) { QWriteLocker locker(&m_lock); return [speed, this]() { qDebug() << "timeWarp producer" << speed; refreshProducerFromBin(m_currentState, speed); if (auto ptr = m_parent.lock()) { QModelIndex ix = ptr->makeClipIndexFromID(m_id); ptr->notifyChange(ix, ix, TimelineModel::SpeedRole); } return true; }; } QVariant ClipModel::getAudioWaveform() { READ_LOCK(); std::shared_ptr binClip = pCore->projectItemModel()->getClipByBinID(m_binClipId); if (binClip) { return QVariant::fromValue(binClip->audioFrameCache); } return QVariant(); } const QString &ClipModel::binId() const { return m_binClipId; } std::shared_ptr ClipModel::getMarkerModel() const { READ_LOCK(); return pCore->projectItemModel()->getClipByBinID(m_binClipId)->getMarkerModel(); } int ClipModel::audioChannels() const { READ_LOCK(); return pCore->projectItemModel()->getClipByBinID(m_binClipId)->audioChannels(); } int ClipModel::fadeIn() const { return m_effectStack->getFadePosition(true); } int ClipModel::fadeOut() const { return m_effectStack->getFadePosition(false); } double ClipModel::getSpeed() const { return m_speed; } KeyframeModel *ClipModel::getKeyframeModel() { return m_effectStack->getEffectKeyframeModel(); } bool ClipModel::showKeyframes() const { READ_LOCK(); return !service()->get_int("kdenlive:hide_keyframes"); } void ClipModel::setShowKeyframes(bool show) { QWriteLocker locker(&m_lock); service()->set("kdenlive:hide_keyframes", (int)!show); } void ClipModel::setPosition(int pos) { MoveableItem::setPosition(pos); m_clipMarkerModel->updateSnapModelPos(pos); } void ClipModel::setInOut(int in, int out) { MoveableItem::setInOut(in, out); m_clipMarkerModel->updateSnapModelInOut(std::pair(in, out)); } void ClipModel::setCurrentTrackId(int tid, bool finalMove) { if (tid == m_currentTrackId) { return; } bool registerSnap = m_currentTrackId == -1 && tid > -1; if (m_currentTrackId > -1 && tid == -1) { // Removing clip m_clipMarkerModel->deregisterSnapModel(); } MoveableItem::setCurrentTrackId(tid, finalMove); if (registerSnap) { if (auto ptr = m_parent.lock()) { m_clipMarkerModel->registerSnapModel(ptr->m_snaps, getPosition(), getIn(), getOut()); } } if (finalMove && tid != -1) { refreshProducerFromBin(m_currentState); } } Fun ClipModel::setClipState_lambda(PlaylistState::ClipState state) { QWriteLocker locker(&m_lock); return [this, state]() { if (auto ptr = m_parent.lock()) { switch (state) { case PlaylistState::Disabled: m_producer->set("set.test_audio", 1); m_producer->set("set.test_image", 1); break; case PlaylistState::VideoOnly: m_producer->set("set.test_image", 0); break; case PlaylistState::AudioOnly: m_producer->set("set.test_audio", 0); break; default: // error break; } m_currentState = state; if (m_currentTrackId != -1 && ptr->isClip(m_id)) { // if this is false, the clip is being created. 
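// Worked example of the retiming arithmetic in useTimewarpProducer() above: the
// source footage stays the same, only the timeline footprint is rescaled by the
// ratio of old and new speeds. The helper name is invented; the formula mirrors
// `newDuration = int(double(oldDuration) * std::abs(previousSpeed / speed))`.
#include <cmath>
static int retimedDuration(int oldDuration, double oldSpeed, double newSpeed)
{
    return static_cast<int>(double(oldDuration) * std::abs(oldSpeed / newSpeed));
}
// retimedDuration(100, 1.0, 2.0) == 50   // playing twice as fast halves the timeline length
// retimedDuration(100, 1.0, 0.5) == 200  // slow motion doubles it
// refreshProducerFromBin() rescales the in point with the same |oldSpeed / newSpeed| factor,
// so the cut keeps starting on the same source frame after the producer swap.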
Don't update model in that case QModelIndex ix = ptr->makeClipIndexFromID(m_id); ptr->dataChanged(ix, ix, {TimelineModel::StatusRole}); } return true; } return false; }; } bool ClipModel::setClipState(PlaylistState::ClipState state, Fun &undo, Fun &redo) { if (state == PlaylistState::VideoOnly && !canBeVideo()) { return false; } if (state == PlaylistState::AudioOnly && !canBeAudio()) { return false; } if (state == m_currentState) { return true; } auto old_state = m_currentState; auto operation = setClipState_lambda(state); if (operation()) { auto reverse = setClipState_lambda(old_state); UPDATE_UNDO_REDO(operation, reverse, undo, redo); return true; } return false; } PlaylistState::ClipState ClipModel::clipState() const { READ_LOCK(); return m_currentState; } ClipType::ProducerType ClipModel::clipType() const { READ_LOCK(); return m_clipType; } void ClipModel::passTimelineProperties(const std::shared_ptr &other) { READ_LOCK(); Mlt::Properties source(m_producer->get_properties()); Mlt::Properties dest(other->service()->get_properties()); dest.pass_list(source, "kdenlive:hide_keyframes,kdenlive:activeeffect"); } bool ClipModel::canBeVideo() const { return m_canBeVideo; } bool ClipModel::canBeAudio() const { return m_canBeAudio; } const QString ClipModel::effectNames() const { READ_LOCK(); return m_effectStack->effectNames(); } int ClipModel::getFakeTrackId() const { return m_fakeTrack; } void ClipModel::setFakeTrackId(int fid) { m_fakeTrack = fid; } int ClipModel::getFakePosition() const { return m_fakePosition; } void ClipModel::setFakePosition(int fid) { m_fakePosition = fid; } QDomElement ClipModel::toXml(QDomDocument &document) { QDomElement container = document.createElement(QStringLiteral("clip")); container.setAttribute(QStringLiteral("binid"), m_binClipId); container.setAttribute(QStringLiteral("id"), m_id); container.setAttribute(QStringLiteral("in"), getIn()); container.setAttribute(QStringLiteral("out"), getOut()); container.setAttribute(QStringLiteral("position"), getPosition()); container.setAttribute(QStringLiteral("state"), (int)m_currentState); if (auto ptr = m_parent.lock()) { int trackId = ptr->getTrackPosition(m_currentTrackId); container.setAttribute(QStringLiteral("track"), trackId); if (ptr->isAudioTrack(getCurrentTrackId())) { container.setAttribute(QStringLiteral("audioTrack"), 1); int mirrorId = ptr->getMirrorVideoTrackId(m_currentTrackId); if (mirrorId > -1) { mirrorId = ptr->getTrackPosition(mirrorId); } container.setAttribute(QStringLiteral("mirrorTrack"), mirrorId); } } container.setAttribute(QStringLiteral("speed"), m_speed); container.appendChild(m_effectStack->toXml(document)); return container; } bool ClipModel::checkConsistency() { if (!m_effectStack->checkConsistency()) { qDebug() << "Consistency check failed for effect stack"; return false; } std::shared_ptr binClip = pCore->projectItemModel()->getClipByBinID(m_binClipId); auto instances = binClip->timelineInstances(); bool found = false; for (const auto &i : instances) { if (i == m_id) { found = true; break; } } if (!found) { qDebug() << "ERROR: binClip doesn't acknowledge timeline clip existence"; return false; } if (m_currentState == PlaylistState::VideoOnly && !m_canBeVideo) { qDebug() << "ERROR: clip is in video state but doesn't have video"; return false; } if (m_currentState == PlaylistState::AudioOnly && !m_canBeAudio) { qDebug() << "ERROR: clip is in audio state but doesn't have audio"; return false; } // TODO: check speed return true; } int ClipModel::getSubPlaylistIndex() const { return
m_subPlaylistIndex; } void ClipModel::setSubPlaylistIndex(int index) { m_subPlaylistIndex = index; } +void ClipModel::setOffset(int offset) +{ + m_positionOffset = offset; + if (auto ptr = m_parent.lock()) { + QModelIndex ix = ptr->makeClipIndexFromID(m_id); + ptr->dataChanged(ix, ix, {TimelineModel::PositionOffsetRole}); + } +} + +void ClipModel::clearOffset() +{ + if (m_positionOffset != 0) { + setOffset(0); + } +} + +int ClipModel::getOffset() const +{ + return m_positionOffset; +} diff --git a/src/timeline2/model/clipmodel.hpp b/src/timeline2/model/clipmodel.hpp index f705f13c7..cdbfb783e 100644 --- a/src/timeline2/model/clipmodel.hpp +++ b/src/timeline2/model/clipmodel.hpp @@ -1,227 +1,235 @@ /*************************************************************************** * Copyright (C) 2017 by Nicolas Carion * * This file is part of Kdenlive. See www.kdenlive.org. * * * * This program is free software; you can redistribute it and/or modify * * it under the terms of the GNU General Public License as published by * * the Free Software Foundation; either version 2 of the License, or * * (at your option) version 3 or any later version accepted by the * * membership of KDE e.V. (or its successor approved by the membership * * of KDE e.V.), which shall act as a proxy defined in Section 14 of * * version 3 of the license. * * * * This program is distributed in the hope that it will be useful, * * but WITHOUT ANY WARRANTY; without even the implied warranty of * * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * * GNU General Public License for more details. * * * * You should have received a copy of the GNU General Public License * * along with this program. If not, see . * ***************************************************************************/ #ifndef CLIPMODEL_H #define CLIPMODEL_H #include "moveableItem.hpp" #include "undohelper.hpp" #include #include namespace Mlt { class Producer; } class EffectStackModel; class MarkerListModel; class TimelineModel; class TrackModel; class KeyframeModel; class ClipSnapModel; /* @brief This class represents a Clip object, as viewed by the backend. In general, the Gui associated with it will send modification queries (such as resize or move), and this class authorize them or not depending on the validity of the modifications */ class ClipModel : public MoveableItem { ClipModel() = delete; protected: /* This constructor is not meant to be called, call the static construct instead */ ClipModel(const std::shared_ptr &parent, std::shared_ptr prod, const QString &binClipId, int id, PlaylistState::ClipState state, double speed = 1.); public: ~ClipModel() override; /* @brief Creates a clip, which references itself to the parent timeline Returns the (unique) id of the created clip @param parent is a pointer to the timeline @param binClip is the id of the bin clip associated @param id Requested id of the clip. Automatic if -1 */ static int construct(const std::shared_ptr &parent, const QString &binClipId, int id, PlaylistState::ClipState state, double speed = 1.); /* @brief Creates a clip, which references itself to the parent timeline Returns the (unique) id of the created clip This variants assumes a producer is already known, which should typically happen only at loading time. Note that there is no guarantee that this producer is actually going to be used. It might be discarded. 
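// Usage sketch for the position-offset API added above (setOffset / clearOffset /
// getOffset): the value is a temporary distance between two timeline clips cut from
// the same bin clip, and setOffset() pushes it to the view through
// TimelineModel::PositionOffsetRole, exposed to QML as "positionOffset" (see
// roleNames() in timelineitemmodel.cpp further down). The surrounding calls and the
// `partnerId` variable here are invented for illustration:
//     if (auto timeline = m_parent.lock()) {
//         std::shared_ptr<ClipModel> clip = timeline->getClipPtr(clipId);       // assumed accessor
//         std::shared_ptr<ClipModel> partner = timeline->getClipPtr(partnerId);
//         clip->setOffset(partner->getPosition() - clip->getPosition());        // emits PositionOffsetRole
//         // ... once the clips are aligned again:
//         clip->clearOffset();                                                  // resets to 0, notifies only when non-zero
//     }
// A QML clip delegate can then simply bind to it, e.g. `visible: positionOffset != 0`.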
*/ static int construct(const std::shared_ptr &parent, const QString &binClipId, const std::shared_ptr &producer, PlaylistState::ClipState state); /* @brief returns a property of the clip, or from its parent if it's a cut */ const QString getProperty(const QString &name) const override; int getIntProperty(const QString &name) const; double getDoubleProperty(const QString &name) const; QSize getFrameSize() const; Q_INVOKABLE bool showKeyframes() const; Q_INVOKABLE void setShowKeyframes(bool show); /* @brief Returns true if the clip can be converted to a video clip */ bool canBeVideo() const; /* @brief Returns true if the clip can be converted to an audio clip */ bool canBeAudio() const; /* @brief Returns a comma separated list of effect names */ const QString effectNames() const; /** @brief Returns the timeline clip status (video / audio only) */ PlaylistState::ClipState clipState() const; /** @brief Returns the bin clip type (image, color, AV, ...) */ ClipType::ProducerType clipType() const; /** @brief Sets the timeline clip status (video / audio only) */ bool setClipState(PlaylistState::ClipState state, Fun &undo, Fun &redo); /** @brief The fake track is used in insert/overwrite mode. * in this case, dragging a clip is always accepted, but the change is not applied to the model. * so we use a 'fake' track id to pass to the qml view */ int getFakeTrackId() const; void setFakeTrackId(int fid); int getFakePosition() const; void setFakePosition(int fid); /* @brief Returns an XML representation of the clip with its effects */ QDomElement toXml(QDomDocument &document); protected: // helper function that creates the lambda Fun setClipState_lambda(PlaylistState::ClipState state); public: /* @brief returns the length of the item on the timeline */ int getPlaytime() const override; /** @brief Returns audio cache data from bin clip to display audio thumbs */ QVariant getAudioWaveform(); /** @brief Returns the bin clip's id */ const QString &binId() const; void registerClipToBin(std::shared_ptr service, bool registerProducer); void deregisterClipToBin(); bool addEffect(const QString &effectId); bool copyEffect(const std::shared_ptr &stackModel, int rowId); /* @brief Import effects from a different stackModel */ bool importEffects(std::shared_ptr stackModel); /* @brief Import effects from a service that contains some (another clip?) */ bool importEffects(std::weak_ptr service); bool removeFade(bool fromStart); /** @brief Adjust effects duration. Should be called after each resize / cut operation */ bool adjustEffectLength(bool adjustFromEnd, int oldIn, int newIn, int oldDuration, int duration, int offset, Fun &undo, Fun &redo, bool logUndo); bool adjustEffectLength(const QString &effectName, int duration, int originalDuration, Fun &undo, Fun &redo); void passTimelineProperties(const std::shared_ptr &other); KeyframeModel *getKeyframeModel(); int fadeIn() const; int fadeOut() const; /** @brief Tracks have two sub playlists to enable same track transitions. This returns the index of the sub-playlist containing this clip */ int getSubPlaylistIndex() const; void setSubPlaylistIndex(int index); friend class TrackModel; friend class TimelineModel; friend class TimelineItemModel; friend class TimelineController; friend struct TimelineFunctions; protected: Mlt::Producer *service() const override; /* @brief Performs a resize of the given clip. Returns true if the operation succeeded, and otherwise nothing is modified. This method is protected because it shouldn't be called directly.
Call the function in the timeline instead. If a snap point is within reach, the operation will be coerced to use it. @param size is the new size of the clip @param right is true if we change the right side of the clip, false otherwise @param undo Lambda function containing the current undo stack. Will be updated with current operation @param redo Lambda function containing the current redo queue. Will be updated with current operation */ bool requestResize(int size, bool right, Fun &undo, Fun &redo, bool logUndo = true) override; void setCurrentTrackId(int tid, bool finalMove = true) override; void setPosition(int pos) override; void setInOut(int in, int out) override; /* @brief This function changes the global (timeline-wise) enabled state of the effects */ void setTimelineEffectsEnabled(bool enabled); /* @brief This function should be called when the producer of the binClip changes, to allow refresh * @param state corresponds to the state of the clip we want (audio or video) * @param speed corresponds to the speed we need. Leave to 0 to keep current speed. Warning: this function doesn't notify the model. Unless you know what * you are doing, better use useTimewarpProducer to change the speed */ void refreshProducerFromBin(PlaylistState::ClipState state, double speed = 0); void refreshProducerFromBin(); /* @brief This function replaces the current producer with a slowmotion one. It also resizes the producer so that the set of frames contained in the clip is the same */ bool useTimewarpProducer(double speed, Fun &undo, Fun &redo); // @brief Lambda that merely changes the speed (in and out are untouched) Fun useTimewarpProducer_lambda(double speed); /** @brief Returns the marker model associated with this clip */ std::shared_ptr getMarkerModel() const; /** @brief Returns the number of audio channels for this clip */ int audioChannels() const; bool audioEnabled() const; bool isAudioOnly() const; double getSpeed() const; + /** @brief Sets the clip offset (calculated in the model between 2 clips from the same bin clip) */ + void setOffset(int offset); + /** @brief Clears the clip offset (calculated in the model between 2 clips from the same bin clip) */ + void clearOffset(); + int getOffset() const; + /*@brief This is a debug function to ensure the clip is in a valid state */ bool checkConsistency(); protected: std::shared_ptr m_producer; std::shared_ptr getProducer(); std::shared_ptr m_effectStack; std::shared_ptr m_clipMarkerModel; QString m_binClipId; // This is the Id of the bin clip this clip corresponds to. bool m_endlessResize; // Whether this clip can be freely resized bool forceThumbReload; // Used to trigger a forced thumb reload, when producer changes PlaylistState::ClipState m_currentState; ClipType::ProducerType m_clipType; double m_speed = -1; // Speed of the clip bool m_canBeVideo, m_canBeAudio; // Fake track id, used when dragging in insert/overwrite mode int m_fakeTrack; int m_fakePosition; + // Temporary value to store the offset between two clips with the same bin id. + int m_positionOffset; int m_subPlaylistIndex; // Tracks have two sub playlists to enable same track transitions, we store in which one this clip is.
}; #endif diff --git a/src/timeline2/model/timelineitemmodel.cpp b/src/timeline2/model/timelineitemmodel.cpp index 1d521efa1..d66a74f71 100644 --- a/src/timeline2/model/timelineitemmodel.cpp +++ b/src/timeline2/model/timelineitemmodel.cpp @@ -1,617 +1,620 @@ /*************************************************************************** * Copyright (C) 2017 by Nicolas Carion * * This file is part of Kdenlive. See www.kdenlive.org. * * * * This program is free software; you can redistribute it and/or modify * * it under the terms of the GNU General Public License as published by * * the Free Software Foundation; either version 2 of the License, or * * (at your option) version 3 or any later version accepted by the * * membership of KDE e.V. (or its successor approved by the membership * * of KDE e.V.), which shall act as a proxy defined in Section 14 of * * version 3 of the license. * * * * This program is distributed in the hope that it will be useful, * * but WITHOUT ANY WARRANTY; without even the implied warranty of * * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * * GNU General Public License for more details. * * * * You should have received a copy of the GNU General Public License * * along with this program. If not, see . * ***************************************************************************/ #include "timelineitemmodel.hpp" #include "assets/keyframes/model/keyframemodel.hpp" #include "bin/model/markerlistmodel.hpp" #include "clipmodel.hpp" #include "compositionmodel.hpp" #include "core.h" #include "doc/docundostack.hpp" #include "groupsmodel.hpp" #include "kdenlivesettings.h" #include "macros.hpp" #include "trackmodel.hpp" #include "snapmodel.hpp" #include "transitions/transitionsrepository.hpp" #include #include #include #include #include #include #pragma GCC diagnostic push #pragma GCC diagnostic ignored "-Wunused-parameter" #pragma GCC diagnostic ignored "-Wsign-conversion" #pragma GCC diagnostic ignored "-Wfloat-equal" #pragma GCC diagnostic ignored "-Wshadow" #pragma GCC diagnostic ignored "-Wpedantic" #include #pragma GCC diagnostic pop RTTR_REGISTRATION { using namespace rttr; registration::class_("TimelineItemModel"); } TimelineItemModel::TimelineItemModel(Mlt::Profile *profile, std::weak_ptr undo_stack) : TimelineModel(profile, std::move(undo_stack)) { } void TimelineItemModel::finishConstruct(const std::shared_ptr &ptr, const std::shared_ptr &guideModel) { ptr->weak_this_ = ptr; ptr->m_groups = std::make_unique(ptr); guideModel->registerSnapModel(std::static_pointer_cast(ptr->m_snaps)); } std::shared_ptr TimelineItemModel::construct(Mlt::Profile *profile, std::shared_ptr guideModel, std::weak_ptr undo_stack) { std::shared_ptr ptr(new TimelineItemModel(profile, std::move(undo_stack))); finishConstruct(ptr, std::move(guideModel)); return ptr; } TimelineItemModel::~TimelineItemModel() = default; QModelIndex TimelineItemModel::index(int row, int column, const QModelIndex &parent) const { READ_LOCK(); QModelIndex result; if (parent.isValid()) { auto trackId = int(parent.internalId()); Q_ASSERT(isTrack(trackId)); int clipId = getTrackById_const(trackId)->getClipByRow(row); if (clipId != -1) { result = createIndex(row, 0, quintptr(clipId)); } else if (row < getTrackClipsCount(trackId) + getTrackCompositionsCount(trackId)) { int compoId = getTrackById_const(trackId)->getCompositionByRow(row); if (compoId != -1) { result = createIndex(row, 0, quintptr(compoId)); } } else { // Invalid index requested Q_ASSERT(false); } } else if (row < getTracksCount() && row >= 
0) { // Get sort order // row = getTracksCount() - 1 - row; auto it = m_allTracks.cbegin(); std::advance(it, row); int trackId = (*it)->getId(); result = createIndex(row, column, quintptr(trackId)); } return result; } /*QModelIndex TimelineItemModel::makeIndex(int trackIndex, int clipIndex) const { return index(clipIndex, 0, index(trackIndex)); }*/ QModelIndex TimelineItemModel::makeClipIndexFromID(int clipId) const { Q_ASSERT(m_allClips.count(clipId) > 0); int trackId = m_allClips.at(clipId)->getCurrentTrackId(); if (trackId == -1) { // Clip is not inserted in a track qDebug() << "/// WARNING; INVALID CLIP INDEX REQUESTED: "<getRowfromClip(clipId); return index(row, 0, makeTrackIndexFromID(trackId)); } QModelIndex TimelineItemModel::makeCompositionIndexFromID(int compoId) const { Q_ASSERT(m_allCompositions.count(compoId) > 0); int trackId = m_allCompositions.at(compoId)->getCurrentTrackId(); return index(getTrackById_const(trackId)->getRowfromComposition(compoId), 0, makeTrackIndexFromID(trackId)); } QModelIndex TimelineItemModel::makeTrackIndexFromID(int trackId) const { // we retrieve iterator Q_ASSERT(m_iteratorTable.count(trackId) > 0); auto it = m_iteratorTable.at(trackId); int ind = (int)std::distance(m_allTracks.begin(), it); // Get sort order // ind = getTracksCount() - 1 - ind; return index(ind); } QModelIndex TimelineItemModel::parent(const QModelIndex &index) const { READ_LOCK(); // qDebug() << "TimelineItemModel::parent"<< index; if (index == QModelIndex()) { return index; } const int id = static_cast(index.internalId()); if (!index.isValid() || isTrack(id)) { return QModelIndex(); } if (isClip(id)) { const int trackId = getClipTrackId(id); return makeTrackIndexFromID(trackId); } if (isComposition(id)) { const int trackId = getCompositionTrackId(id); return makeTrackIndexFromID(trackId); } return {}; } int TimelineItemModel::rowCount(const QModelIndex &parent) const { READ_LOCK(); if (parent.isValid()) { const int id = (int)parent.internalId(); if (!isTrack(id)) { // clips don't have children // if it is not a track, it is something invalid return 0; } return getTrackClipsCount(id) + getTrackCompositionsCount(id); } return getTracksCount(); } int TimelineItemModel::columnCount(const QModelIndex &parent) const { Q_UNUSED(parent); return 1; } QHash TimelineItemModel::roleNames() const { QHash roles; roles[NameRole] = "name"; roles[ResourceRole] = "resource"; roles[ServiceRole] = "mlt_service"; roles[BinIdRole] = "binId"; roles[TrackIdRole] = "trackId"; roles[FakeTrackIdRole] = "fakeTrackId"; roles[FakePositionRole] = "fakePosition"; roles[StartRole] = "start"; roles[DurationRole] = "duration"; roles[MarkersRole] = "markers"; roles[KeyframesRole] = "keyframeModel"; roles[ShowKeyframesRole] = "showKeyframes"; roles[StatusRole] = "clipStatus"; roles[TypeRole] = "clipType"; roles[InPointRole] = "in"; roles[OutPointRole] = "out"; roles[FramerateRole] = "fps"; roles[GroupedRole] = "grouped"; roles[IsDisabledRole] = "disabled"; roles[IsAudioRole] = "audio"; roles[AudioLevelsRole] = "audioLevels"; roles[AudioChannelsRole] = "audioChannels"; roles[IsCompositeRole] = "composite"; roles[IsLockedRole] = "locked"; roles[FadeInRole] = "fadeIn"; roles[FadeOutRole] = "fadeOut"; roles[FileHashRole] = "hash"; roles[SpeedRole] = "speed"; roles[HeightRole] = "trackHeight"; roles[TrackTagRole] = "trackTag"; roles[ItemIdRole] = "item"; roles[ItemATrack] = "a_track"; roles[HasAudio] = "hasAudio"; roles[CanBeAudioRole] = "canBeAudio"; roles[CanBeVideoRole] = "canBeVideo"; roles[ReloadThumbRole] = 
"reloadThumb"; + roles[PositionOffsetRole] = "positionOffset"; roles[ThumbsFormatRole] = "thumbsFormat"; roles[AudioRecordRole] = "audioRecord"; roles[TrackActiveRole] = "trackActive"; roles[EffectNamesRole] = "effectNames"; roles[EffectsEnabledRole] = "isStackEnabled"; roles[GrabbedRole] = "isGrabbed"; return roles; } QVariant TimelineItemModel::data(const QModelIndex &index, int role) const { READ_LOCK(); if (!m_tractor || !index.isValid()) { // qDebug() << "DATA abort. Index validity="< clip = m_allClips.at(id); // Get data for a clip switch (role) { // TODO case NameRole: case Qt::DisplayRole: { QString result = clip->getProperty("kdenlive:clipname"); if (result.isEmpty()) { result = clip->getProperty("kdenlive:originalurl"); if (result.isEmpty()) { result = clip->getProperty("resource"); } if (!result.isEmpty()) { result = QFileInfo(result).fileName(); } else { result = clip->getProperty("mlt_service"); } } return result; } case ResourceRole: { QString result = clip->getProperty("resource"); if (result == QLatin1String("")) { result = clip->getProperty("mlt_service"); } return result; } case FakeTrackIdRole: return clip->getFakeTrackId(); case FakePositionRole: return clip->getFakePosition(); case BinIdRole: return clip->binId(); case TrackIdRole: return clip->getCurrentTrackId(); case ServiceRole: return clip->getProperty("mlt_service"); break; case AudioLevelsRole: // Dumb property to trigger audio thumbs reload return true; case AudioChannelsRole: return clip->audioChannels(); case HasAudio: return clip->audioEnabled(); case IsAudioRole: return clip->isAudioOnly(); case CanBeAudioRole: return clip->canBeAudio(); case CanBeVideoRole: return clip->canBeVideo(); case MarkersRole: { return QVariant::fromValue(clip->getMarkerModel().get()); } case KeyframesRole: { return QVariant::fromValue(clip->getKeyframeModel()); } case StatusRole: return QVariant::fromValue(clip->clipState()); case TypeRole: return QVariant::fromValue(clip->clipType()); case StartRole: return clip->getPosition(); case DurationRole: return clip->getPlaytime(); case GroupedRole: return m_groups->isInGroup(id); case EffectNamesRole: return clip->effectNames(); case InPointRole: return clip->getIn(); case OutPointRole: return clip->getOut(); case ShowKeyframesRole: return clip->showKeyframes(); case FadeInRole: return clip->fadeIn(); case FadeOutRole: return clip->fadeOut(); case ReloadThumbRole: return clip->forceThumbReload; + case PositionOffsetRole: + return clip->getOffset(); case SpeedRole: return clip->getSpeed(); case GrabbedRole: return clip->isGrabbed(); default: break; } } else if (isTrack(id)) { // qDebug() << "DATA REQUESTED FOR TRACK "<< id; switch (role) { case NameRole: case Qt::DisplayRole: { return getTrackById_const(id)->getProperty("kdenlive:track_name").toString(); } case TypeRole: return QVariant::fromValue(ClipType::ProducerType::Track); case DurationRole: // qDebug() << "DATA yielding duration" << m_tractor->get_playtime(); return getTrackById_const(id)->trackDuration(); case IsDisabledRole: // qDebug() << "DATA yielding mute" << 0; return getTrackById_const(id)->isAudioTrack() ? 
getTrackById_const(id)->isMute() : getTrackById_const(id)->isHidden(); case IsAudioRole: return getTrackById_const(id)->isAudioTrack(); case TrackTagRole: return getTrackTagById(id); case IsLockedRole: return getTrackById_const(id)->getProperty("kdenlive:locked_track").toInt() == 1; case HeightRole: { int collapsed = getTrackById_const(id)->getProperty("kdenlive:collapsed").toInt(); if (collapsed > 0) { return collapsed; } int height = getTrackById_const(id)->getProperty("kdenlive:trackheight").toInt(); // qDebug() << "DATA yielding height" << height; return (height > 0 ? height : 60); } case ThumbsFormatRole: return getTrackById_const(id)->getProperty("kdenlive:thumbs_format").toInt(); case IsCompositeRole: { case AudioRecordRole: return getTrackById_const(id)->getProperty("kdenlive:audio_rec").toInt(); } case TrackActiveRole: { return getTrackById_const(id)->isTimelineActive(); } case EffectNamesRole: { return getTrackById_const(id)->effectNames(); } case EffectsEnabledRole: { return getTrackById_const(id)->stackEnabled(); } default: break; } } else if (isComposition(id)) { std::shared_ptr compo = m_allCompositions.at(id); switch (role) { case NameRole: case Qt::DisplayRole: case ResourceRole: case ServiceRole: return compo->displayName(); break; case TypeRole: return QVariant::fromValue(ClipType::ProducerType::Composition); case StartRole: return compo->getPosition(); case TrackIdRole: return compo->getCurrentTrackId(); case DurationRole: return compo->getPlaytime(); case GroupedRole: return m_groups->isInGroup(id); case InPointRole: return 0; case OutPointRole: return 100; case BinIdRole: return 5; case KeyframesRole: { return QVariant::fromValue(compo->getEffectKeyframeModel()); } case ShowKeyframesRole: return compo->showKeyframes(); case ItemATrack: return compo->getForcedTrack(); case MarkersRole: { QVariantList markersList; return markersList; } case GrabbedRole: return compo->isGrabbed(); default: break; } } else { qDebug() << "UNKNOWN DATA requested " << index << roleNames()[role]; } return QVariant(); } void TimelineItemModel::setTrackProperty(int trackId, const QString &name, const QString &value) { std::shared_ptr track = getTrackById(trackId); track->setProperty(name, value); QVector roles; if (name == QLatin1String("kdenlive:track_name")) { roles.push_back(NameRole); } else if (name == QLatin1String("kdenlive:locked_track")) { roles.push_back(IsLockedRole); } else if (name == QLatin1String("hide")) { roles.push_back(IsDisabledRole); if (!track->isAudioTrack()) { pCore->requestMonitorRefresh(); } } else if (name == QLatin1String("kdenlive:timeline_active")) { roles.push_back(TrackActiveRole); } else if (name == QLatin1String("kdenlive:thumbs_format")) { roles.push_back(ThumbsFormatRole); } else if (name == QLatin1String("kdenlive:collapsed")) { roles.push_back(HeightRole); } else if (name == QLatin1String("kdenlive:audio_rec")) { roles.push_back(AudioRecordRole); } if (!roles.isEmpty()) { QModelIndex ix = makeTrackIndexFromID(trackId); emit dataChanged(ix, ix, roles); } } void TimelineItemModel::setTrackStackEnabled(int tid, bool enable) { std::shared_ptr track = getTrackById(tid); track->setEffectStackEnabled(enable); QModelIndex ix = makeTrackIndexFromID(tid); emit dataChanged(ix, ix, {TimelineModel::EffectsEnabledRole}); } void TimelineItemModel::importTrackEffects(int tid, std::weak_ptr service) { std::shared_ptr track = getTrackById(tid); track->importEffects(std::move(service)); } QVariant TimelineItemModel::getTrackProperty(int tid, const QString &name) const { return 
getTrackById_const(tid)->getProperty(name); } int TimelineItemModel::getFirstVideoTrackIndex() const { int trackId = -1; auto it = m_allTracks.cbegin(); while (it != m_allTracks.cend()) { trackId = (*it)->getId(); if (!(*it)->isAudioTrack()) { break; } ++it; } return trackId; } const QString TimelineItemModel::getTrackFullName(int tid) const { QString tag = getTrackTagById(tid); QString trackName = getTrackById_const(tid)->getProperty(QStringLiteral("kdenlive:track_name")).toString(); return trackName.isEmpty() ? tag : tag + QStringLiteral(" - ") + trackName; } const QString TimelineItemModel::groupsData() { return m_groups->toJson(); } bool TimelineItemModel::loadGroups(const QString &groupsData) { return m_groups->fromJson(groupsData); } void TimelineItemModel::notifyChange(const QModelIndex &topleft, const QModelIndex &bottomright, bool start, bool duration, bool updateThumb) { QVector roles; if (start) { roles.push_back(TimelineModel::StartRole); if (updateThumb) { roles.push_back(TimelineModel::InPointRole); } } if (duration) { roles.push_back(TimelineModel::DurationRole); if (updateThumb) { roles.push_back(TimelineModel::OutPointRole); } } emit dataChanged(topleft, bottomright, roles); } void TimelineItemModel::notifyChange(const QModelIndex &topleft, const QModelIndex &bottomright, const QVector &roles) { emit dataChanged(topleft, bottomright, roles); } void TimelineItemModel::buildTrackCompositing(bool rebuild) { auto it = m_allTracks.cbegin(); QScopedPointer field(m_tractor->field()); field->lock(); // Make sure all previous track compositing is removed if (rebuild) { QScopedPointer service(new Mlt::Service(field->get_service())); while ((service != nullptr) && service->is_valid()) { if (service->type() == transition_type) { Mlt::Transition t((mlt_transition)service->get_service()); QString serviceName = t.get("mlt_service"); if (t.get_int("internal_added") == 237) { // remove all compositing transitions field->disconnect_service(t); } } service.reset(service->producer()); } } QString composite = TransitionsRepository::get()->getCompositingTransition(); while (it != m_allTracks.cend()) { int trackId = getTrackMltIndex((*it)->getId()); if (!composite.isEmpty() && !(*it)->isAudioTrack()) { // video track, add composition std::unique_ptr transition = TransitionsRepository::get()->getTransition(composite); transition->set("internal_added", 237); transition->set("always_active", 1); field->plant_transition(*transition, 0, trackId); transition->set_tracks(0, trackId); } else if ((*it)->isAudioTrack()) { // audio mix std::unique_ptr transition = TransitionsRepository::get()->getTransition(QStringLiteral("mix")); transition->set("internal_added", 237); transition->set("always_active", 1); transition->set("sum", 1); field->plant_transition(*transition, 0, trackId); transition->set_tracks(0, trackId); } ++it; } field->unlock(); if (composite.isEmpty()) { pCore->displayMessage(i18n("Could not setup track compositing, check your install"), MessageType::ErrorMessage); } } void TimelineItemModel::notifyChange(const QModelIndex &topleft, const QModelIndex &bottomright, int role) { emit dataChanged(topleft, bottomright, {role}); } void TimelineItemModel::_beginRemoveRows(const QModelIndex &i, int j, int k) { // qDebug()<<"FORWARDING beginRemoveRows"<. 
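// Sketch of how a track property change reaches the QML timeline via
// TimelineItemModel::setTrackProperty() above: the value is written on the MLT
// track, the property name is translated to a model role ("hide" -> IsDisabledRole,
// "kdenlive:track_name" -> NameRole, "kdenlive:collapsed" -> HeightRole, ...) and
// dataChanged() is emitted so the QML binding named in roleNames() refreshes.
// Illustrative call; the track name string is invented:
//     model->setTrackProperty(trackId, QStringLiteral("kdenlive:track_name"), i18n("Voice over"));
//     // -> emits dataChanged(trackIndex, trackIndex, {NameRole});
//     //    the delegate's `model.name` binding updates, no manual refresh needed.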
* ***************************************************************************/ #include "timelinemodel.hpp" #include "assets/model/assetparametermodel.hpp" #include "bin/projectclip.h" #include "bin/projectitemmodel.h" #include "clipmodel.hpp" #include "compositionmodel.hpp" #include "core.h" #include "doc/docundostack.hpp" #include "effects/effectsrepository.hpp" #include "effects/effectstack/model/effectstackmodel.hpp" #include "groupsmodel.hpp" #include "kdenlivesettings.h" #include "logger.hpp" #include "snapmodel.hpp" #include "timelinefunctions.hpp" #include "trackmodel.hpp" #include #include #include #include #include #include #include #include #include #include "macros.hpp" #pragma GCC diagnostic push #pragma GCC diagnostic ignored "-Wunused-parameter" #pragma GCC diagnostic ignored "-Wsign-conversion" #pragma GCC diagnostic ignored "-Wfloat-equal" #pragma GCC diagnostic ignored "-Wshadow" #pragma GCC diagnostic ignored "-Wpedantic" #include #pragma GCC diagnostic pop RTTR_REGISTRATION { using namespace rttr; registration::class_("TimelineModel") .method("requestClipMove", select_overload(&TimelineModel::requestClipMove))( parameter_names("clipId", "trackId", "position", "updateView", "logUndo", "invalidateTimeline")) .method("requestCompositionMove", select_overload(&TimelineModel::requestCompositionMove))( parameter_names("compoId", "trackId", "position", "updateView", "logUndo")) .method("requestClipInsertion", select_overload(&TimelineModel::requestClipInsertion))( parameter_names("binClipId", "trackId", "position", "id", "logUndo", "refreshView", "useTargets")) .method("requestItemDeletion", select_overload(&TimelineModel::requestItemDeletion))(parameter_names("clipId", "logUndo")) .method("requestGroupMove", select_overload(&TimelineModel::requestGroupMove))( parameter_names("itemId", "groupId", "delta_track", "delta_pos", "updateView", "logUndo")) .method("requestGroupDeletion", select_overload(&TimelineModel::requestGroupDeletion))(parameter_names("clipId", "logUndo")) .method("requestItemResize", select_overload(&TimelineModel::requestItemResize))( parameter_names("itemId", "size", "right", "logUndo", "snapDistance", "allowSingleResize")) .method("requestClipsGroup", select_overload &, bool, GroupType)>(&TimelineModel::requestClipsGroup))( parameter_names("itemIds", "logUndo", "type")) .method("requestClipUngroup", select_overload(&TimelineModel::requestClipUngroup))(parameter_names("itemId", "logUndo")) .method("requestClipsUngroup", &TimelineModel::requestClipsUngroup)(parameter_names("itemIds", "logUndo")) .method("requestTrackInsertion", select_overload(&TimelineModel::requestTrackInsertion))( parameter_names("pos", "id", "trackName", "audioTrack")) .method("requestTrackDeletion", select_overload(&TimelineModel::requestTrackDeletion))(parameter_names("trackId")) .method("requestClearSelection", select_overload(&TimelineModel::requestClearSelection))(parameter_names("onDeletion")) .method("requestAddToSelection", &TimelineModel::requestAddToSelection)(parameter_names("itemId", "clear")) .method("requestRemoveFromSelection", &TimelineModel::requestRemoveFromSelection)(parameter_names("itemId")) .method("requestSetSelection", select_overload &)>(&TimelineModel::requestSetSelection))(parameter_names("itemIds")) .method("requestFakeClipMove", select_overload(&TimelineModel::requestFakeClipMove))( parameter_names("clipId", "trackId", "position", "updateView", "logUndo", "invalidateTimeline")) .method("requestFakeGroupMove", 
select_overload(&TimelineModel::requestFakeGroupMove))( parameter_names("clipId", "groupId", "delta_track", "delta_pos", "updateView", "logUndo")) // (parameter_names("clipId", "groupId", "delta_track", "delta_pos", "updateView" "logUndo")) .method("suggestClipMove", &TimelineModel::suggestClipMove)(parameter_names("clipId", "trackId", "position", "cursorPosition", "snapDistance")) .method("suggestCompositionMove", &TimelineModel::suggestCompositionMove)(parameter_names("compoId", "trackId", "position", "cursorPosition", "snapDistance")) // .method("addSnap", &TimelineModel::addSnap)(parameter_names("pos")) // .method("removeSnap", &TimelineModel::addSnap)(parameter_names("pos")) // .method("requestCompositionInsertion", select_overload, int &, bool)>( // &TimelineModel::requestCompositionInsertion))( // parameter_names("transitionId", "trackId", "position", "length", "transProps", "id", "logUndo")) .method("requestClipTimeWarp", select_overload(&TimelineModel::requestClipTimeWarp))(parameter_names("clipId", "speed")); } int TimelineModel::next_id = 0; int TimelineModel::seekDuration = 30000; TimelineModel::TimelineModel(Mlt::Profile *profile, std::weak_ptr undo_stack) : QAbstractItemModel_shared_from_this() , m_tractor(new Mlt::Tractor(*profile)) , m_snaps(new SnapModel()) , m_undoStack(std::move(undo_stack)) , m_profile(profile) , m_blackClip(new Mlt::Producer(*profile, "color:black")) , m_lock(QReadWriteLock::Recursive) , m_timelineEffectsEnabled(true) , m_id(getNextId()) , m_overlayTrackCount(-1) , m_audioTarget(-1) , m_videoTarget(-1) , m_editMode(TimelineMode::NormalEdit) , m_blockRefresh(false) { // Create black background track m_blackClip->set("id", "black_track"); m_blackClip->set("mlt_type", "producer"); m_blackClip->set("aspect_ratio", 1); m_blackClip->set("length", INT_MAX); m_blackClip->set("set.test_audio", 0); m_blackClip->set_in_and_out(0, TimelineModel::seekDuration); m_tractor->insert_track(*m_blackClip, 0); TRACE_CONSTR(this); } TimelineModel::~TimelineModel() { std::vector all_ids; for (auto tracks : m_iteratorTable) { all_ids.push_back(tracks.first); } for (auto tracks : all_ids) { deregisterTrack_lambda(tracks, false)(); } for (const auto &clip : m_allClips) { clip.second->deregisterClipToBin(); } } int TimelineModel::getTracksCount() const { READ_LOCK(); int count = m_tractor->count(); if (m_overlayTrackCount > -1) { count -= m_overlayTrackCount; } Q_ASSERT(count >= 0); // don't count the black background track Q_ASSERT(count - 1 == static_cast(m_allTracks.size())); return count - 1; } int TimelineModel::getTrackIndexFromPosition(int pos) const { Q_ASSERT(pos >= 0 && pos < (int)m_allTracks.size()); READ_LOCK(); auto it = m_allTracks.begin(); while (pos > 0) { it++; pos--; } return (*it)->getId(); } int TimelineModel::getClipsCount() const { READ_LOCK(); int size = int(m_allClips.size()); return size; } int TimelineModel::getCompositionsCount() const { READ_LOCK(); int size = int(m_allCompositions.size()); return size; } int TimelineModel::getClipTrackId(int clipId) const { READ_LOCK(); Q_ASSERT(m_allClips.count(clipId) > 0); const auto clip = m_allClips.at(clipId); return clip->getCurrentTrackId(); } int TimelineModel::getCompositionTrackId(int compoId) const { Q_ASSERT(m_allCompositions.count(compoId) > 0); const auto trans = m_allCompositions.at(compoId); return trans->getCurrentTrackId(); } int TimelineModel::getItemTrackId(int itemId) const { READ_LOCK(); Q_ASSERT(isItem(itemId)); if (isComposition(itemId)) { return getCompositionTrackId(itemId); } return 
getClipTrackId(itemId); } int TimelineModel::getClipPosition(int clipId) const { READ_LOCK(); Q_ASSERT(m_allClips.count(clipId) > 0); const auto clip = m_allClips.at(clipId); int pos = clip->getPosition(); return pos; } double TimelineModel::getClipSpeed(int clipId) const { READ_LOCK(); Q_ASSERT(m_allClips.count(clipId) > 0); return m_allClips.at(clipId)->getSpeed(); } int TimelineModel::getClipSplitPartner(int clipId) const { READ_LOCK(); Q_ASSERT(m_allClips.count(clipId) > 0); return m_groups->getSplitPartner(clipId); } int TimelineModel::getClipIn(int clipId) const { READ_LOCK(); Q_ASSERT(m_allClips.count(clipId) > 0); const auto clip = m_allClips.at(clipId); return clip->getIn(); } PlaylistState::ClipState TimelineModel::getClipState(int clipId) const { READ_LOCK(); Q_ASSERT(m_allClips.count(clipId) > 0); const auto clip = m_allClips.at(clipId); return clip->clipState(); } const QString TimelineModel::getClipBinId(int clipId) const { READ_LOCK(); Q_ASSERT(m_allClips.count(clipId) > 0); const auto clip = m_allClips.at(clipId); QString id = clip->binId(); return id; } int TimelineModel::getClipPlaytime(int clipId) const { READ_LOCK(); Q_ASSERT(isClip(clipId)); const auto clip = m_allClips.at(clipId); int playtime = clip->getPlaytime(); return playtime; } QSize TimelineModel::getClipFrameSize(int clipId) const { READ_LOCK(); Q_ASSERT(isClip(clipId)); const auto clip = m_allClips.at(clipId); return clip->getFrameSize(); } int TimelineModel::getTrackClipsCount(int trackId) const { READ_LOCK(); Q_ASSERT(isTrack(trackId)); int count = getTrackById_const(trackId)->getClipsCount(); return count; } int TimelineModel::getClipByPosition(int trackId, int position) const { READ_LOCK(); Q_ASSERT(isTrack(trackId)); return getTrackById_const(trackId)->getClipByPosition(position); } int TimelineModel::getCompositionByPosition(int trackId, int position) const { READ_LOCK(); Q_ASSERT(isTrack(trackId)); return getTrackById_const(trackId)->getCompositionByPosition(position); } int TimelineModel::getTrackPosition(int trackId) const { READ_LOCK(); Q_ASSERT(isTrack(trackId)); auto it = m_allTracks.begin(); int pos = (int)std::distance(it, (decltype(it))m_iteratorTable.at(trackId)); return pos; } int TimelineModel::getTrackMltIndex(int trackId) const { READ_LOCK(); // Because of the black track that we insert in first position, the mlt index is the position + 1 return getTrackPosition(trackId) + 1; } int TimelineModel::getTrackSortValue(int trackId, bool separated) const { if (separated) { return getTrackPosition(trackId) + 1; } auto it = m_allTracks.end(); int aCount = 0; int vCount = 0; bool isAudio = false; int trackPos = 0; while (it != m_allTracks.begin()) { --it; bool audioTrack = (*it)->isAudioTrack(); if (audioTrack) { aCount++; } else { vCount++; } if (trackId == (*it)->getId()) { isAudio = audioTrack; trackPos = audioTrack ? aCount : vCount; } } int trackDiff = aCount - vCount; if (trackDiff > 0) { // more audio tracks if (!isAudio) { trackPos -= trackDiff; } else if (trackPos > vCount) { return -trackPos; } } return isAudio ? 
((aCount * trackPos) - 1) : (vCount + 1 - trackPos) * 2; } QList TimelineModel::getLowerTracksId(int trackId, TrackType type) const { READ_LOCK(); Q_ASSERT(isTrack(trackId)); QList results; auto it = m_iteratorTable.at(trackId); while (it != m_allTracks.begin()) { --it; if (type == TrackType::AnyTrack) { results << (*it)->getId(); continue; } bool audioTrack = (*it)->isAudioTrack(); if (type == TrackType::AudioTrack && audioTrack) { results << (*it)->getId(); } else if (type == TrackType::VideoTrack && !audioTrack) { results << (*it)->getId(); } } return results; } int TimelineModel::getPreviousVideoTrackIndex(int trackId) const { READ_LOCK(); Q_ASSERT(isTrack(trackId)); auto it = m_iteratorTable.at(trackId); while (it != m_allTracks.begin()) { --it; if (it != m_allTracks.begin() && !(*it)->isAudioTrack()) { break; } } return it == m_allTracks.begin() ? 0 : (*it)->getId(); } int TimelineModel::getPreviousVideoTrackPos(int trackId) const { READ_LOCK(); Q_ASSERT(isTrack(trackId)); auto it = m_iteratorTable.at(trackId); while (it != m_allTracks.begin()) { --it; if (it != m_allTracks.begin() && !(*it)->isAudioTrack()) { break; } } return it == m_allTracks.begin() ? 0 : getTrackMltIndex((*it)->getId()); } int TimelineModel::getMirrorVideoTrackId(int trackId) const { READ_LOCK(); Q_ASSERT(isTrack(trackId)); auto it = m_iteratorTable.at(trackId); if (!(*it)->isAudioTrack()) { // we expected an audio track... return -1; } int count = 0; if (it != m_allTracks.end()) { ++it; } while (it != m_allTracks.end()) { if ((*it)->isAudioTrack()) { count++; } else { if (count == 0) { return (*it)->getId(); } count--; } ++it; } if (it != m_allTracks.end() && !(*it)->isAudioTrack() && count == 0) { return (*it)->getId(); } return -1; } int TimelineModel::getMirrorTrackId(int trackId) const { if (isAudioTrack(trackId)) { return getMirrorVideoTrackId(trackId); } return getMirrorAudioTrackId(trackId); } int TimelineModel::getMirrorAudioTrackId(int trackId) const { READ_LOCK(); Q_ASSERT(isTrack(trackId)); auto it = m_iteratorTable.at(trackId); if ((*it)->isAudioTrack()) { // we expected a video track... 
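// [Reviewer note - illustrative sketch, not part of this patch]
// getMirrorTrackId() simply dispatches to getMirrorAudioTrackId() or
// getMirrorVideoTrackId() depending on the type of the queried track, so call
// sites only need the generic form. The usual pattern (used verbatim in
// requestClipInsertion() further down) also discards locked mirrors:
//
//     int mirror = getMirrorTrackId(trackId);
//     if (mirror > -1 && getTrackById_const(mirror)->isLocked()) {
//         mirror = -1; // treat a locked mirror track as "no mirror"
//     }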
return -1; } int count = 0; if (it != m_allTracks.begin()) { --it; } while (it != m_allTracks.begin()) { if (!(*it)->isAudioTrack()) { count++; } else { if (count == 0) { return (*it)->getId(); } count--; } --it; } if ((*it)->isAudioTrack() && count == 0) { return (*it)->getId(); } return -1; } void TimelineModel::setEditMode(TimelineMode::EditMode mode) { m_editMode = mode; } bool TimelineModel::normalEdit() const { return m_editMode == TimelineMode::NormalEdit; } bool TimelineModel::requestFakeClipMove(int clipId, int trackId, int position, bool updateView, bool invalidateTimeline, Fun &undo, Fun &redo) { Q_UNUSED(updateView); Q_UNUSED(invalidateTimeline); Q_UNUSED(undo); Q_UNUSED(redo); Q_ASSERT(isClip(clipId)); m_allClips[clipId]->setFakePosition(position); bool trackChanged = false; if (trackId > -1) { if (trackId != m_allClips[clipId]->getFakeTrackId()) { if (getTrackById_const(trackId)->trackType() == m_allClips[clipId]->clipState()) { m_allClips[clipId]->setFakeTrackId(trackId); trackChanged = true; } } } QModelIndex modelIndex = makeClipIndexFromID(clipId); if (modelIndex.isValid()) { QVector roles{FakePositionRole}; if (trackChanged) { roles << FakeTrackIdRole; } notifyChange(modelIndex, modelIndex, roles); return true; } return false; } bool TimelineModel::requestClipMove(int clipId, int trackId, int position, bool updateView, bool invalidateTimeline, Fun &undo, Fun &redo) { // qDebug() << "// FINAL MOVE: " << invalidateTimeline << ", UPDATE VIEW: " << updateView; if (trackId == -1) { return false; } Q_ASSERT(isClip(clipId)); if (m_allClips[clipId]->clipState() == PlaylistState::Disabled) { if (getTrackById_const(trackId)->trackType() == PlaylistState::AudioOnly && !m_allClips[clipId]->canBeAudio()) { return false; } if (getTrackById_const(trackId)->trackType() == PlaylistState::VideoOnly && !m_allClips[clipId]->canBeVideo()) { return false; } } else if (getTrackById_const(trackId)->trackType() != m_allClips[clipId]->clipState()) { // Move not allowed (audio / video mismatch) qDebug() << "// CLIP MISMATCH: " << getTrackById_const(trackId)->trackType() << " == " << m_allClips[clipId]->clipState(); return false; } std::function local_undo = []() { return true; }; std::function local_redo = []() { return true; }; bool ok = true; int old_trackId = getClipTrackId(clipId); bool notifyViewOnly = false; // qDebug()<<"MOVING CLIP FROM: "< 0); if (m_allClips[clipId]->getPosition() == position && getClipTrackId(clipId) == trackId) { TRACE_RES(true); return true; } if (m_groups->isInGroup(clipId)) { // element is in a group. 
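// [Reviewer note - illustrative sketch, not part of this patch]
// A "fake" move only updates FakePositionRole / FakeTrackIdRole on the model,
// so the view can follow the drag while the real MLT playlist is left
// untouched; suggestClipMove() below picks the fake variant whenever the
// timeline is not in normal edit mode:
//
//     bool possible = m_editMode == TimelineMode::NormalEdit
//                         ? requestClipMove(clipId, trackId, position, true, false, false)
//                         : requestFakeClipMove(clipId, trackId, position, true, false, false);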
int groupId = m_groups->getRootId(clipId); int current_trackId = getClipTrackId(clipId); int track_pos1 = getTrackPosition(trackId); int track_pos2 = getTrackPosition(current_trackId); int delta_track = track_pos1 - track_pos2; int delta_pos = position - m_allClips[clipId]->getPosition(); bool res = requestFakeGroupMove(clipId, groupId, delta_track, delta_pos, updateView, logUndo); TRACE_RES(res); return res; } std::function undo = []() { return true; }; std::function redo = []() { return true; }; bool res = requestFakeClipMove(clipId, trackId, position, updateView, invalidateTimeline, undo, redo); if (res && logUndo) { PUSH_UNDO(undo, redo, i18n("Move clip")); } TRACE_RES(res); return res; } bool TimelineModel::requestClipMove(int clipId, int trackId, int position, bool updateView, bool logUndo, bool invalidateTimeline) { QWriteLocker locker(&m_lock); TRACE(clipId, trackId, position, updateView, logUndo, invalidateTimeline); Q_ASSERT(m_allClips.count(clipId) > 0); if (m_allClips[clipId]->getPosition() == position && getClipTrackId(clipId) == trackId) { TRACE_RES(true); return true; } if (m_groups->isInGroup(clipId)) { // element is in a group. int groupId = m_groups->getRootId(clipId); int current_trackId = getClipTrackId(clipId); int track_pos1 = getTrackPosition(trackId); int track_pos2 = getTrackPosition(current_trackId); int delta_track = track_pos1 - track_pos2; int delta_pos = position - m_allClips[clipId]->getPosition(); return requestGroupMove(clipId, groupId, delta_track, delta_pos, updateView, logUndo); } std::function undo = []() { return true; }; std::function redo = []() { return true; }; bool res = requestClipMove(clipId, trackId, position, updateView, invalidateTimeline, undo, redo); if (res && logUndo) { PUSH_UNDO(undo, redo, i18n("Move clip")); } TRACE_RES(res); return res; } bool TimelineModel::requestClipMoveAttempt(int clipId, int trackId, int position) { QWriteLocker locker(&m_lock); Q_ASSERT(m_allClips.count(clipId) > 0); if (m_allClips[clipId]->getPosition() == position && getClipTrackId(clipId) == trackId) { return true; } std::function undo = []() { return true; }; std::function redo = []() { return true; }; bool res = true; if (m_groups->isInGroup(clipId)) { // element is in a group. 
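// [Reviewer note - not part of this patch] requestClipMoveAttempt() is a dry
// run: the move (or group move) is executed with throw-away undo/redo lambdas
// and immediately rolled back, so only the boolean result escapes:
//
//     Fun undo = []() { return true; };
//     Fun redo = []() { return true; };
//     bool possible = requestClipMove(clipId, trackId, position, false, false, undo, redo);
//     if (possible) {
//         undo(); // nothing stays applied
//     }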
int groupId = m_groups->getRootId(clipId); int current_trackId = getClipTrackId(clipId); int track_pos1 = getTrackPosition(trackId); int track_pos2 = getTrackPosition(current_trackId); int delta_track = track_pos1 - track_pos2; int delta_pos = position - m_allClips[clipId]->getPosition(); res = requestGroupMove(clipId, groupId, delta_track, delta_pos, false, false, undo, redo, false); } else { res = requestClipMove(clipId, trackId, position, false, false, undo, redo); } if (res) { undo(); } return res; } int TimelineModel::suggestItemMove(int itemId, int trackId, int position, int cursorPosition, int snapDistance) { if (isClip(itemId)) { return suggestClipMove(itemId, trackId, position, cursorPosition, snapDistance); } return suggestCompositionMove(itemId, trackId, position, cursorPosition, snapDistance); } int TimelineModel::suggestClipMove(int clipId, int trackId, int position, int cursorPosition, int snapDistance) { QWriteLocker locker(&m_lock); TRACE(clipId, trackId, position, cursorPosition, snapDistance); Q_ASSERT(isClip(clipId)); Q_ASSERT(isTrack(trackId)); int currentPos = getClipPosition(clipId); int sourceTrackId = getClipTrackId(clipId); if (sourceTrackId > -1 && getTrackById_const(trackId)->isAudioTrack() != getTrackById_const(sourceTrackId)->isAudioTrack()) { // Trying move on incompatible track type, stay on same track trackId = sourceTrackId; } if (currentPos == position && sourceTrackId == trackId) { TRACE_RES(position); return position; } bool after = position > currentPos; if (snapDistance > 0) { // For snapping, we must ignore all in/outs of the clips of the group being moved std::vector ignored_pts; std::unordered_set all_items = {clipId}; if (m_groups->isInGroup(clipId)) { int groupId = m_groups->getRootId(clipId); all_items = m_groups->getLeaves(groupId); } for (int current_clipId : all_items) { if (getItemTrackId(current_clipId) != -1) { int in = getItemPosition(current_clipId); int out = in + getItemPlaytime(current_clipId); ignored_pts.push_back(in); ignored_pts.push_back(out); } } int snapped = getBestSnapPos(position, m_allClips[clipId]->getPlaytime(), m_editMode == TimelineMode::NormalEdit ? ignored_pts : std::vector(), cursorPosition, snapDistance); // qDebug() << "Starting suggestion " << clipId << position << currentPos << "snapped to " << snapped; if (snapped >= 0) { position = snapped; } } // we check if move is possible bool possible = m_editMode == TimelineMode::NormalEdit ? requestClipMove(clipId, trackId, position, true, false, false) : requestFakeClipMove(clipId, trackId, position, true, false, false); /*} else { possible = requestClipMoveAttempt(clipId, trackId, position); }*/ if (possible) { TRACE_RES(position); return position; } if (sourceTrackId == -1) { // not clear what to do hear, if the current move doesn't work. We could try to find empty space, but it might end up being far away... 
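// [Reviewer note - illustrative sketch, not part of this patch] A clip being
// dragged in from the bin has no source track yet, so there is no sensible
// fallback and the unchanged position is returned. For clips already on a
// track, the snapping step above relies on getBestSnapPos(), which ignores the
// in/out points of the dragged group, temporarily adds the playhead as a snap
// point and returns -1 when nothing lies within snapDistance:
//
//     int snapped = getBestSnapPos(position, m_allClips[clipId]->getPlaytime(),
//                                  ignored_pts, cursorPosition, snapDistance);
//     if (snapped >= 0) {
//         position = snapped; // otherwise keep the unsnapped position
//     }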
TRACE_RES(currentPos); return currentPos; } // Find best possible move if (!m_groups->isInGroup(clipId)) { // Try same track move if (trackId != sourceTrackId && sourceTrackId != -1) { qDebug() << "// TESTING SAME TRACVK MOVE: " << trackId << " = " << sourceTrackId; trackId = sourceTrackId; possible = requestClipMove(clipId, trackId, position, true, false, false); if (!possible) { qDebug() << "CANNOT MOVE CLIP : " << clipId << " ON TK: " << trackId << ", AT POS: " << position; } else { TRACE_RES(position); return position; } } int blank_length = getTrackById(trackId)->getBlankSizeNearClip(clipId, after); qDebug() << "Found blank" << blank_length; if (blank_length < INT_MAX) { if (after) { position = currentPos + blank_length; } else { position = currentPos - blank_length; } } else { TRACE_RES(currentPos); return currentPos; } possible = requestClipMove(clipId, trackId, position, true, false, false); TRACE_RES(possible ? position : currentPos); return possible ? position : currentPos; } // find best pos for groups int groupId = m_groups->getRootId(clipId); std::unordered_set all_items = m_groups->getLeaves(groupId); QMap trackPosition; // First pass, sort clips by track and keep only the first / last depending on move direction for (int current_clipId : all_items) { int clipTrack = getItemTrackId(current_clipId); if (clipTrack == -1) { continue; } int in = getItemPosition(current_clipId); if (trackPosition.contains(clipTrack)) { if (after) { // keep only last clip position for track int out = in + getItemPlaytime(current_clipId); if (trackPosition.value(clipTrack) < out) { trackPosition.insert(clipTrack, out); } } else { // keep only first clip position for track if (trackPosition.value(clipTrack) > in) { trackPosition.insert(clipTrack, in); } } } else { trackPosition.insert(clipTrack, after ? in + getItemPlaytime(current_clipId) : in); } } // Now check space on each track QMapIterator i(trackPosition); int blank_length = -1; while (i.hasNext()) { i.next(); int track_space; if (!after) { // Check space before the position track_space = i.value() - getTrackById(i.key())->getBlankStart(i.value() - 1); if (blank_length == -1 || blank_length > track_space) { blank_length = track_space; } } else { // Check space after the position track_space = getTrackById(i.key())->getBlankEnd(i.value() + 1) - i.value() - 1; if (blank_length == -1 || blank_length > track_space) { blank_length = track_space; } } } if (blank_length != 0) { int updatedPos = currentPos + (after ? 
blank_length : -blank_length); possible = requestClipMove(clipId, trackId, updatedPos, true, false, false); if (possible) { TRACE_RES(updatedPos); return updatedPos; } } TRACE_RES(currentPos); return currentPos; } int TimelineModel::suggestCompositionMove(int compoId, int trackId, int position, int cursorPosition, int snapDistance) { QWriteLocker locker(&m_lock); TRACE(compoId, trackId, position, cursorPosition, snapDistance); Q_ASSERT(isComposition(compoId)); Q_ASSERT(isTrack(trackId)); int currentPos = getCompositionPosition(compoId); int currentTrack = getCompositionTrackId(compoId); if (getTrackById_const(trackId)->isAudioTrack()) { // Trying move on incompatible track type, stay on same track trackId = currentTrack; } if (currentPos == position && currentTrack == trackId) { TRACE_RES(position); return position; } if (snapDistance > 0) { // For snapping, we must ignore all in/outs of the clips of the group being moved std::vector ignored_pts; if (m_groups->isInGroup(compoId)) { int groupId = m_groups->getRootId(compoId); auto all_items = m_groups->getLeaves(groupId); for (int current_compoId : all_items) { // TODO: fix for composition int in = getItemPosition(current_compoId); int out = in + getItemPlaytime(current_compoId); ignored_pts.push_back(in); ignored_pts.push_back(out); } } else { int in = currentPos; int out = in + getCompositionPlaytime(compoId); qDebug() << " * ** IGNORING SNAP PTS: " << in << "-" << out; ignored_pts.push_back(in); ignored_pts.push_back(out); } int snapped = getBestSnapPos(position, m_allCompositions[compoId]->getPlaytime(), ignored_pts, cursorPosition, snapDistance); qDebug() << "Starting suggestion " << compoId << position << currentPos << "snapped to " << snapped; if (snapped >= 0) { position = snapped; } } // we check if move is possible bool possible = requestCompositionMove(compoId, trackId, position, true, false); qDebug() << "Original move success" << possible; if (possible) { TRACE_RES(position); return position; } /*bool after = position > currentPos; int blank_length = getTrackById(trackId)->getBlankSizeNearComposition(compoId, after); qDebug() << "Found blank" << blank_length; if (blank_length < INT_MAX) { if (after) { return currentPos + blank_length; } return currentPos - blank_length; } return position;*/ TRACE_RES(currentPos); return currentPos; } bool TimelineModel::requestClipCreation(const QString &binClipId, int &id, PlaylistState::ClipState state, double speed, Fun &undo, Fun &redo) { qDebug() << "requestClipCreation " << binClipId; QString bid = binClipId; if (binClipId.contains(QLatin1Char('/'))) { bid = binClipId.section(QLatin1Char('/'), 0, 0); } if (!pCore->projectItemModel()->hasClip(bid)) { qDebug() << " / / / /MASTER CLIP NOT FOUND"; return false; } std::shared_ptr master = pCore->projectItemModel()->getClipByBinID(bid); if (!master->isReady() || !master->isCompatible(state)) { qDebug() << "// CLIP NOT READY OR NOT COMPATIBLE: " << state; return false; } int clipId = TimelineModel::getNextId(); id = clipId; Fun local_undo = deregisterClip_lambda(clipId); ClipModel::construct(shared_from_this(), bid, clipId, state, speed); auto clip = m_allClips[clipId]; Fun local_redo = [clip, this, state]() { // We capture a shared_ptr to the clip, which means that as long as this undo object lives, the clip object is not deleted. To insert it back it is // sufficient to register it. 
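// [Reviewer note - illustrative only; the clip id and zone below are made up]
// binClipId may carry a zone encoded as "binId/in/out"; in that case the clip
// is created at full length and then trimmed to the zone by the two
// requestItemResize() calls that follow. A hypothetical caller inserting
// frames 10..59 of bin clip "3" as a video-only timeline clip:
//
//     Fun undo = []() { return true; };
//     Fun redo = []() { return true; };
//     int newId = -1;
//     requestClipCreation(QStringLiteral("3/10/59"), newId, PlaylistState::VideoOnly, 1.0, undo, redo);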
registerClip(clip, true); clip->refreshProducerFromBin(state); return true; }; if (binClipId.contains(QLatin1Char('/'))) { int in = binClipId.section(QLatin1Char('/'), 1, 1).toInt(); int out = binClipId.section(QLatin1Char('/'), 2, 2).toInt(); int initLength = m_allClips[clipId]->getPlaytime(); bool res = true; if (in != 0) { res = requestItemResize(clipId, initLength - in, false, true, local_undo, local_redo); } res = res && requestItemResize(clipId, out - in + 1, true, true, local_undo, local_redo); if (!res) { bool undone = local_undo(); Q_ASSERT(undone); return false; } } UPDATE_UNDO_REDO(local_redo, local_undo, undo, redo); return true; } bool TimelineModel::requestClipInsertion(const QString &binClipId, int trackId, int position, int &id, bool logUndo, bool refreshView, bool useTargets) { QWriteLocker locker(&m_lock); TRACE(binClipId, trackId, position, id, logUndo, refreshView, useTargets); Fun undo = []() { return true; }; Fun redo = []() { return true; }; bool result = requestClipInsertion(binClipId, trackId, position, id, logUndo, refreshView, useTargets, undo, redo); if (result && logUndo) { PUSH_UNDO(undo, redo, i18n("Insert Clip")); } TRACE_RES(result); return result; } bool TimelineModel::requestClipInsertion(const QString &binClipId, int trackId, int position, int &id, bool logUndo, bool refreshView, bool useTargets, Fun &undo, Fun &redo) { Fun local_undo = []() { return true; }; Fun local_redo = []() { return true; }; qDebug() << "requestClipInsertion " << binClipId << " " << " " << trackId << " " << position; bool res = false; ClipType::ProducerType type = ClipType::Unknown; QString bid = binClipId.section(QLatin1Char('/'), 0, 0); // dropType indicates if we want a normal drop (disabled), audio only or video only drop PlaylistState::ClipState dropType = PlaylistState::Disabled; if (bid.startsWith(QLatin1Char('A'))) { dropType = PlaylistState::AudioOnly; bid = bid.remove(0, 1); } else if (bid.startsWith(QLatin1Char('V'))) { dropType = PlaylistState::VideoOnly; bid = bid.remove(0, 1); } if (!pCore->projectItemModel()->hasClip(bid)) { return false; } std::shared_ptr master = pCore->projectItemModel()->getClipByBinID(bid); type = master->clipType(); if (useTargets && m_audioTarget == -1 && m_videoTarget == -1) { useTargets = false; } if (dropType == PlaylistState::Disabled && (type == ClipType::AV || type == ClipType::Playlist)) { if (m_audioTarget >= 0 && m_videoTarget == -1 && useTargets) { // If audio target is set but no video target, only insert audio trackId = m_audioTarget; if (trackId > -1 && getTrackById_const(trackId)->isLocked()) { trackId = -1; } } else if (useTargets && getTrackById_const(trackId)->isLocked()) { // Video target set but locked trackId = m_audioTarget; if (trackId > -1 && getTrackById_const(trackId)->isLocked()) { trackId = -1; } } if (trackId == -1) { pCore->displayMessage(i18n("No available track for insert operation"), ErrorMessage); return false; } bool audioDrop = getTrackById_const(trackId)->isAudioTrack(); res = requestClipCreation(binClipId, id, getTrackById_const(trackId)->trackType(), 1.0, local_undo, local_redo); res = res && requestClipMove(id, trackId, position, refreshView, logUndo, local_undo, local_redo); int target_track; if (audioDrop) { target_track = m_videoTarget == -1 ? -1 : getTrackById_const(m_videoTarget)->isLocked() ? -1 : m_videoTarget; } else { target_track = m_audioTarget == -1 ? -1 : getTrackById_const(m_audioTarget)->isLocked() ? 
-1 : m_audioTarget; } qDebug() << "CLIP HAS A+V: " << master->hasAudioAndVideo(); int mirror = getMirrorTrackId(trackId); if (mirror > -1 && getTrackById_const(mirror)->isLocked()) { mirror = -1; } bool canMirrorDrop = !useTargets && mirror > -1; if (res && (canMirrorDrop || target_track > -1) && master->hasAudioAndVideo()) { if (!useTargets) { target_track = mirror; } // QList possibleTracks = m_audioTarget >= 0 ? QList() << m_audioTarget : getLowerTracksId(trackId, TrackType::AudioTrack); QList possibleTracks; qDebug() << "CREATING SPLIT " << target_track << " usetargets" << useTargets; if (target_track >= 0 && !getTrackById_const(target_track)->isLocked()) { possibleTracks << target_track; } if (possibleTracks.isEmpty()) { // No available audio track for splitting, abort pCore->displayMessage(i18n("No available track for split operation"), ErrorMessage); res = false; } else { std::function audio_undo = []() { return true; }; std::function audio_redo = []() { return true; }; int newId; res = requestClipCreation(binClipId, newId, audioDrop ? PlaylistState::VideoOnly : PlaylistState::AudioOnly, 1.0, audio_undo, audio_redo); if (res) { bool move = false; while (!move && !possibleTracks.isEmpty()) { int newTrack = possibleTracks.takeFirst(); move = requestClipMove(newId, newTrack, position, true, false, audio_undo, audio_redo); } // use lazy evaluation to group only if move was successful res = res && move && requestClipsGroup({id, newId}, audio_undo, audio_redo, GroupType::AVSplit); if (!res || !move) { pCore->displayMessage(i18n("Audio split failed: no viable track"), ErrorMessage); bool undone = audio_undo(); Q_ASSERT(undone); } else { UPDATE_UNDO_REDO(audio_redo, audio_undo, local_undo, local_redo); } } else { pCore->displayMessage(i18n("Audio split failed: impossible to create audio clip"), ErrorMessage); bool undone = audio_undo(); Q_ASSERT(undone); } } } } else { std::shared_ptr binClip = pCore->projectItemModel()->getClipByBinID(bid); if (dropType == PlaylistState::Disabled) { dropType = getTrackById_const(trackId)->trackType(); } else if (dropType != getTrackById_const(trackId)->trackType()) { qDebug() << "// INCORRECT DRAG, ABORTING"; return false; } QString normalisedBinId = binClipId; if (normalisedBinId.startsWith(QLatin1Char('A')) || normalisedBinId.startsWith(QLatin1Char('V'))) { normalisedBinId.remove(0, 1); } res = requestClipCreation(normalisedBinId, id, dropType, 1.0, local_undo, local_redo); res = res && requestClipMove(id, trackId, position, refreshView, logUndo, local_undo, local_redo); } if (!res) { bool undone = local_undo(); Q_ASSERT(undone); id = -1; return false; } UPDATE_UNDO_REDO(local_redo, local_undo, undo, redo); return true; } bool TimelineModel::requestItemDeletion(int itemId, Fun &undo, Fun &redo) { QWriteLocker locker(&m_lock); if (m_groups->isInGroup(itemId)) { return requestGroupDeletion(itemId, undo, redo); } if (isClip(itemId)) { return requestClipDeletion(itemId, undo, redo); } if (isComposition(itemId)) { return requestCompositionDeletion(itemId, undo, redo); } Q_ASSERT(false); return false; } bool TimelineModel::requestItemDeletion(int itemId, bool logUndo) { QWriteLocker locker(&m_lock); TRACE(itemId, logUndo); Q_ASSERT(isItem(itemId)); QString actionLabel; if (m_groups->isInGroup(itemId)) { actionLabel = i18n("Remove group"); } else { if (isClip(itemId)) { actionLabel = i18n("Delete Clip"); } else { actionLabel = i18n("Delete Composition"); } } Fun undo = []() { return true; }; Fun redo = []() { return true; }; bool res = 
requestItemDeletion(itemId, undo, redo); if (res && logUndo) { PUSH_UNDO(undo, redo, actionLabel); } TRACE_RES(res); requestClearSelection(true); return res; } bool TimelineModel::requestClipDeletion(int clipId, Fun &undo, Fun &redo) { int trackId = getClipTrackId(clipId); if (trackId != -1) { bool res = getTrackById(trackId)->requestClipDeletion(clipId, true, true, undo, redo); if (!res) { undo(); return false; } } auto operation = deregisterClip_lambda(clipId); auto clip = m_allClips[clipId]; Fun reverse = [this, clip]() { // We capture a shared_ptr to the clip, which means that as long as this undo object lives, the clip object is not deleted. To insert it back it is // sufficient to register it. registerClip(clip, true); return true; }; if (operation()) { UPDATE_UNDO_REDO(operation, reverse, undo, redo); return true; } undo(); return false; } bool TimelineModel::requestCompositionDeletion(int compositionId, Fun &undo, Fun &redo) { int trackId = getCompositionTrackId(compositionId); if (trackId != -1) { bool res = getTrackById(trackId)->requestCompositionDeletion(compositionId, true, true, undo, redo); if (!res) { undo(); return false; } else { unplantComposition(compositionId); } } Fun operation = deregisterComposition_lambda(compositionId); auto composition = m_allCompositions[compositionId]; Fun reverse = [this, composition]() { // We capture a shared_ptr to the composition, which means that as long as this undo object lives, the composition object is not deleted. To insert it // back it is sufficient to register it. registerComposition(composition); return true; }; if (operation()) { UPDATE_UNDO_REDO(operation, reverse, undo, redo); return true; } undo(); return false; } std::unordered_set TimelineModel::getItemsInRange(int trackId, int start, int end, bool listCompositions) { Q_UNUSED(listCompositions) std::unordered_set allClips; if (trackId == -1) { for (const auto &track : m_allTracks) { if (track->isLocked()) { continue; } std::unordered_set clipTracks = getItemsInRange(track->getId(), start, end, listCompositions); allClips.insert(clipTracks.begin(), clipTracks.end()); } } else { std::unordered_set clipTracks = getTrackById(trackId)->getClipsInRange(start, end); allClips.insert(clipTracks.begin(), clipTracks.end()); if (listCompositions) { std::unordered_set compoTracks = getTrackById(trackId)->getCompositionsInRange(start, end); allClips.insert(compoTracks.begin(), compoTracks.end()); } } return allClips; } bool TimelineModel::requestFakeGroupMove(int clipId, int groupId, int delta_track, int delta_pos, bool updateView, bool logUndo) { TRACE(clipId, groupId, delta_track, delta_pos, updateView, logUndo); std::function undo = []() { return true; }; std::function redo = []() { return true; }; bool res = requestFakeGroupMove(clipId, groupId, delta_track, delta_pos, updateView, logUndo, undo, redo); if (res && logUndo) { PUSH_UNDO(undo, redo, i18n("Move group")); } TRACE_RES(res); return res; } bool TimelineModel::requestFakeGroupMove(int clipId, int groupId, int delta_track, int delta_pos, bool updateView, bool finalMove, Fun &undo, Fun &redo, bool allowViewRefresh) { Q_UNUSED(updateView); Q_UNUSED(finalMove); Q_UNUSED(undo); Q_UNUSED(redo); Q_UNUSED(allowViewRefresh); QWriteLocker locker(&m_lock); Q_ASSERT(m_allGroups.count(groupId) > 0); bool ok = true; auto all_items = m_groups->getLeaves(groupId); Q_ASSERT(all_items.size() > 1); Fun local_undo = []() { return true; }; Fun local_redo = []() { return true; }; // Moving groups is a two stage process: first we remove the clips 
from the tracks, and then try to insert them back at their calculated new positions. // This way, we ensure that no conflict will arise with clips inside the group being moved Fun update_model = []() { return true; }; // Check if there is a track move // First, remove clips std::unordered_map old_track_ids, old_position, old_forced_track; for (int item : all_items) { int old_trackId = getItemTrackId(item); old_track_ids[item] = old_trackId; if (old_trackId != -1) { if (isClip(item)) { old_position[item] = m_allClips[item]->getPosition(); } else { old_position[item] = m_allCompositions[item]->getPosition(); old_forced_track[item] = m_allCompositions[item]->getForcedTrack(); } } } // Second step, calculate delta int audio_delta, video_delta; audio_delta = video_delta = delta_track; if (getTrackById(old_track_ids[clipId])->isAudioTrack()) { // Master clip is audio, so reverse delta for video clips video_delta = -delta_track; } else { audio_delta = -delta_track; } bool trackChanged = false; // Reverse sort. We need to insert from left to right to avoid confusing the view for (int item : all_items) { int current_track_id = old_track_ids[item]; int current_track_position = getTrackPosition(current_track_id); int d = getTrackById(current_track_id)->isAudioTrack() ? audio_delta : video_delta; int target_track_position = current_track_position + d; if (target_track_position >= 0 && target_track_position < getTracksCount()) { auto it = m_allTracks.cbegin(); std::advance(it, target_track_position); int target_track = (*it)->getId(); int target_position = old_position[item] + delta_pos; if (isClip(item)) { qDebug() << "/// SETTING FAKE CLIP: " << target_track << ", POSITION: " << target_position; m_allClips[item]->setFakePosition(target_position); if (m_allClips[item]->getFakeTrackId() != target_track) { trackChanged = true; } m_allClips[item]->setFakeTrackId(target_track); } else { } } else { qDebug() << "// ABORTING; MOVE TRIED ON TRACK: " << target_track_position << "..\n..\n.."; ok = false; } if (!ok) { bool undone = local_undo(); Q_ASSERT(undone); return false; } } QModelIndex modelIndex; QVector roles{FakePositionRole}; if (trackChanged) { roles << FakeTrackIdRole; } for (int item : all_items) { if (isClip(item)) { modelIndex = makeClipIndexFromID(item); } else { modelIndex = makeCompositionIndexFromID(item); } notifyChange(modelIndex, modelIndex, roles); } return true; } bool TimelineModel::requestGroupMove(int itemId, int groupId, int delta_track, int delta_pos, bool updateView, bool logUndo) { QWriteLocker locker(&m_lock); TRACE(itemId, groupId, delta_track, delta_pos, updateView, logUndo); std::function undo = []() { return true; }; std::function redo = []() { return true; }; bool res = requestGroupMove(itemId, groupId, delta_track, delta_pos, updateView, logUndo, undo, redo); if (res && logUndo) { PUSH_UNDO(undo, redo, i18n("Move group")); } TRACE_RES(res); return res; } bool TimelineModel::requestGroupMove(int itemId, int groupId, int delta_track, int delta_pos, bool updateView, bool finalMove, Fun &undo, Fun &redo, bool allowViewRefresh) { QWriteLocker locker(&m_lock); Q_ASSERT(m_allGroups.count(groupId) > 0); Q_ASSERT(isItem(itemId)); if (getGroupElements(groupId).count(itemId) == 0) { // this group doesn't contain the clip, abort return false; } bool ok = true; auto all_items = m_groups->getLeaves(groupId); Q_ASSERT(all_items.size() > 1); Fun local_undo = []() { return true; }; Fun local_redo = []() { return true; }; // Sort clips. 
We need to delete from right to left to avoid confusing the view, and compositions from top to bottom std::vector sorted_clips(all_items.begin(), all_items.end()); std::sort(sorted_clips.begin(), sorted_clips.end(), [this, delta_track](int clipId1, int clipId2) { int p1 = isClip(clipId1) ? m_allClips[clipId1]->getPosition() : delta_track < 0 ? getTrackMltIndex(m_allCompositions[clipId1]->getCurrentTrackId()) : delta_track > 0 ? -getTrackMltIndex(m_allCompositions[clipId1]->getCurrentTrackId()) : m_allCompositions[clipId1]->getPosition(); int p2 = isClip(clipId2) ? m_allClips[clipId2]->getPosition() : delta_track < 0 ? getTrackMltIndex(m_allCompositions[clipId2]->getCurrentTrackId()) : delta_track > 0 ? -getTrackMltIndex(m_allCompositions[clipId2]->getCurrentTrackId()) : m_allCompositions[clipId2]->getPosition(); return p2 <= p1; }); // Moving groups is a two stage process: first we remove the clips from the tracks, and then try to insert them back at their calculated new positions. // This way, we ensure that no conflict will arise with clips inside the group being moved Fun update_model = []() { return true; }; // Check if there is a track move bool updatePositionOnly = false; if (delta_track == 0 && updateView) { updateView = false; allowViewRefresh = false; updatePositionOnly = true; update_model = [sorted_clips, this]() { QModelIndex modelIndex; QVector roles{StartRole}; for (int item : sorted_clips) { if (isClip(item)) { modelIndex = makeClipIndexFromID(item); } else { modelIndex = makeCompositionIndexFromID(item); } notifyChange(modelIndex, modelIndex, roles); } return true; }; } // First, remove clips std::unordered_map old_track_ids, old_position, old_forced_track; for (int item : sorted_clips) { int old_trackId = getItemTrackId(item); old_track_ids[item] = old_trackId; if (old_trackId != -1) { bool updateThisView = allowViewRefresh; if (isClip(item)) { ok = ok && getTrackById(old_trackId)->requestClipDeletion(item, updateThisView, finalMove, local_undo, local_redo); old_position[item] = m_allClips[item]->getPosition(); } else { // ok = ok && getTrackById(old_trackId)->requestCompositionDeletion(item, updateThisView, finalMove, local_undo, local_redo); old_position[item] = m_allCompositions[item]->getPosition(); old_forced_track[item] = m_allCompositions[item]->getForcedTrack(); } if (!ok) { bool undone = local_undo(); Q_ASSERT(undone); return false; } } } // Second step, reinsert clips at correct positions int audio_delta, video_delta; audio_delta = video_delta = delta_track; if (getTrackById(old_track_ids[itemId])->isAudioTrack()) { // Master clip is audio, so reverse delta for video clips video_delta = -delta_track; } else { audio_delta = -delta_track; } // Reverse sort. We need to insert from left to right to avoid confusing the view std::reverse(std::begin(sorted_clips), std::end(sorted_clips)); for (int item : sorted_clips) { int current_track_id = old_track_ids[item]; int current_track_position = getTrackPosition(current_track_id); int d = getTrackById(current_track_id)->isAudioTrack() ? 
audio_delta : video_delta; int target_track_position = current_track_position + d; bool updateThisView = allowViewRefresh; if (target_track_position >= 0 && target_track_position < getTracksCount()) { auto it = m_allTracks.cbegin(); std::advance(it, target_track_position); int target_track = (*it)->getId(); int target_position = old_position[item] + delta_pos; if (isClip(item)) { ok = ok && requestClipMove(item, target_track, target_position, updateThisView, finalMove, local_undo, local_redo); } else { ok = ok && requestCompositionMove(item, target_track, old_forced_track[item], target_position, updateThisView, finalMove, local_undo, local_redo); } } else { qDebug() << "// ABORTING; MOVE TRIED ON TRACK: " << target_track_position << "..\n..\n.."; ok = false; } if (!ok) { bool undone = local_undo(); Q_ASSERT(undone); return false; } } if (updatePositionOnly) { update_model(); PUSH_LAMBDA(update_model, local_redo); PUSH_LAMBDA(update_model, local_undo); } UPDATE_UNDO_REDO(local_redo, local_undo, undo, redo); return true; } bool TimelineModel::requestGroupDeletion(int clipId, bool logUndo) { QWriteLocker locker(&m_lock); TRACE(clipId, logUndo); if (!m_groups->isInGroup(clipId)) { TRACE_RES(false); return false; } bool res = requestItemDeletion(clipId, logUndo); TRACE_RES(res); return res; } bool TimelineModel::requestGroupDeletion(int clipId, Fun &undo, Fun &redo) { // we do a breadth first exploration of the group tree, ungroup (delete) every inner node, and then delete all the leaves. std::queue group_queue; group_queue.push(m_groups->getRootId(clipId)); std::unordered_set all_items; std::unordered_set all_compositions; while (!group_queue.empty()) { int current_group = group_queue.front(); bool isSelection = m_currentSelection == current_group; if (isSelection) { m_currentSelection = -1; } group_queue.pop(); Q_ASSERT(isGroup(current_group)); auto children = m_groups->getDirectChildren(current_group); int one_child = -1; // we need the id on any of the indices of the elements of the group for (int c : children) { if (isClip(c)) { all_items.insert(c); one_child = c; } else if (isComposition(c)) { all_compositions.insert(c); one_child = c; } else { Q_ASSERT(isGroup(c)); one_child = c; group_queue.push(c); } } if (one_child != -1) { if (m_groups->getType(current_group) == GroupType::Selection) { Q_ASSERT(isSelection); // in the case of a selection group, we delete the group but don't log it in the undo object Fun tmp_undo = []() { return true; }; Fun tmp_redo = []() { return true; }; m_groups->ungroupItem(one_child, tmp_undo, tmp_redo); } else { bool res = m_groups->ungroupItem(one_child, undo, redo); if (!res) { undo(); return false; } } } } for (int clip : all_items) { bool res = requestClipDeletion(clip, undo, redo); if (!res) { undo(); return false; } } for (int compo : all_compositions) { bool res = requestCompositionDeletion(compo, undo, redo); if (!res) { undo(); return false; } } return true; } int TimelineModel::requestItemResize(int itemId, int size, bool right, bool logUndo, int snapDistance, bool allowSingleResize) { if (logUndo) { qDebug() << "---------------------\n---------------------\nRESIZE W/UNDO CALLED\n++++++++++++++++\n++++"; } QWriteLocker locker(&m_lock); TRACE(itemId, size, right, logUndo, snapDistance, allowSingleResize); Q_ASSERT(isItem(itemId)); if (size <= 0) { TRACE_RES(-1); return -1; } int in = getItemPosition(itemId); int out = in + getItemPlaytime(itemId); if (snapDistance > 0 && getItemTrackId(itemId) != -1) { Fun temp_undo = []() { return true; }; Fun 
temp_redo = []() { return true; }; if (right && size > out - in && isClip(itemId)) { int targetPos = in + size - 1; int trackId = getItemTrackId(itemId); if (!getTrackById_const(trackId)->isBlankAt(targetPos)) { size = getTrackById_const(trackId)->getBlankEnd(out + 1) - in; } } else if (!right && size > (out - in) && isClip(itemId)) { int targetPos = out - size; int trackId = getItemTrackId(itemId); if (!getTrackById_const(trackId)->isBlankAt(targetPos)) { size = out - getTrackById_const(trackId)->getBlankStart(in - 1); } } int timelinePos = pCore->getTimelinePosition(); m_snaps->addPoint(timelinePos); int proposed_size = m_snaps->proposeSize(in, out, size, right, snapDistance); m_snaps->removePoint(timelinePos); if (proposed_size > 0) { // only test move if proposed_size is valid bool success = false; if (isClip(itemId)) { success = m_allClips[itemId]->requestResize(proposed_size, right, temp_undo, temp_redo, false); } else { success = m_allCompositions[itemId]->requestResize(proposed_size, right, temp_undo, temp_redo, false); } if (success) { temp_undo(); // undo temp move size = proposed_size; } } } Fun undo = []() { return true; }; Fun redo = []() { return true; }; std::unordered_set all_items; if (!allowSingleResize && m_groups->isInGroup(itemId)) { int groupId = m_groups->getRootId(itemId); std::unordered_set items; if (m_groups->getType(groupId) == GroupType::AVSplit) { // Only resize group elements if it is an avsplit items = m_groups->getLeaves(groupId); } else { all_items.insert(itemId); } for (int id : items) { if (id == itemId) { all_items.insert(id); continue; } int start = getItemPosition(id); int end = in + getItemPlaytime(id); if (right) { if (out == end) { all_items.insert(id); } } else if (start == in) { all_items.insert(id); } } } else { all_items.insert(itemId); } bool result = true; for (int id : all_items) { int tid = getItemTrackId(id); if (tid > -1 && getTrackById_const(tid)->isLocked()) { continue; } result = result && requestItemResize(id, size, right, logUndo, undo, redo); } if (!result) { bool undone = undo(); Q_ASSERT(undone); TRACE_RES(-1); return -1; } if (result && logUndo) { if (isClip(itemId)) { PUSH_UNDO(undo, redo, i18n("Resize clip")); } else { PUSH_UNDO(undo, redo, i18n("Resize composition")); } } int res = result ? size : -1; TRACE_RES(res); return res; } bool TimelineModel::requestItemResize(int itemId, int size, bool right, bool logUndo, Fun &undo, Fun &redo, bool blockUndo) { Fun local_undo = []() { return true; }; Fun local_redo = []() { return true; }; Fun update_model = [itemId, right, logUndo, this]() { Q_ASSERT(isItem(itemId)); if (getItemTrackId(itemId) != -1) { qDebug() << "++++++++++\nRESIZING ITEM: " << itemId << "\n+++++++"; QModelIndex modelIndex = isClip(itemId) ? 
makeClipIndexFromID(itemId) : makeCompositionIndexFromID(itemId); notifyChange(modelIndex, modelIndex, !right, true, logUndo); } return true; }; bool result = false; if (isClip(itemId)) { result = m_allClips[itemId]->requestResize(size, right, local_undo, local_redo, logUndo); } else { Q_ASSERT(isComposition(itemId)); result = m_allCompositions[itemId]->requestResize(size, right, local_undo, local_redo, logUndo); } if (result) { if (!blockUndo) { PUSH_LAMBDA(update_model, local_undo); } PUSH_LAMBDA(update_model, local_redo); update_model(); UPDATE_UNDO_REDO(local_redo, local_undo, undo, redo); } return result; } int TimelineModel::requestClipsGroup(const std::unordered_set &ids, bool logUndo, GroupType type) { QWriteLocker locker(&m_lock); TRACE(ids, logUndo, type); if (type == GroupType::Selection || type == GroupType::Leaf) { // Selections shouldn't be done here. Call requestSetSelection instead TRACE_RES(-1); return -1; } Fun undo = []() { return true; }; Fun redo = []() { return true; }; int result = requestClipsGroup(ids, undo, redo, type); if (result > -1 && logUndo) { PUSH_UNDO(undo, redo, i18n("Group clips")); } TRACE_RES(result); return result; } int TimelineModel::requestClipsGroup(const std::unordered_set &ids, Fun &undo, Fun &redo, GroupType type) { QWriteLocker locker(&m_lock); if (type != GroupType::Selection) { requestClearSelection(); } for (int id : ids) { if (isClip(id)) { if (getClipTrackId(id) == -1) { return -1; } } else if (isComposition(id)) { if (getCompositionTrackId(id) == -1) { return -1; } } else if (!isGroup(id)) { return -1; } } if (type == GroupType::Selection && ids.size() == 1) { // only one element selected, no group created return -1; } int groupId = m_groups->groupItems(ids, undo, redo, type); if (type != GroupType::Selection) { // we make sure that the undo and the redo are going to unselect before doing anything else Fun unselect = [this]() { return requestClearSelection(); }; PUSH_FRONT_LAMBDA(unselect, undo); PUSH_FRONT_LAMBDA(unselect, redo); } return groupId; } bool TimelineModel::requestClipsUngroup(const std::unordered_set &itemIds, bool logUndo) { QWriteLocker locker(&m_lock); TRACE(itemIds, logUndo); Fun undo = []() { return true; }; Fun redo = []() { return true; }; bool result = true; requestClearSelection(); std::unordered_set roots; std::transform(itemIds.begin(), itemIds.end(), std::inserter(roots, roots.begin()), [&](int id) { return m_groups->getRootId(id); }); for (int root : roots) { result = result && requestClipUngroup(root, undo, redo); } if (!result) { bool undone = undo(); Q_ASSERT(undone); } if (result && logUndo) { PUSH_UNDO(undo, redo, i18n("Ungroup clips")); } TRACE_RES(result); return result; } bool TimelineModel::requestClipUngroup(int itemId, bool logUndo) { QWriteLocker locker(&m_lock); TRACE(itemId, logUndo); requestClearSelection(); Fun undo = []() { return true; }; Fun redo = []() { return true; }; bool result = true; result = requestClipUngroup(itemId, undo, redo); if (result && logUndo) { PUSH_UNDO(undo, redo, i18n("Ungroup clips")); } TRACE_RES(result); return result; } bool TimelineModel::requestClipUngroup(int itemId, Fun &undo, Fun &redo) { QWriteLocker locker(&m_lock); bool isSelection = m_groups->getType(m_groups->getRootId(itemId)) == GroupType::Selection; if (!isSelection) { requestClearSelection(); } bool res = m_groups->ungroupItem(itemId, undo, redo); if (res && !isSelection) { // we make sure that the undo and the redo are going to unselect before doing anything else Fun unselect = [this]() { return 
requestClearSelection(); }; PUSH_FRONT_LAMBDA(unselect, undo); PUSH_FRONT_LAMBDA(unselect, redo); } return res; } bool TimelineModel::requestTrackInsertion(int position, int &id, const QString &trackName, bool audioTrack) { QWriteLocker locker(&m_lock); TRACE(position, id, trackName, audioTrack); Fun undo = []() { return true; }; Fun redo = []() { return true; }; bool result = requestTrackInsertion(position, id, trackName, audioTrack, undo, redo); if (result) { PUSH_UNDO(undo, redo, i18n("Insert Track")); } TRACE_RES(result); return result; } bool TimelineModel::requestTrackInsertion(int position, int &id, const QString &trackName, bool audioTrack, Fun &undo, Fun &redo, bool updateView) { // TODO: make sure we disable overlayTrack before inserting a track if (position == -1) { position = (int)(m_allTracks.size()); } if (position < 0 || position > (int)m_allTracks.size()) { return false; } int trackId = TimelineModel::getNextId(); id = trackId; Fun local_undo = deregisterTrack_lambda(trackId, true); TrackModel::construct(shared_from_this(), trackId, position, trackName, audioTrack); auto track = getTrackById(trackId); Fun local_redo = [track, position, updateView, this]() { // We capture a shared_ptr to the track, which means that as long as this undo object lives, the track object is not deleted. To insert it back it is // sufficient to register it. registerTrack(track, position, updateView); return true; }; UPDATE_UNDO_REDO(local_redo, local_undo, undo, redo); return true; } bool TimelineModel::requestTrackDeletion(int trackId) { // TODO: make sure we disable overlayTrack before deleting a track QWriteLocker locker(&m_lock); TRACE(trackId); Fun undo = []() { return true; }; Fun redo = []() { return true; }; bool result = requestTrackDeletion(trackId, undo, redo); if (result) { if (m_videoTarget == trackId) { m_videoTarget = -1; } if (m_audioTarget == trackId) { m_audioTarget = -1; } PUSH_UNDO(undo, redo, i18n("Delete Track")); } TRACE_RES(result); return result; } bool TimelineModel::requestTrackDeletion(int trackId, Fun &undo, Fun &redo) { Q_ASSERT(isTrack(trackId)); std::vector clips_to_delete; for (const auto &it : getTrackById(trackId)->m_allClips) { clips_to_delete.push_back(it.first); } Fun local_undo = []() { return true; }; Fun local_redo = []() { return true; }; for (int clip : clips_to_delete) { bool res = true; while (res && m_groups->isInGroup(clip)) { res = requestClipUngroup(clip, local_undo, local_redo); } if (res) { res = requestClipDeletion(clip, local_undo, local_redo); } if (!res) { bool u = local_undo(); Q_ASSERT(u); return false; } } int old_position = getTrackPosition(trackId); auto operation = deregisterTrack_lambda(trackId, true); std::shared_ptr track = getTrackById(trackId); Fun reverse = [this, track, old_position]() { // We capture a shared_ptr to the track, which means that as long as this undo object lives, the track object is not deleted. To insert it back it is // sufficient to register it. 
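// [Reviewer note - not part of this patch] The same capture-and-reregister
// pattern is used for clips, compositions and tracks: the undo lambda owns a
// shared_ptr, so the object outlives its removal from the model and undoing
// is just re-registration at the recorded position. The pair built here is:
//
//     auto operation = deregisterTrack_lambda(trackId, true); // redo: drop the track
//     Fun reverse = [this, track, old_position]() {           // undo: put it back
//         registerTrack(track, old_position);
//         return true;
//     };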
registerTrack(track, old_position); return true; }; if (operation()) { UPDATE_UNDO_REDO(operation, reverse, local_undo, local_redo); UPDATE_UNDO_REDO(local_redo, local_undo, undo, redo); return true; } local_undo(); return false; } void TimelineModel::registerTrack(std::shared_ptr track, int pos, bool doInsert, bool reloadView) { // qDebug() << "REGISTER TRACK" << track->getId() << pos; int id = track->getId(); if (pos == -1) { pos = static_cast(m_allTracks.size()); } Q_ASSERT(pos >= 0); Q_ASSERT(pos <= static_cast(m_allTracks.size())); // effective insertion (MLT operation), add 1 to account for black background track if (doInsert) { int error = m_tractor->insert_track(*track, pos + 1); Q_ASSERT(error == 0); // we might need better error handling... } // we now insert in the list auto posIt = m_allTracks.begin(); std::advance(posIt, pos); auto it = m_allTracks.insert(posIt, std::move(track)); // it now contains the iterator to the inserted element, we store it Q_ASSERT(m_iteratorTable.count(id) == 0); // check that id is not used (shouldn't happen) m_iteratorTable[id] = it; if (reloadView) { // don't reload view on each track load on project opening _resetView(); } } void TimelineModel::registerClip(const std::shared_ptr &clip, bool registerProducer) { int id = clip->getId(); qDebug() << " // /REQUEST TL CLP REGSTR: " << id << "\n--------\nCLIPS COUNT: " << m_allClips.size(); Q_ASSERT(m_allClips.count(id) == 0); m_allClips[id] = clip; clip->registerClipToBin(clip->getProducer(), registerProducer); m_groups->createGroupItem(id); clip->setTimelineEffectsEnabled(m_timelineEffectsEnabled); } void TimelineModel::registerGroup(int groupId) { Q_ASSERT(m_allGroups.count(groupId) == 0); m_allGroups.insert(groupId); } Fun TimelineModel::deregisterTrack_lambda(int id, bool updateView) { return [this, id, updateView]() { // qDebug() << "DEREGISTER TRACK" << id; auto it = m_iteratorTable[id]; // iterator to the element int index = getTrackPosition(id); // compute index in list m_tractor->remove_track(static_cast(index + 1)); // melt operation, add 1 to account for black background track // send update to the model m_allTracks.erase(it); // actual deletion of object m_iteratorTable.erase(id); // clean table if (updateView) { _resetView(); } return true; }; } Fun TimelineModel::deregisterClip_lambda(int clipId) { return [this, clipId]() { // qDebug() << " // /REQUEST TL CLP DELETION: " << clipId << "\n--------\nCLIPS COUNT: " << m_allClips.size(); requestClearSelection(true); clearAssetView(clipId); Q_ASSERT(m_allClips.count(clipId) > 0); Q_ASSERT(getClipTrackId(clipId) == -1); // clip must be deleted from its track at this point Q_ASSERT(!m_groups->isInGroup(clipId)); // clip must be ungrouped at this point auto clip = m_allClips[clipId]; m_allClips.erase(clipId); clip->deregisterClipToBin(); m_groups->destructGroupItem(clipId); return true; }; } void TimelineModel::deregisterGroup(int id) { Q_ASSERT(m_allGroups.count(id) > 0); m_allGroups.erase(id); } std::shared_ptr TimelineModel::getTrackById(int trackId) { Q_ASSERT(m_iteratorTable.count(trackId) > 0); return *m_iteratorTable[trackId]; } const std::shared_ptr TimelineModel::getTrackById_const(int trackId) const { Q_ASSERT(m_iteratorTable.count(trackId) > 0); return *m_iteratorTable.at(trackId); } bool TimelineModel::addTrackEffect(int trackId, const QString &effectId) { Q_ASSERT(m_iteratorTable.count(trackId) > 0); if ((*m_iteratorTable.at(trackId))->addEffect(effectId) == false) { QString effectName = EffectsRepository::get()->getName(effectId); 
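// [Reviewer note - illustrative only] copyTrackEffect(), defined just below,
// expects the source encoded as "itemType-itemId-itemRow" and resolves it via
// pCore->getItemEffectStack(). A hypothetical call pasting row 0 of the effect
// stack of item 42 onto this track (the itemType value 0 is an assumption, not
// taken from this file):
//
//     model->copyTrackEffect(trackId, QStringLiteral("0-42-0"));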
pCore->displayMessage(i18n("Cannot add effect %1 to selected track", effectName), InformationMessage, 500); return false; } return true; } bool TimelineModel::copyTrackEffect(int trackId, const QString &sourceId) { QStringList source = sourceId.split(QLatin1Char('-')); Q_ASSERT(m_iteratorTable.count(trackId) > 0 && source.count() == 3); int itemType = source.at(0).toInt(); int itemId = source.at(1).toInt(); int itemRow = source.at(2).toInt(); std::shared_ptr effectStack = pCore->getItemEffectStack(itemType, itemId); if ((*m_iteratorTable.at(trackId))->copyEffect(effectStack, itemRow) == false) { pCore->displayMessage(i18n("Cannot paste effect to selected track"), InformationMessage, 500); return false; } return true; } std::shared_ptr TimelineModel::getClipPtr(int clipId) const { Q_ASSERT(m_allClips.count(clipId) > 0); return m_allClips.at(clipId); } bool TimelineModel::addClipEffect(int clipId, const QString &effectId, bool notify) { Q_ASSERT(m_allClips.count(clipId) > 0); bool result = m_allClips.at(clipId)->addEffect(effectId); if (!result && notify) { QString effectName = EffectsRepository::get()->getName(effectId); pCore->displayMessage(i18n("Cannot add effect %1 to selected clip", effectName), InformationMessage, 500); } return result; } bool TimelineModel::removeFade(int clipId, bool fromStart) { Q_ASSERT(m_allClips.count(clipId) > 0); return m_allClips.at(clipId)->removeFade(fromStart); } std::shared_ptr TimelineModel::getClipEffectStack(int itemId) { Q_ASSERT(m_allClips.count(itemId)); return m_allClips.at(itemId)->m_effectStack; } bool TimelineModel::copyClipEffect(int clipId, const QString &sourceId) { QStringList source = sourceId.split(QLatin1Char('-')); Q_ASSERT(m_allClips.count(clipId) && source.count() == 3); int itemType = source.at(0).toInt(); int itemId = source.at(1).toInt(); int itemRow = source.at(2).toInt(); std::shared_ptr effectStack = pCore->getItemEffectStack(itemType, itemId); return m_allClips.at(clipId)->copyEffect(effectStack, itemRow); } bool TimelineModel::adjustEffectLength(int clipId, const QString &effectId, int duration, int initialDuration) { Q_ASSERT(m_allClips.count(clipId)); Fun undo = []() { return true; }; Fun redo = []() { return true; }; bool res = m_allClips.at(clipId)->adjustEffectLength(effectId, duration, initialDuration, undo, redo); if (res && initialDuration > 0) { PUSH_UNDO(undo, redo, i18n("Adjust Fade")); } return res; } std::shared_ptr TimelineModel::getCompositionPtr(int compoId) const { Q_ASSERT(m_allCompositions.count(compoId) > 0); return m_allCompositions.at(compoId); } int TimelineModel::getNextId() { return TimelineModel::next_id++; } bool TimelineModel::isClip(int id) const { return m_allClips.count(id) > 0; } bool TimelineModel::isComposition(int id) const { return m_allCompositions.count(id) > 0; } bool TimelineModel::isItem(int id) const { return isClip(id) || isComposition(id); } bool TimelineModel::isTrack(int id) const { return m_iteratorTable.count(id) > 0; } bool TimelineModel::isGroup(int id) const { return m_allGroups.count(id) > 0; } void TimelineModel::updateDuration() { int current = m_blackClip->get_playtime() - TimelineModel::seekDuration; int duration = 0; for (const auto &tck : m_iteratorTable) { auto track = (*tck.second); duration = qMax(duration, track->trackDuration()); } if (duration != current) { // update black track length m_blackClip->set_in_and_out(0, duration + TimelineModel::seekDuration); emit durationUpdated(); } } int TimelineModel::duration() const { return m_tractor->get_playtime() - 
TimelineModel::seekDuration; } std::unordered_set TimelineModel::getGroupElements(int clipId) { int groupId = m_groups->getRootId(clipId); return m_groups->getLeaves(groupId); } Mlt::Profile *TimelineModel::getProfile() { return m_profile; } bool TimelineModel::requestReset(Fun &undo, Fun &redo) { std::vector all_ids; for (const auto &track : m_iteratorTable) { all_ids.push_back(track.first); } bool ok = true; for (int trackId : all_ids) { ok = ok && requestTrackDeletion(trackId, undo, redo); } return ok; } void TimelineModel::setUndoStack(std::weak_ptr undo_stack) { m_undoStack = std::move(undo_stack); } int TimelineModel::suggestSnapPoint(int pos, int snapDistance) { int snapped = m_snaps->getClosestPoint(pos); return (qAbs(snapped - pos) < snapDistance ? snapped : pos); } int TimelineModel::getBestSnapPos(int pos, int length, const std::vector &pts, int cursorPosition, int snapDistance) { if (!pts.empty()) { m_snaps->ignore(pts); } m_snaps->addPoint(cursorPosition); int snapped_start = m_snaps->getClosestPoint(pos); int snapped_end = m_snaps->getClosestPoint(pos + length); m_snaps->unIgnore(); m_snaps->removePoint(cursorPosition); int startDiff = qAbs(pos - snapped_start); int endDiff = qAbs(pos + length - snapped_end); if (startDiff < endDiff && startDiff <= snapDistance) { // snap to start return snapped_start; } if (endDiff <= snapDistance) { // snap to end return snapped_end - length; } return -1; } int TimelineModel::getNextSnapPos(int pos) { return m_snaps->getNextPoint(pos); } int TimelineModel::getPreviousSnapPos(int pos) { return m_snaps->getPreviousPoint(pos); } void TimelineModel::addSnap(int pos) { TRACE(pos); return m_snaps->addPoint(pos); } void TimelineModel::removeSnap(int pos) { TRACE(pos); return m_snaps->removePoint(pos); } void TimelineModel::registerComposition(const std::shared_ptr &composition) { int id = composition->getId(); Q_ASSERT(m_allCompositions.count(id) == 0); m_allCompositions[id] = composition; m_groups->createGroupItem(id); } bool TimelineModel::requestCompositionInsertion(const QString &transitionId, int trackId, int position, int length, std::unique_ptr transProps, int &id, bool logUndo) { QWriteLocker locker(&m_lock); // TRACE(transitionId, trackId, position, length, transProps.get(), id, logUndo); Fun undo = []() { return true; }; Fun redo = []() { return true; }; bool result = requestCompositionInsertion(transitionId, trackId, -1, position, length, std::move(transProps), id, undo, redo, logUndo); if (result && logUndo) { PUSH_UNDO(undo, redo, i18n("Insert Composition")); } // TRACE_RES(result); return result; } bool TimelineModel::requestCompositionInsertion(const QString &transitionId, int trackId, int compositionTrack, int position, int length, std::unique_ptr transProps, int &id, Fun &undo, Fun &redo, bool finalMove) { qDebug() << "Inserting compo track" << trackId << "pos" << position << "length" << length; int compositionId = TimelineModel::getNextId(); id = compositionId; Fun local_undo = deregisterComposition_lambda(compositionId); CompositionModel::construct(shared_from_this(), transitionId, compositionId, std::move(transProps)); auto composition = m_allCompositions[compositionId]; Fun local_redo = [composition, this]() { // We capture a shared_ptr to the composition, which means that as long as this undo object lives, the composition object is not deleted. To insert it // back it is sufficient to register it. 
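// Note: the shared_ptr captured by this redo lambda is also what keeps the CompositionModel alive
// once the undo step (deregisterComposition_lambda) has erased it from m_allCompositions; as long as
// the undo/redo entry holding these lambdas exists, redoing the insertion only needs to register the
// still-alive object again.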
registerComposition(composition); return true; }; bool res = requestCompositionMove(compositionId, trackId, compositionTrack, position, true, finalMove, local_undo, local_redo); qDebug() << "trying to move" << trackId << "pos" << position << "success " << res; if (res) { res = requestItemResize(compositionId, length, true, true, local_undo, local_redo, true); qDebug() << "trying to resize" << compositionId << "length" << length << "success " << res; } if (!res) { bool undone = local_undo(); Q_ASSERT(undone); id = -1; return false; } UPDATE_UNDO_REDO(local_redo, local_undo, undo, redo); return true; } Fun TimelineModel::deregisterComposition_lambda(int compoId) { return [this, compoId]() { Q_ASSERT(m_allCompositions.count(compoId) > 0); Q_ASSERT(!m_groups->isInGroup(compoId)); // composition must be ungrouped at this point requestClearSelection(true); clearAssetView(compoId); m_allCompositions.erase(compoId); m_groups->destructGroupItem(compoId); return true; }; } int TimelineModel::getCompositionPosition(int compoId) const { Q_ASSERT(m_allCompositions.count(compoId) > 0); const auto trans = m_allCompositions.at(compoId); return trans->getPosition(); } int TimelineModel::getCompositionPlaytime(int compoId) const { READ_LOCK(); Q_ASSERT(m_allCompositions.count(compoId) > 0); const auto trans = m_allCompositions.at(compoId); int playtime = trans->getPlaytime(); return playtime; } int TimelineModel::getItemPosition(int itemId) const { if (isClip(itemId)) { return getClipPosition(itemId); } return getCompositionPosition(itemId); } int TimelineModel::getItemPlaytime(int itemId) const { if (isClip(itemId)) { return getClipPlaytime(itemId); } return getCompositionPlaytime(itemId); } int TimelineModel::getTrackCompositionsCount(int trackId) const { Q_ASSERT(isTrack(trackId)); return getTrackById_const(trackId)->getCompositionsCount(); } bool TimelineModel::requestCompositionMove(int compoId, int trackId, int position, bool updateView, bool logUndo) { QWriteLocker locker(&m_lock); Q_ASSERT(isComposition(compoId)); if (m_allCompositions[compoId]->getPosition() == position && getCompositionTrackId(compoId) == trackId) { return true; } if (m_groups->isInGroup(compoId)) { // element is in a group. 
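// When the composition belongs to a group, the whole group has to move together: the requested move
// is translated into a (delta_track, delta_pos) pair relative to the composition's current track and
// position, and the work is deferred to requestGroupMove, which applies the same delta to every
// member of the group.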
int groupId = m_groups->getRootId(compoId); int current_trackId = getCompositionTrackId(compoId); int track_pos1 = getTrackPosition(trackId); int track_pos2 = getTrackPosition(current_trackId); int delta_track = track_pos1 - track_pos2; int delta_pos = position - m_allCompositions[compoId]->getPosition(); return requestGroupMove(compoId, groupId, delta_track, delta_pos, updateView, logUndo); } std::function undo = []() { return true; }; std::function redo = []() { return true; }; int min = getCompositionPosition(compoId); int max = min + getCompositionPlaytime(compoId); int tk = getCompositionTrackId(compoId); bool res = requestCompositionMove(compoId, trackId, m_allCompositions[compoId]->getForcedTrack(), position, updateView, logUndo, undo, redo); if (tk > -1) { min = qMin(min, getCompositionPosition(compoId)); max = qMax(max, getCompositionPosition(compoId)); } else { min = getCompositionPosition(compoId); max = min + getCompositionPlaytime(compoId); } if (res && logUndo) { PUSH_UNDO(undo, redo, i18n("Move composition")); checkRefresh(min, max); } return res; } bool TimelineModel::isAudioTrack(int trackId) const { READ_LOCK(); Q_ASSERT(isTrack(trackId)); auto it = m_iteratorTable.at(trackId); return (*it)->isAudioTrack(); } bool TimelineModel::requestCompositionMove(int compoId, int trackId, int compositionTrack, int position, bool updateView, bool finalMove, Fun &undo, Fun &redo) { QWriteLocker locker(&m_lock); Q_ASSERT(isComposition(compoId)); Q_ASSERT(isTrack(trackId)); if (compositionTrack == -1 || (compositionTrack > 0 && trackId == getTrackIndexFromPosition(compositionTrack - 1))) { // qDebug() << "// compo track: " << trackId << ", PREVIOUS TK: " << getPreviousVideoTrackPos(trackId); compositionTrack = getPreviousVideoTrackPos(trackId); } if (compositionTrack == -1) { // it doesn't make sense to insert a composition on the last track qDebug() << "Move failed because of last track"; return false; } qDebug() << "Requesting composition move" << trackId << "," << position << " ( " << compositionTrack << " / " << (compositionTrack > 0 ? getTrackIndexFromPosition(compositionTrack - 1) : 0); Fun local_undo = []() { return true; }; Fun local_redo = []() { return true; }; bool ok = true; int old_trackId = getCompositionTrackId(compoId); bool notifyViewOnly = false; Fun update_model = []() { return true; }; if (updateView && old_trackId == trackId) { // Move on same track, only send view update updateView = false; notifyViewOnly = true; update_model = [compoId, this]() { QModelIndex modelIndex = makeCompositionIndexFromID(compoId); notifyChange(modelIndex, modelIndex, StartRole); return true; }; } if (old_trackId != -1) { Fun delete_operation = []() { return true; }; Fun delete_reverse = []() { return true; }; if (old_trackId != trackId) { delete_operation = [this, compoId]() { bool res = unplantComposition(compoId); if (res) m_allCompositions[compoId]->setATrack(-1, -1); return res; }; int oldAtrack = m_allCompositions[compoId]->getATrack(); delete_reverse = [this, compoId, oldAtrack, updateView]() { m_allCompositions[compoId]->setATrack(oldAtrack, oldAtrack <= 0 ? 
-1 : getTrackIndexFromPosition(oldAtrack - 1)); return replantCompositions(compoId, updateView); }; } ok = delete_operation(); if (!ok) qDebug() << "Move failed because of first delete operation"; if (ok) { if (notifyViewOnly) { PUSH_LAMBDA(update_model, local_undo); } UPDATE_UNDO_REDO(delete_operation, delete_reverse, local_undo, local_redo); ok = getTrackById(old_trackId)->requestCompositionDeletion(compoId, updateView, finalMove, local_undo, local_redo); } if (!ok) { qDebug() << "Move failed because of first deletion request"; bool undone = local_undo(); Q_ASSERT(undone); return false; } } ok = getTrackById(trackId)->requestCompositionInsertion(compoId, position, updateView, finalMove, local_undo, local_redo); if (!ok) qDebug() << "Move failed because of second insertion request"; if (ok) { Fun insert_operation = []() { return true; }; Fun insert_reverse = []() { return true; }; if (old_trackId != trackId) { insert_operation = [this, compoId, compositionTrack, updateView]() { qDebug() << "-------------- ATRACK ----------------\n" << compositionTrack << " = " << getTrackIndexFromPosition(compositionTrack); m_allCompositions[compoId]->setATrack(compositionTrack, compositionTrack <= 0 ? -1 : getTrackIndexFromPosition(compositionTrack - 1)); return replantCompositions(compoId, updateView); }; insert_reverse = [this, compoId]() { bool res = unplantComposition(compoId); if (res) m_allCompositions[compoId]->setATrack(-1, -1); return res; }; } ok = insert_operation(); if (!ok) qDebug() << "Move failed because of second insert operation"; if (ok) { if (notifyViewOnly) { PUSH_LAMBDA(update_model, local_redo); } UPDATE_UNDO_REDO(insert_operation, insert_reverse, local_undo, local_redo); } } if (!ok) { bool undone = local_undo(); Q_ASSERT(undone); return false; } update_model(); UPDATE_UNDO_REDO(local_redo, local_undo, undo, redo); return true; } bool TimelineModel::replantCompositions(int currentCompo, bool updateView) { // We ensure that the compositions are planted in a decreasing order of b_track. // For that, there is no better option than to disconnect every composition and then reinsert everything in the correct order. std::vector> compos; for (const auto &compo : m_allCompositions) { int trackId = compo.second->getCurrentTrackId(); if (trackId == -1 || compo.second->getATrack() == -1) { continue; } // Note: we need to retrieve the position of the track, that is its melt index. 
int trackPos = getTrackMltIndex(trackId); compos.emplace_back(trackPos, compo.first); if (compo.first != currentCompo) { unplantComposition(compo.first); } } // sort by decreasing b_track std::sort(compos.begin(), compos.end(), [](const std::pair &a, const std::pair &b) { return a.first > b.first; }); // replant QScopedPointer field(m_tractor->field()); field->lock(); // Unplant track compositing mlt_service nextservice = mlt_service_get_producer(field->get_service()); mlt_properties properties = MLT_SERVICE_PROPERTIES(nextservice); QString resource = mlt_properties_get(properties, "mlt_service"); mlt_service_type mlt_type = mlt_service_identify(nextservice); QList trackCompositions; while (mlt_type == transition_type) { Mlt::Transition transition((mlt_transition)nextservice); nextservice = mlt_service_producer(nextservice); int internal = transition.get_int("internal_added"); if (internal > 0 && resource != QLatin1String("mix")) { trackCompositions << new Mlt::Transition(transition); field->disconnect_service(transition); transition.disconnect_all_producers(); } if (nextservice == nullptr) { break; } mlt_type = mlt_service_identify(nextservice); properties = MLT_SERVICE_PROPERTIES(nextservice); resource = mlt_properties_get(properties, "mlt_service"); } // Sort track compositing std::sort(trackCompositions.begin(), trackCompositions.end(), [](Mlt::Transition *a, Mlt::Transition *b) { return a->get_b_track() < b->get_b_track(); }); for (const auto &compo : compos) { int aTrack = m_allCompositions[compo.second]->getATrack(); Q_ASSERT(aTrack != -1 && aTrack < m_tractor->count()); int ret = field->plant_transition(*m_allCompositions[compo.second].get(), aTrack, compo.first); qDebug() << "Planting composition " << compo.second << "in " << aTrack << "/" << compo.first << "IN = " << m_allCompositions[compo.second]->getIn() << "OUT = " << m_allCompositions[compo.second]->getOut() << "ret=" << ret; Mlt::Transition &transition = *m_allCompositions[compo.second].get(); transition.set_tracks(aTrack, compo.first); mlt_service consumer = mlt_service_consumer(transition.get_service()); Q_ASSERT(consumer != nullptr); if (ret != 0) { field->unlock(); return false; } } // Replant last tracks compositing while (!trackCompositions.isEmpty()) { Mlt::Transition *firstTr = trackCompositions.takeFirst(); field->plant_transition(*firstTr, firstTr->get_a_track(), firstTr->get_b_track()); } field->unlock(); if (updateView) { QModelIndex modelIndex = makeCompositionIndexFromID(currentCompo); notifyChange(modelIndex, modelIndex, ItemATrack); } return true; } bool TimelineModel::unplantComposition(int compoId) { qDebug() << "Unplanting" << compoId; Mlt::Transition &transition = *m_allCompositions[compoId].get(); mlt_service consumer = mlt_service_consumer(transition.get_service()); Q_ASSERT(consumer != nullptr); QScopedPointer field(m_tractor->field()); field->lock(); field->disconnect_service(transition); int ret = transition.disconnect_all_producers(); mlt_service nextservice = mlt_service_get_producer(transition.get_service()); // mlt_service consumer = mlt_service_consumer(transition.get_service()); Q_ASSERT(nextservice == nullptr); // Q_ASSERT(consumer == nullptr); field->unlock(); return ret != 0; } bool TimelineModel::checkConsistency() { for (const auto &tck : m_iteratorTable) { auto track = (*tck.second); // Check parent/children link for tracks if (auto ptr = track->m_parent.lock()) { if (ptr.get() != this) { qDebug() << "Wrong parent for track" << tck.first; return false; } } else { qDebug() << "NULL parent 
for track" << tck.first; return false; } // check consistency of track if (!track->checkConsistency()) { qDebug() << "Consistency check failed for track" << tck.first; return false; } } // We store all in/outs of clips to check snap points std::map snaps; // Check parent/children link for clips for (const auto &cp : m_allClips) { auto clip = (cp.second); // Check parent/children link for tracks if (auto ptr = clip->m_parent.lock()) { if (ptr.get() != this) { qDebug() << "Wrong parent for clip" << cp.first; return false; } } else { qDebug() << "NULL parent for clip" << cp.first; return false; } if (getClipTrackId(cp.first) != -1) { snaps[clip->getPosition()] += 1; snaps[clip->getPosition() + clip->getPlaytime()] += 1; } if (!clip->checkConsistency()) { qDebug() << "Consistency check failed for clip" << cp.first; return false; } } for (const auto &cp : m_allCompositions) { auto clip = (cp.second); // Check parent/children link for tracks if (auto ptr = clip->m_parent.lock()) { if (ptr.get() != this) { qDebug() << "Wrong parent for compo" << cp.first; return false; } } else { qDebug() << "NULL parent for compo" << cp.first; return false; } if (getCompositionTrackId(cp.first) != -1) { snaps[clip->getPosition()] += 1; snaps[clip->getPosition() + clip->getPlaytime()] += 1; } } // Check snaps auto stored_snaps = m_snaps->_snaps(); if (snaps.size() != stored_snaps.size()) { qDebug() << "Wrong number of snaps: " << snaps.size() << " == " << stored_snaps.size(); return false; } for (auto i = snaps.begin(), j = stored_snaps.begin(); i != snaps.end(); ++i, ++j) { if (*i != *j) { qDebug() << "Wrong snap info at point" << (*i).first; return false; } } // We check consistency with bin model auto binClips = pCore->projectItemModel()->getAllClipIds(); // First step: all clips referenced by the bin model exist and are inserted for (const auto &binClip : binClips) { auto projClip = pCore->projectItemModel()->getClipByBinID(binClip); for (const auto &insertedClip : projClip->m_registeredClips) { if (auto ptr = insertedClip.second.lock()) { if (ptr.get() == this) { // check we are talking of this timeline if (!isClip(insertedClip.first)) { qDebug() << "Bin model registers a bad clip ID" << insertedClip.first; return false; } } } else { qDebug() << "Bin model registers a clip in a NULL timeline" << insertedClip.first; return false; } } } // Second step: all clips are referenced for (const auto &clip : m_allClips) { auto binId = clip.second->m_binClipId; auto projClip = pCore->projectItemModel()->getClipByBinID(binId); if (projClip->m_registeredClips.count(clip.first) == 0) { qDebug() << "Clip " << clip.first << "not registered in bin"; return false; } } // We now check consistency of the compositions. 
For that, we list all compositions of the tractor, and see if we have a matching one in our // m_allCompositions std::unordered_set remaining_compo; for (const auto &compo : m_allCompositions) { if (getCompositionTrackId(compo.first) != -1 && m_allCompositions[compo.first]->getATrack() != -1) { remaining_compo.insert(compo.first); // check validity of the consumer Mlt::Transition &transition = *m_allCompositions[compo.first].get(); mlt_service consumer = mlt_service_consumer(transition.get_service()); Q_ASSERT(consumer != nullptr); } } QScopedPointer field(m_tractor->field()); field->lock(); mlt_service nextservice = mlt_service_get_producer(field->get_service()); mlt_service_type mlt_type = mlt_service_identify(nextservice); while (nextservice != nullptr) { if (mlt_type == transition_type) { auto tr = (mlt_transition)nextservice; int currentTrack = mlt_transition_get_b_track(tr); int currentATrack = mlt_transition_get_a_track(tr); int currentIn = (int)mlt_transition_get_in(tr); int currentOut = (int)mlt_transition_get_out(tr); qDebug() << "looking composition IN: " << currentIn << ", OUT: " << currentOut << ", TRACK: " << currentTrack << " / " << currentATrack; int foundId = -1; // we iterate to try to find a matching compo for (int compoId : remaining_compo) { if (getTrackMltIndex(getCompositionTrackId(compoId)) == currentTrack && m_allCompositions[compoId]->getATrack() == currentATrack && m_allCompositions[compoId]->getIn() == currentIn && m_allCompositions[compoId]->getOut() == currentOut) { foundId = compoId; break; } } if (foundId == -1) { qDebug() << "Error, we didn't find matching composition IN: " << currentIn << ", OUT: " << currentOut << ", TRACK: " << currentTrack << " / " << currentATrack; field->unlock(); return false; } qDebug() << "Found"; remaining_compo.erase(foundId); } nextservice = mlt_service_producer(nextservice); if (nextservice == nullptr) { break; } mlt_type = mlt_service_identify(nextservice); } field->unlock(); if (!remaining_compo.empty()) { qDebug() << "Error: We found less compositions than expected. 
Compositions that have not been found:"; for (int compoId : remaining_compo) { qDebug() << compoId; } return false; } // We check consistency of groups if (!m_groups->checkConsistency(true, true)) { qDebug() << "== ERROR IN GROUP CONSISTENCY"; return false; } // Check that the selection is in a valid state: if (m_currentSelection != -1 && !isClip(m_currentSelection) && !isComposition(m_currentSelection) && !isGroup(m_currentSelection)) { qDebug() << "Selection is in inconsistent state"; return false; } return true; } void TimelineModel::setTimelineEffectsEnabled(bool enabled) { m_timelineEffectsEnabled = enabled; // propagate info to clips for (const auto &clip : m_allClips) { clip.second->setTimelineEffectsEnabled(enabled); } // TODO if we support track effects, they should be disabled here too } std::shared_ptr TimelineModel::producer() { return std::make_shared(tractor()); } void TimelineModel::checkRefresh(int start, int end) { if (m_blockRefresh) { return; } int currentPos = tractor()->position(); if (currentPos >= start && currentPos < end) { emit requestMonitorRefresh(); } } void TimelineModel::clearAssetView(int itemId) { emit requestClearAssetView(itemId); } std::shared_ptr TimelineModel::getCompositionParameterModel(int compoId) const { READ_LOCK(); Q_ASSERT(isComposition(compoId)); return std::static_pointer_cast(m_allCompositions.at(compoId)); } std::shared_ptr TimelineModel::getClipEffectStackModel(int clipId) const { READ_LOCK(); Q_ASSERT(isClip(clipId)); return std::static_pointer_cast(m_allClips.at(clipId)->m_effectStack); } std::shared_ptr TimelineModel::getTrackEffectStackModel(int trackId) { READ_LOCK(); Q_ASSERT(isTrack(trackId)); return getTrackById(trackId)->m_effectStack; } QStringList TimelineModel::extractCompositionLumas() const { QStringList urls; for (const auto &compo : m_allCompositions) { QString luma = compo.second->getProperty(QStringLiteral("resource")); if (!luma.isEmpty()) { urls << QUrl::fromLocalFile(luma).toLocalFile(); } } urls.removeDuplicates(); return urls; } void TimelineModel::adjustAssetRange(int clipId, int in, int out) { Q_UNUSED(clipId) Q_UNUSED(in) Q_UNUSED(out) // pCore->adjustAssetRange(clipId, in, out); } void TimelineModel::requestClipReload(int clipId) { std::function local_undo = []() { return true; }; std::function local_redo = []() { return true; }; // in order to make the producer change effective, we need to unplant / replant the clip in int track int old_trackId = getClipTrackId(clipId); int oldPos = getClipPosition(clipId); int oldOut = getClipIn(clipId) + getClipPlaytime(clipId); // Check if clip out is longer than actual producer duration (if user forced duration) std::shared_ptr binClip = pCore->projectItemModel()->getClipByBinID(getClipBinId(clipId)); bool refreshView = oldOut > (int)binClip->frameDuration(); if (old_trackId != -1) { getTrackById(old_trackId)->requestClipDeletion(clipId, refreshView, true, local_undo, local_redo); } if (old_trackId != -1) { m_allClips[clipId]->refreshProducerFromBin(); getTrackById(old_trackId)->requestClipInsertion(clipId, oldPos, refreshView, true, local_undo, local_redo); } } void TimelineModel::replugClip(int clipId) { int old_trackId = getClipTrackId(clipId); if (old_trackId != -1) { getTrackById(old_trackId)->replugClip(clipId); } } void TimelineModel::requestClipUpdate(int clipId, const QVector &roles) { QModelIndex modelIndex = makeClipIndexFromID(clipId); if (roles.contains(TimelineModel::ReloadThumbRole)) { m_allClips[clipId]->forceThumbReload = 
!m_allClips[clipId]->forceThumbReload; } notifyChange(modelIndex, modelIndex, roles); } bool TimelineModel::requestClipTimeWarp(int clipId, double speed, Fun &undo, Fun &redo) { QWriteLocker locker(&m_lock); if (qFuzzyCompare(speed, m_allClips[clipId]->getSpeed())) { return true; } std::function local_undo = []() { return true; }; std::function local_redo = []() { return true; }; int oldPos = getClipPosition(clipId); // in order to make the producer change effective, we need to unplant / replant the clip in int track bool success = true; int trackId = getClipTrackId(clipId); if (trackId != -1) { success = success && getTrackById(trackId)->requestClipDeletion(clipId, true, true, local_undo, local_redo); } if (success) { success = m_allClips[clipId]->useTimewarpProducer(speed, local_undo, local_redo); } if (trackId != -1) { success = success && getTrackById(trackId)->requestClipInsertion(clipId, oldPos, true, true, local_undo, local_redo); } if (!success) { local_undo(); return false; } UPDATE_UNDO_REDO(local_redo, local_undo, undo, redo); return success; } bool TimelineModel::requestClipTimeWarp(int clipId, double speed) { QWriteLocker locker(&m_lock); TRACE(clipId, speed); Fun undo = []() { return true; }; Fun redo = []() { return true; }; // Get main clip info int trackId = getClipTrackId(clipId); bool result = true; if (trackId != -1) { // Check if clip has a split partner int splitId = m_groups->getSplitPartner(clipId); if (splitId > -1) { result = requestClipTimeWarp(splitId, speed / 100.0, undo, redo); } if (result) { result = requestClipTimeWarp(clipId, speed / 100.0, undo, redo); } else { pCore->displayMessage(i18n("Change speed failed"), ErrorMessage); undo(); TRACE_RES(false); return false; } } else { // If clip is not inserted on a track, we just change the producer result = m_allClips[clipId]->useTimewarpProducer(speed, undo, redo); } if (result) { PUSH_UNDO(undo, redo, i18n("Change clip speed")); } TRACE_RES(result); return result; } const QString TimelineModel::getTrackTagById(int trackId) const { READ_LOCK(); Q_ASSERT(isTrack(trackId)); bool isAudio = getTrackById_const(trackId)->isAudioTrack(); int count = 1; int totalAudio = 2; auto it = m_allTracks.begin(); bool found = false; while ((isAudio || !found) && it != m_allTracks.end()) { if ((*it)->isAudioTrack()) { totalAudio++; if (isAudio && !found) { count++; } } else if (!isAudio) { count++; } if ((*it)->getId() == trackId) { found = true; } it++; } return isAudio ? QStringLiteral("A%1").arg(totalAudio - count) : QStringLiteral("V%1").arg(count - 1); } void TimelineModel::updateProfile(Mlt::Profile *profile) { m_profile = profile; m_tractor->set_profile(*m_profile); } int TimelineModel::getBlankSizeNearClip(int clipId, bool after) const { READ_LOCK(); Q_ASSERT(m_allClips.count(clipId) > 0); int trackId = getClipTrackId(clipId); if (trackId != -1) { return getTrackById_const(trackId)->getBlankSizeNearClip(clipId, after); } return 0; } int TimelineModel::getPreviousTrackId(int trackId) { READ_LOCK(); Q_ASSERT(isTrack(trackId)); auto it = m_iteratorTable.at(trackId); bool audioWanted = (*it)->isAudioTrack(); while (it != m_allTracks.begin()) { --it; if (it != m_allTracks.begin() && (*it)->isAudioTrack() == audioWanted) { break; } } return it == m_allTracks.begin() ? 
trackId : (*it)->getId(); } int TimelineModel::getNextTrackId(int trackId) { READ_LOCK(); Q_ASSERT(isTrack(trackId)); auto it = m_iteratorTable.at(trackId); bool audioWanted = (*it)->isAudioTrack(); while (it != m_allTracks.end()) { ++it; if (it != m_allTracks.end() && (*it)->isAudioTrack() == audioWanted) { break; } } return it == m_allTracks.end() ? trackId : (*it)->getId(); } bool TimelineModel::requestClearSelection(bool onDeletion) { QWriteLocker locker(&m_lock); TRACE(); if (m_currentSelection == -1) { TRACE_RES(true); return true; } if (isGroup(m_currentSelection)) { if (m_groups->getType(m_currentSelection) == GroupType::Selection) { + // Reset offset display on clips + std::unordered_set items = getCurrentSelection(); + for (auto &id : items) { + if (isClip(id)) { + m_allClips[id]->clearOffset(); + } + } m_groups->destructGroupItem(m_currentSelection); } } else { Q_ASSERT(onDeletion || isClip(m_currentSelection) || isComposition(m_currentSelection)); } m_currentSelection = -1; emit selectionChanged(); TRACE_RES(true); return true; } void TimelineModel::requestClearSelection(bool onDeletion, Fun &undo, Fun &redo) { Fun operation = [this, onDeletion]() { requestClearSelection(onDeletion); return true; }; Fun reverse = [this, clips = getCurrentSelection()]() { return requestSetSelection(clips); }; if (operation()) { UPDATE_UNDO_REDO(operation, reverse, undo, redo); } } std::unordered_set TimelineModel::getCurrentSelection() const { READ_LOCK(); if (m_currentSelection == -1) { return {}; } if (isGroup(m_currentSelection)) { return m_groups->getLeaves(m_currentSelection); } else { Q_ASSERT(isClip(m_currentSelection) || isComposition(m_currentSelection)); return {m_currentSelection}; } } void TimelineModel::requestAddToSelection(int itemId, bool clear) { QWriteLocker locker(&m_lock); TRACE(itemId, clear); if (clear) { requestClearSelection(); } std::unordered_set selection = getCurrentSelection(); if (selection.count(itemId) == 0) { selection.insert(itemId); requestSetSelection(selection); } } void TimelineModel::requestRemoveFromSelection(int itemId) { QWriteLocker locker(&m_lock); TRACE(itemId); std::unordered_set all_items = {itemId}; int parentGroup = m_groups->getDirectAncestor(itemId); if (parentGroup > -1 && m_groups->getType(parentGroup) != GroupType::Selection) { all_items = m_groups->getLeaves(parentGroup); } std::unordered_set selection = getCurrentSelection(); for (int current_itemId : all_items) { if (selection.count(current_itemId) > 0) { selection.erase(current_itemId); } } requestSetSelection(selection); } bool TimelineModel::requestSetSelection(const std::unordered_set &ids) { QWriteLocker locker(&m_lock); TRACE(ids); requestClearSelection(); // if the items are in groups, we must retrieve their topmost containing groups std::unordered_set roots; std::transform(ids.begin(), ids.end(), std::inserter(roots, roots.begin()), [&](int id) { return m_groups->getRootId(id); }); bool result = true; if (roots.size() == 0) { m_currentSelection = -1; } else if (roots.size() == 1) { m_currentSelection = *(roots.begin()); } else { Fun undo = []() { return true; }; Fun redo = []() { return true; }; + if (ids.size() == 2) { + // Check if we selected 2 clips from the same master + QList pairIds; + for(auto &id : roots) { + if (isClip(id)) { + pairIds << id; + } + } + if (pairIds.size() == 2 && getClipBinId(pairIds.at(0)) == getClipBinId(pairIds.at(1))) { + // Check if they have same bin id + // Both clips have same bin ID, display offset + int pos1 = getClipPosition(pairIds.at(0)); + 
int pos2 = getClipPosition(pairIds.at(1)); + if (pos2 > pos1) { + int offset = pos2 - pos1 - getClipIn(pairIds.at(1)) - getClipIn(pairIds.at(0)); + if (offset != 0) { + m_allClips[pairIds.at(1)]->setOffset(offset); + m_allClips[pairIds.at(0)]->setOffset(-offset); + } + } else { + int offset = pos1 - pos2 - getClipIn(pairIds.at(0)) - getClipIn(pairIds.at(1)); + if (offset != 0) { + m_allClips[pairIds.at(0)]->setOffset(offset); + m_allClips[pairIds.at(1)]->setOffset(-offset); + } + } + } + } result = (m_currentSelection = m_groups->groupItems(ids, undo, redo, GroupType::Selection)) >= 0; Q_ASSERT(m_currentSelection >= 0); } emit selectionChanged(); return result; } bool TimelineModel::requestSetSelection(const std::unordered_set &ids, Fun &undo, Fun &redo) { Fun reverse = [this]() { requestClearSelection(false); return true; }; Fun operation = [this, ids]() { return requestSetSelection(ids); }; if (operation()) { UPDATE_UNDO_REDO(operation, reverse, undo, redo); return true; } return false; } diff --git a/src/timeline2/model/timelinemodel.hpp b/src/timeline2/model/timelinemodel.hpp index 0c25ab917..8eea22365 100644 --- a/src/timeline2/model/timelinemodel.hpp +++ b/src/timeline2/model/timelinemodel.hpp @@ -1,790 +1,791 @@ /*************************************************************************** * Copyright (C) 2017 by Nicolas Carion * * This file is part of Kdenlive. See www.kdenlive.org. * * * * This program is free software; you can redistribute it and/or modify * * it under the terms of the GNU General Public License as published by * * the Free Software Foundation; either version 2 of the License, or * * (at your option) version 3 or any later version accepted by the * * membership of KDE e.V. (or its successor approved by the membership * * of KDE e.V.), which shall act as a proxy defined in Section 14 of * * version 3 of the license. * * * * This program is distributed in the hope that it will be useful, * * but WITHOUT ANY WARRANTY; without even the implied warranty of * * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * * GNU General Public License for more details. * * * * You should have received a copy of the GNU General Public License * * along with this program. If not, see . * ***************************************************************************/ #ifndef TIMELINEMODEL_H #define TIMELINEMODEL_H #include "definitions.h" #include "undohelper.hpp" #include #include #include #include #include #include #include #include class AssetParameterModel; class EffectStackModel; class ClipModel; class CompositionModel; class DocUndoStack; class GroupsModel; class SnapModel; class TimelineItemModel; class TrackModel; /* @brief This class represents a Timeline object, as viewed by the backend. In general, the Gui associated with it will send modification queries (such as resize or move), and this class authorize them or not depending on the validity of the modifications. This class also serves to keep track of all objects. It holds pointers to all tracks and clips, and gives them unique IDs on creation. These Ids are used in any interactions with the objects and have nothing to do with Melt IDs. This is the entry point for any modifications that has to be made on an element. The dataflow beyond this entry point may vary, for example when the user request a clip resize, the call is deferred to the clip itself, that check if there is enough data to extend by the requested amount, compute the new in and out, and then asks the track if there is enough room for extension. 
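For reference, the unique ids mentioned above come from a single monotonically increasing counter (see getNextId() and the static next_id member below). Clips, compositions and tracks all draw from that counter, so an id identifies at most one object, and isClip(), isComposition(), isTrack() and isGroup() can be used to find out what kind of object an id refers to. A trimmed-down, standalone sketch of this allocation scheme (illustrative only; the names mirror the real members):

    static int next_id = 0;               // next valid id to assign
    int getNextId() { return next_id++; }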
To avoid any confusion on which function to call first, remember to always call the version in timeline. This is also required to generate the Undo/Redo operators. The undo/redo system is designed around lambda functions. Each time a function executes an elementary change to the model, it writes the corresponding operation and its reverse, respectively in the redo and the undo lambdas. This way, if an operation fails for some reason, we can easily cancel the steps that have been done so far without corrupting anything. The other advantage is that operations are easy to compose, and you get an undo/redo pair for free no matter in which way you combine them (a minimal sketch of this composition pattern is given further down in this comment).

Most of the modification functions are named requestObjectAction. E.g., if the object is a clip and we want to move it, we call requestClipMove. These functions always return a bool indicating success, and when they return false they should guarantee that nothing has been modified. Most of the time, these functions come in two versions: the first one is the entry point if you want to perform only the action (and not compose it with other actions). This version will generally automatically push an Undo object on the Application stack, in case the user later wants to cancel the operation. It also generally goes the extra mile to ensure the operation is done in a way that matches the user's expectation: for example requestClipMove checks whether the clip belongs to a group and in that case actually moves the full group. The other version of the function, if it exists, is intended for composition (using the action as part of a complex operation). It takes as input the undo/redo lambdas corresponding to the action that is being performed and accumulates on them. Note that this version does the minimal job: in the example of requestClipMove, it will not move the full group if the clip is in a group.

Generally speaking, we don't check ahead of time whether an action is going to succeed before applying it. We just apply it naively, and if it fails at some point, we use the undo operator that we are constructing on the fly to revert what we have done so far. For example, when we move a group of clips, we apply the move operation to all the clips inside this group (in the right order). If none fails, we are good; otherwise we revert what we've already done. This kind of behaviour frees us from the burden of simulating the actions before actually applying them. This is a good thing because this simulation step would be very sensitive to corruptions and small discrepancies, which we try to avoid at all cost.

It derives from AbstractItemModel (indirectly through TimelineItemModel) to provide the model to the QML interface. An item model is organized with rows and columns that contain the data. It can be hierarchical, meaning that a given index (row, column) can contain another level of rows and columns. Our organization is as follows: at the top level, each row contains a track. These rows are in the same order as in the actual timeline. Then each of these rows itself contains sub-rows that correspond to the clips. Here the order of these sub-rows is unrelated to the chronological order of the clips, but corresponds to their Id order. For example, if you have three clips, with ids 12, 45 and 150, they will receive row indices 0, 1 and 2. This is because the order actually doesn't matter since the clips are rendered based on their positions rather than their row order. The id order has been chosen because it is consistent with a valid ordering of the clips.
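As a concrete illustration of the undo/redo composition described above, here is a simplified, self-contained sketch. It is not the actual Kdenlive code (applyAndAccumulate and doComplexEdit are hypothetical names), but the UPDATE_UNDO_REDO and PUSH_UNDO macros used throughout this file follow essentially this shape:

    #include <functional>
    using Fun = std::function<bool()>;

    // Run "operation" now; on success, fold it and its reverse into the caller's accumulated
    // undo/redo pair. The composed redo replays the old steps first and then the new one; the
    // composed undo reverts the new step first and then the old ones.
    bool applyAndAccumulate(const Fun &operation, const Fun &reverse, Fun &undo, Fun &redo)
    {
        if (!operation()) {
            return false; // nothing was modified, nothing to revert
        }
        redo = [oldRedo = redo, operation]() { return oldRedo() && operation(); };
        undo = [oldUndo = undo, reverse]() { return reverse() && oldUndo(); };
        return true;
    }

    // Typical caller: start from neutral lambdas, chain elementary steps, and roll back the steps
    // that already succeeded if any later step fails.
    bool doComplexEdit(const Fun &step1, const Fun &revert1, const Fun &step2, const Fun &revert2)
    {
        Fun undo = []() { return true; };
        Fun redo = []() { return true; };
        bool ok = applyAndAccumulate(step1, revert1, undo, redo) && applyAndAccumulate(step2, revert2, undo, redo);
        if (!ok) {
            undo(); // revert the part that was already applied
            return false;
        }
        // on success the (undo, redo) pair would be pushed onto the undo stack
        return true;
    }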
The columns are never used, so the data is always in column 0 An ModelIndex in the ItemModel consists of a row number, a column number, and a parent index. In our case, tracks have always an empty parent, and the clip have a track index as parent. A ModelIndex can also store one additional integer, and we exploit this feature to store the unique ID of the object it corresponds to. */ class TimelineModel : public QAbstractItemModel_shared_from_this { Q_OBJECT protected: /* @brief this constructor should not be called. Call the static construct instead */ TimelineModel(Mlt::Profile *profile, std::weak_ptr undo_stack); public: friend class TrackModel; template friend class MoveableItem; friend class ClipModel; friend class CompositionModel; friend class GroupsModel; friend class TimelineController; friend struct TimelineFunctions; /// Two level model: tracks and clips on track enum { NameRole = Qt::UserRole + 1, ResourceRole, /// clip only ServiceRole, /// clip only StartRole, /// clip only BinIdRole, /// clip only TrackIdRole, FakeTrackIdRole, FakePositionRole, MarkersRole, /// clip only StatusRole, /// clip only TypeRole, /// clip only KeyframesRole, DurationRole, InPointRole, /// clip only OutPointRole, /// clip only FramerateRole, /// clip only GroupedRole, /// clip only HasAudio, /// clip only CanBeAudioRole, /// clip only CanBeVideoRole, /// clip only IsDisabledRole, /// track only IsAudioRole, SortRole, ShowKeyframesRole, AudioLevelsRole, /// clip only AudioChannelsRole, /// clip only IsCompositeRole, /// track only IsLockedRole, /// track only HeightRole, /// track only TrackTagRole, /// track only FadeInRole, /// clip only FadeOutRole, /// clip only FileHashRole, /// clip only SpeedRole, /// clip only ReloadThumbRole, /// clip only + PositionOffsetRole,/// clip only ItemATrack, /// composition only ItemIdRole, ThumbsFormatRole, /// track only EffectNamesRole, // track and clip only EffectsEnabledRole, // track and clip only GrabbedRole, /// clip+composition only TrackActiveRole, /// track only AudioRecordRole /// track only }; ~TimelineModel() override; Mlt::Tractor *tractor() const { return m_tractor.get(); } /* @brief Load tracks from the current tractor, used on project opening */ void loadTractor(); /* @brief Returns the current tractor's producer, useful fo control seeking, playing, etc */ std::shared_ptr producer(); Mlt::Profile *getProfile(); /* @brief returns the number of tracks */ int getTracksCount() const; /* @brief returns the track index (id) from its position */ int getTrackIndexFromPosition(int pos) const; /* @brief returns the track index (id) from its position */ Q_INVOKABLE bool isAudioTrack(int trackId) const; /* @brief returns the number of clips */ int getClipsCount() const; /* @brief returns the number of compositions */ int getCompositionsCount() const; /* @brief Returns the id of the track containing clip (-1 if it is not inserted) @param clipId Id of the clip to test */ Q_INVOKABLE int getClipTrackId(int clipId) const; /* @brief Returns the id of the track containing composition (-1 if it is not inserted) @param clipId Id of the composition to test */ Q_INVOKABLE int getCompositionTrackId(int compoId) const; /* @brief Convenience function that calls either of the previous ones based on item type*/ Q_INVOKABLE int getItemTrackId(int itemId) const; Q_INVOKABLE int getCompositionPosition(int compoId) const; int getCompositionPlaytime(int compoId) const; /* Returns an item position, item can be clip or composition */ Q_INVOKABLE int getItemPosition(int itemId) 
const; /* Returns an item duration, item can be clip or composition */ int getItemPlaytime(int itemId) const; /* Returns the current speed of a clip */ double getClipSpeed(int clipId) const; /* @brief Helper function to query the amount of free space around a clip * @param clipId: the queried clip. If it is not inserted on a track, this functions returns 0 * @param after: if true, we return the blank after the clip, otherwise, before. */ int getBlankSizeNearClip(int clipId, bool after) const; /* @brief if the clip belongs to a AVSplit group, then return the id of the other corresponding clip. Otherwise, returns -1 */ int getClipSplitPartner(int clipId) const; /* @brief Helper function that returns true if the given ID corresponds to a clip */ Q_INVOKABLE bool isClip(int id) const; /* @brief Helper function that returns true if the given ID corresponds to a composition */ Q_INVOKABLE bool isComposition(int id) const; /* @brief Helper function that returns true if the given ID corresponds to a timeline item (composition or clip) */ Q_INVOKABLE bool isItem(int id) const; /* @brief Helper function that returns true if the given ID corresponds to a track */ Q_INVOKABLE bool isTrack(int id) const; /* @brief Helper function that returns true if the given ID corresponds to a group */ Q_INVOKABLE bool isGroup(int id) const; /* @brief Given a composition Id, returns its underlying parameter model */ std::shared_ptr getCompositionParameterModel(int compoId) const; /* @brief Given a clip Id, returns its underlying effect stack model */ std::shared_ptr getClipEffectStackModel(int clipId) const; /* @brief Returns the position of clip (-1 if it is not inserted) @param clipId Id of the clip to test */ Q_INVOKABLE int getClipPosition(int clipId) const; Q_INVOKABLE bool addClipEffect(int clipId, const QString &effectId, bool notify = true); Q_INVOKABLE bool addTrackEffect(int trackId, const QString &effectId); bool removeFade(int clipId, bool fromStart); Q_INVOKABLE bool copyClipEffect(int clipId, const QString &sourceId); Q_INVOKABLE bool copyTrackEffect(int trackId, const QString &sourceId); bool adjustEffectLength(int clipId, const QString &effectId, int duration, int initialDuration); /* @brief Returns the closest snap point within snapDistance */ Q_INVOKABLE int suggestSnapPoint(int pos, int snapDistance); /** @brief Return the previous track of same type as source trackId, or trackId if no track found */ Q_INVOKABLE int getPreviousTrackId(int trackId); /** @brief Return the next track of same type as source trackId, or trackId if no track found */ Q_INVOKABLE int getNextTrackId(int trackId); /* @brief Returns the in cut position of a clip @param clipId Id of the clip to test */ int getClipIn(int clipId) const; /* @brief Returns the clip state (audio/video only) */ PlaylistState::ClipState getClipState(int clipId) const; /* @brief Returns the bin id of the clip master @param clipId Id of the clip to test */ const QString getClipBinId(int clipId) const; /* @brief Returns the duration of a clip @param clipId Id of the clip to test */ int getClipPlaytime(int clipId) const; /* @brief Returns the size of the clip's frame (widthxheight) @param clipId Id of the clip to test */ QSize getClipFrameSize(int clipId) const; /* @brief Returns the number of clips in a given track @param trackId Id of the track to test */ int getTrackClipsCount(int trackId) const; /* @brief Returns the number of compositions in a given track @param trackId Id of the track to test */ int getTrackCompositionsCount(int trackId) const; /* 
@brief Returns the position of the track in the order of the tracks @param trackId Id of the track to test */ int getTrackPosition(int trackId) const; /* @brief Returns the track's index in terms of mlt's internal representation */ int getTrackMltIndex(int trackId) const; /* @brief Returns a sort position for tracks. * @param separated: if true, the tracks will be sorted like: V2,V1,A1,A2 * Otherwise, the tracks will be sorted like V2,A2,V1,A1 */ int getTrackSortValue(int trackId, bool separated) const; /* @brief Returns the ids of the tracks below the given track in the order of the tracks Returns an empty list if no track available @param trackId Id of the track to test */ QList getLowerTracksId(int trackId, TrackType type = TrackType::AnyTrack) const; /* @brief Returns the MLT track index of the video track just below the given track @param trackId Id of the track to test */ int getPreviousVideoTrackPos(int trackId) const; /* @brief Returns the Track id of the video track just below the given track @param trackId Id of the track to test */ int getPreviousVideoTrackIndex(int trackId) const; /* @brief Returns the Id of the corresponding audio track. If trackId corresponds to video1, this will return audio 1 and so on */ int getMirrorAudioTrackId(int trackId) const; int getMirrorVideoTrackId(int trackId) const; int getMirrorTrackId(int trackId) const; /* @brief Move a clip to a specific position This action is undoable Returns true on success. If it fails, nothing is modified. If the clip is not in inserted in a track yet, it gets inserted for the first time. If the clip is in a group, the call is deferred to requestGroupMove @param clipId is the ID of the clip @param trackId is the ID of the target track @param position is the position where we want to move @param updateView if set to false, no signal is sent to qml @param logUndo if set to false, no undo object is stored */ Q_INVOKABLE bool requestClipMove(int clipId, int trackId, int position, bool updateView = true, bool logUndo = true, bool invalidateTimeline = false); /* @brief Move a composition to a specific position This action is undoable Returns true on success. If it fails, nothing is modified. If the clip is not in inserted in a track yet, it gets inserted for the first time. 
If the clip is in a group, the call is deferred to requestGroupMove @param transid is the ID of the composition @param trackId is the ID of the track */ Q_INVOKABLE bool requestCompositionMove(int compoId, int trackId, int position, bool updateView = true, bool logUndo = true); /* Same function, but accumulates undo and redo, and doesn't check for group*/ bool requestClipMove(int clipId, int trackId, int position, bool updateView, bool invalidateTimeline, Fun &undo, Fun &redo); bool requestCompositionMove(int transid, int trackId, int compositionTrack, int position, bool updateView, bool finalMove, Fun &undo, Fun &redo); /* When timeline edit mode is insert or overwrite, we fake the move (as it will overlap existing clips, and only process the real move on drop */ bool requestFakeClipMove(int clipId, int trackId, int position, bool updateView, bool invalidateTimeline, Fun &undo, Fun &redo); bool requestFakeClipMove(int clipId, int trackId, int position, bool updateView, bool logUndo, bool invalidateTimeline); bool requestFakeGroupMove(int clipId, int groupId, int delta_track, int delta_pos, bool updateView = true, bool logUndo = true); bool requestFakeGroupMove(int clipId, int groupId, int delta_track, int delta_pos, bool updateView, bool finalMove, Fun &undo, Fun &redo, bool allowViewRefresh = true); /* @brief Given an intended move, try to suggest a more valid one (accounting for snaps and missing UI calls) @param clipId id of the clip to move @param trackId id of the target track @param position target position @param snapDistance the maximum distance for a snap result, -1 for no snapping of the clip @param dontRefreshMasterClip when false, no view refresh is attempted */ Q_INVOKABLE int suggestItemMove(int itemId, int trackId, int position, int cursorPosition, int snapDistance = -1); Q_INVOKABLE int suggestClipMove(int clipId, int trackId, int position, int cursorPosition, int snapDistance = -1); Q_INVOKABLE int suggestCompositionMove(int compoId, int trackId, int position, int cursorPosition, int snapDistance = -1); /* @brief Request clip insertion at given position. This action is undoable Returns true on success. If it fails, nothing is modified. @param binClipId id of the clip in the bin @param track Id of the track where to insert @param position Requested position @param ID return parameter of the id of the inserted clip @param logUndo if set to false, no undo object is stored @param refreshView whether the view should be refreshed @param useTargets: if true, the Audio/video split will occur on the set targets. Otherwise, they will be computed as an offset from the middle line */ bool requestClipInsertion(const QString &binClipId, int trackId, int position, int &id, bool logUndo = true, bool refreshView = false, bool useTargets = true); /* Same function, but accumulates undo and redo*/ bool requestClipInsertion(const QString &binClipId, int trackId, int position, int &id, bool logUndo, bool refreshView, bool useTargets, Fun &undo, Fun &redo); protected: /* @brief Creates a new clip instance without inserting it. This action is undoable, returns true on success @param binClipId: Bin id of the clip to insert @param id: return parameter for the id of the newly created clip. @param state: The desired clip state (original, audio/video only). */ bool requestClipCreation(const QString &binClipId, int &id, PlaylistState::ClipState state, double speed, Fun &undo, Fun &redo); public: /* @brief Deletes the given clip or composition from the timeline. This action is undoable. 
Returns true on success. If it fails, nothing is modified. If the clip/composition is in a group, the call is deferred to requestGroupDeletion @param clipId is the ID of the clip/composition @param logUndo if set to false, no undo object is stored */ Q_INVOKABLE bool requestItemDeletion(int itemId, bool logUndo = true); /* Same function, but accumulates undo and redo*/ bool requestItemDeletion(int itemId, Fun &undo, Fun &redo); /* @brief Move a group to a specific position This action is undoable Returns true on success. If it fails, nothing is modified. If the clips in the group are not in inserted in a track yet, they get inserted for the first time. @param clipId is the id of the clip that triggers the group move @param groupId is the id of the group @param delta_track is the delta applied to the track index @param delta_pos is the requested position change @param updateView if set to false, no signal is sent to qml for the clip clipId @param logUndo if set to true, an undo object is created @param allowViewRefresh if false, the view will never get updated (useful for suggestMove) */ bool requestGroupMove(int itemId, int groupId, int delta_track, int delta_pos, bool updateView = true, bool logUndo = true); bool requestGroupMove(int itemId, int groupId, int delta_track, int delta_pos, bool updateView, bool finalMove, Fun &undo, Fun &redo, bool allowViewRefresh = true); /* @brief Deletes all clips inside the group that contains the given clip. This action is undoable Note that if their is a hierarchy of groups, all of them will be deleted. Returns true on success. If it fails, nothing is modified. @param clipId is the id of the clip that triggers the group deletion */ Q_INVOKABLE bool requestGroupDeletion(int clipId, bool logUndo = true); bool requestGroupDeletion(int clipId, Fun &undo, Fun &redo); /* @brief Change the duration of an item (clip or composition) This action is undoable Returns the real size reached (can be different, if snapping occurs). If it fails, nothing is modified, and -1 is returned @param itemId is the ID of the item @param size is the new size of the item @param right is true if we change the right side of the item, false otherwise @param logUndo if set to true, an undo object is created @param snap if set to true, the resize order will be coerced to use the snapping grid */ Q_INVOKABLE int requestItemResize(int itemId, int size, bool right, bool logUndo = true, int snapDistance = -1, bool allowSingleResize = false); /* Same function, but accumulates undo and redo and doesn't deal with snapping*/ bool requestItemResize(int itemId, int size, bool right, bool logUndo, Fun &undo, Fun &redo, bool blockUndo = false); /* @brief Group together a set of ids The ids are either a group ids or clip ids. The involved clip must already be inserted in a track This action is undoable Returns the group id on success, -1 if it fails and nothing is modified. Typically, ids would be ids of clips, but for convenience, some of them can be ids of groups as well. @param ids Set of ids to group */ int requestClipsGroup(const std::unordered_set &ids, bool logUndo = true, GroupType type = GroupType::Normal); int requestClipsGroup(const std::unordered_set &ids, Fun &undo, Fun &redo, GroupType type = GroupType::Normal); /* @brief Destruct the topmost group containing clip This action is undoable Returns true on success. If it fails, nothing is modified. 
@param id of the clip to degroup (all clips belonging to the same group will be ungrouped as well) */ bool requestClipUngroup(int itemId, bool logUndo = true); /* Same function, but accumulates undo and redo*/ bool requestClipUngroup(int itemId, Fun &undo, Fun &redo); // convenience functions for several ids at the same time bool requestClipsUngroup(const std::unordered_set &itemIds, bool logUndo = true); /* @brief Create a track at given position This action is undoable Returns true on success. If it fails, nothing is modified. @param Requested position (order). If set to -1, the track is inserted last. @param id is a return parameter that holds the id of the resulting track (-1 on failure) */ bool requestTrackInsertion(int pos, int &id, const QString &trackName = QString(), bool audioTrack = false); /* Same function, but accumulates undo and redo*/ bool requestTrackInsertion(int pos, int &id, const QString &trackName, bool audioTrack, Fun &undo, Fun &redo, bool updateView = true); /* @brief Delete track with given id This also deletes all the clips contained in the track. This action is undoable Returns true on success. If it fails, nothing is modified. @param trackId id of the track to delete */ bool requestTrackDeletion(int trackId); /* Same function, but accumulates undo and redo*/ bool requestTrackDeletion(int trackId, Fun &undo, Fun &redo); /* @brief Get project duration Returns the duration in frames */ int duration() const; static int seekDuration; // Duration after project end where seeking is allowed /* @brief Get all the elements of the same group as the given clip. If there is a group hierarchy, only the topmost group is considered. @param clipId id of the clip to test */ std::unordered_set getGroupElements(int clipId); /* @brief Removes all the elements on the timeline (tracks and clips) */ bool requestReset(Fun &undo, Fun &redo); /* @brief Updates the current the pointer to the current undo_stack Must be called for example when the doc change */ void setUndoStack(std::weak_ptr undo_stack); protected: /* @brief Requests the best snapped position for a clip @param pos is the clip's requested position @param length is the clip's duration @param pts snap points to ignore (for example currently moved clip) @param snapDistance the maximum distance for a snap result, -1 for no snapping @returns best snap position or -1 if no snap point is near */ int getBestSnapPos(int pos, int length, const std::vector &pts = std::vector(), int cursorPosition = 0, int snapDistance = -1); public: /* @brief Requests the next snapped point @param pos is the current position */ int getNextSnapPos(int pos); /* @brief Requests the previous snapped point @param pos is the current position */ int getPreviousSnapPos(int pos); /* @brief Add a new snap point @param pos is the current position */ void addSnap(int pos); /* @brief Remove snap point @param pos is the current position */ void removeSnap(int pos); /* @brief Request composition insertion at given position. This action is undoable Returns true on success. If it fails, nothing is modified. @param transitionId Identifier of the Mlt transition to insert (as given by repository) @param track Id of the track where to insert @param position Requested position @param length Requested initial length. 
@param id return parameter of the id of the inserted composition @param logUndo if set to false, no undo object is stored */ bool requestCompositionInsertion(const QString &transitionId, int trackId, int position, int length, std::unique_ptr transProps, int &id, bool logUndo = true); /* Same function, but accumulates undo and redo*/ bool requestCompositionInsertion(const QString &transitionId, int trackId, int compositionTrack, int position, int length, std::unique_ptr transProps, int &id, Fun &undo, Fun &redo, bool finalMove = false); /* @brief This function change the global (timeline-wise) enabled state of the effects It disables/enables track and clip effects (recursively) */ void setTimelineEffectsEnabled(bool enabled); /* @brief Get a timeline clip id by its position or -1 if not found */ int getClipByPosition(int trackId, int position) const; /* @brief Get a timeline composition id by its starting position or -1 if not found */ int getCompositionByPosition(int trackId, int position) const; /* @brief Returns a list of all items that are intersect with a given range. * @param trackId is the id of the track for concerned items. Setting trackId to -1 returns items on all tracks * @param start is the position where we the items should start * @param end is the position after which items will not be selected, set to -1 to get all clips on track * @param listCompositions if enabled, the list will also contains composition ids */ std::unordered_set getItemsInRange(int trackId, int start, int end = -1, bool listCompositions = true); /* @brief Returns a list of all luma files used in the project */ QStringList extractCompositionLumas() const; /* @brief Inform asset view of duration change */ virtual void adjustAssetRange(int clipId, int in, int out); void requestClipReload(int clipId); void requestClipUpdate(int clipId, const QVector &roles); /** @brief define current edit mode (normal, insert, overwrite */ void setEditMode(TimelineMode::EditMode mode); Q_INVOKABLE bool normalEdit() const; /** @brief Returns the effectstack of a given clip. */ std::shared_ptr getClipEffectStack(int itemId); std::shared_ptr getTrackEffectStackModel(int trackId); /** @brief Add slowmotion effect to clip in timeline. @param clipId id of the target clip @param speed: speed in percentage. 100 corresponds to original speed, 50 to half the speed This functions create an undo object and also apply the effect to the corresponding audio if there is any. Returns true on success, false otherwise (and nothing is modified) */ bool requestClipTimeWarp(int clipId, double speed); /* @brief Same function as above, but doesn't check for paired audio and accumulate undo/redo */ bool requestClipTimeWarp(int clipId, double speed, Fun &undo, Fun &redo); void replugClip(int clipId); /** @brief Refresh the tractor profile in case a change was requested. 

    void replugClip(int clipId);

    /** @brief Refresh the tractor profile in case a change was requested. */
    void updateProfile(Mlt::Profile *profile);

    /** @brief Clear the current selection
        @param onDeletion is true when the selection is cleared as a result of a deletion
     */
    Q_INVOKABLE bool requestClearSelection(bool onDeletion = false);
    // Same function with undo/redo accumulation
    void requestClearSelection(bool onDeletion, Fun &undo, Fun &redo);

    /** @brief Add the given item to the selection.
        If @param clear is true, the selection is first cleared
     */
    Q_INVOKABLE void requestAddToSelection(int itemId, bool clear = false);

    /** @brief Remove the given item from the selection */
    Q_INVOKABLE void requestRemoveFromSelection(int itemId);

    /** @brief Set the selection to the set of given ids */
    bool requestSetSelection(const std::unordered_set<int> &ids);
    // Same function with undo/redo
    bool requestSetSelection(const std::unordered_set<int> &ids, Fun &undo, Fun &redo);

    /** @brief Returns a set containing all the items in the selection */
    std::unordered_set<int> getCurrentSelection() const;

protected:
    /* @brief Register a new track. This is a call-back meant to be called from TrackModel
       @param pos indicates the number of the track we are adding. If this is -1, then we add at the end.
     */
    void registerTrack(std::shared_ptr<TrackModel> track, int pos = -1, bool doInsert = true, bool reloadView = true);

    /* @brief Register a new clip. This is a call-back meant to be called from ClipModel */
    void registerClip(const std::shared_ptr<ClipModel> &clip, bool registerProducer = false);

    /* @brief Register a new composition. This is a call-back meant to be called from CompositionModel */
    void registerComposition(const std::shared_ptr<CompositionModel> &composition);

    /* @brief Register a new group. This is a call-back meant to be called from GroupsModel */
    void registerGroup(int groupId);

    /* @brief Deregister and destruct the track with given id.
       @param updateView Whether to send updates to the model. Must be false when called from a constructor/destructor.
     */
    Fun deregisterTrack_lambda(int id, bool updateView = false);

    /* @brief Return a lambda that deregisters and destructs the clip with given id.
       Note that the clip must already be deleted from its track and groups.
     */
    Fun deregisterClip_lambda(int id);

    /* @brief Return a lambda that deregisters and destructs the composition with given id. */
    Fun deregisterComposition_lambda(int compoId);

    /* @brief Deregister a group with given id */
    void deregisterGroup(int id);

    /* @brief Helper function to get a pointer to the track, given its id */
    std::shared_ptr<TrackModel> getTrackById(int trackId);
    const std::shared_ptr<TrackModel> getTrackById_const(int trackId) const;

    /* @brief Helper function to get a pointer to a clip, given its id */
    std::shared_ptr<ClipModel> getClipPtr(int clipId) const;

    /* @brief Helper function to get a pointer to a composition, given its id */
    std::shared_ptr<CompositionModel> getCompositionPtr(int compoId) const;

    /* @brief Returns next valid unique id to create an object */
    static int getNextId();

    /* @brief Unplant and then replant all the compositions in the correct order.
       @param currentCompo is the id of a composition that has not yet been planted, if any. Otherwise send -1.
     */
    bool replantCompositions(int currentCompo, bool updateView);

    /* @brief Unplant the composition with given Id */
    bool unplantComposition(int compoId);
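
    /* Editor's note — illustrative sketch, not part of the original header: the Fun-based
       undo/redo accumulation used throughout this class. Assuming Fun is the
       std::function<bool(void)> alias from undohelper.hpp, several operations (such as the
       internal deletion helper declared just below) can be chained into one undoable step;
       clipId and pushToStack() are hypothetical names.
       @code
       Fun undo = []() { return true; };
       Fun redo = []() { return true; };
       bool ok = requestClipDeletion(clipId, undo, redo); // accumulates into undo/redo
       if (ok) {
           pushToStack(undo, redo); // hypothetical: record the pair on the undo stack
       }
       @endcode
     */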

    /* Internal functions to delete a clip or a composition. In general, you should call requestItemDeletion */
    bool requestClipDeletion(int clipId, Fun &undo, Fun &redo);
    bool requestCompositionDeletion(int compositionId, Fun &undo, Fun &redo);

    /** @brief Check tracks duration and update black track accordingly */
    void updateDuration();

    /** @brief Get a track tag (A1, V1, V2,...) through its id */
    const QString getTrackTagById(int trackId) const;

    /** @brief Attempt to make a clip move without ever updating the view */
    bool requestClipMoveAttempt(int clipId, int trackId, int position);

public:
    /* @brief Debugging function that checks consistency with Mlt objects */
    bool checkConsistency();

protected:
    /* @brief Refresh project monitor if cursor was inside range */
    void checkRefresh(int start, int end);

    /* @brief Send signal to require clearing effect/composition view */
    void clearAssetView(int itemId);

    bool m_blockRefresh;

signals:
    /* @brief Signal triggered by clearAssetView */
    void requestClearAssetView(int);
    void requestMonitorRefresh();
    /* @brief Signal triggered by track operations */
    void invalidateZone(int in, int out);
    /* @brief Signal triggered when a track duration changed (insertion/deletion) */
    void durationUpdated();
    /* @brief Signal sent whenever the selection changes */
    void selectionChanged();

protected:
    std::unique_ptr<Mlt::Tractor> m_tractor;
    std::list<std::shared_ptr<TrackModel>> m_allTracks;
    std::unordered_map<int, std::list<std::shared_ptr<TrackModel>>::iterator>
        m_iteratorTable; // this logs the iterator associated with each track id. This allows easy access of a track based on its id.
    std::unordered_map<int, std::shared_ptr<ClipModel>> m_allClips; // the keys are the clip id, and the values are the corresponding pointers
    std::unordered_map<int, std::shared_ptr<CompositionModel>> m_allCompositions; // the keys are the composition id, and the values are the corresponding pointers
    static int next_id; // next valid id to assign
    std::unique_ptr<GroupsModel> m_groups;
    std::shared_ptr<SnapModel> m_snaps;
    std::unordered_set<int> m_allGroups; // ids of all the groups
    std::weak_ptr<DocUndoStack> m_undoStack;
    Mlt::Profile *m_profile;
    // The black track producer. Its length / out should always be adjusted to the project's length
    std::unique_ptr<Mlt::Producer> m_blackClip;
    mutable QReadWriteLock m_lock; // This is a lock that ensures safety in case of concurrent access
    bool m_timelineEffectsEnabled;
    bool m_id; // id of the timeline itself
    // id of the selection. If -1, there is no selection, if positive, then it might either be the id of the selection group, or the id of an individual
    // item, or, finally, the id of a group which is not of type selection. The last case happens when the selection exactly matches an existing group
    // (in that case we cannot further group it because the selection would have only one child, which is prohibited by design)
    int m_currentSelection = -1;
    // The index of the temporary overlay track in tractor, or -1 if not connected
    int m_overlayTrackCount;
    // The preferred audio target for clip insertion or -1 if not defined
    int m_audioTarget;
    // The preferred video target for clip insertion or -1 if not defined
    int m_videoTarget;
    // Timeline editing mode
    TimelineMode::EditMode m_editMode;
    // What follows are some virtual functions that correspond to the QML.
They are implemented in TimelineItemModel protected: virtual void _beginRemoveRows(const QModelIndex &, int, int) = 0; virtual void _beginInsertRows(const QModelIndex &, int, int) = 0; virtual void _endRemoveRows() = 0; virtual void _endInsertRows() = 0; virtual void notifyChange(const QModelIndex &topleft, const QModelIndex &bottomright, bool start, bool duration, bool updateThumb) = 0; virtual void notifyChange(const QModelIndex &topleft, const QModelIndex &bottomright, const QVector &roles) = 0; virtual void notifyChange(const QModelIndex &topleft, const QModelIndex &bottomright, int role) = 0; virtual QModelIndex makeClipIndexFromID(int) const = 0; virtual QModelIndex makeCompositionIndexFromID(int) const = 0; virtual QModelIndex makeTrackIndexFromID(int) const = 0; virtual void _resetView() = 0; }; #endif diff --git a/src/timeline2/view/qml/Clip.qml b/src/timeline2/view/qml/Clip.qml index 6b9e4af98..4635838c9 100644 --- a/src/timeline2/view/qml/Clip.qml +++ b/src/timeline2/view/qml/Clip.qml @@ -1,876 +1,928 @@ /* * Copyright (c) 2013-2016 Meltytech, LLC * Author: Dan Dennedy * * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see . */ import QtQuick 2.6 import QtQuick.Controls 2.2 import Kdenlive.Controls 1.0 import QtQml.Models 2.2 import QtQuick.Window 2.2 import 'Timeline.js' as Logic import com.enums 1.0 Rectangle { id: clipRoot property real timeScale: 1.0 property string clipName: '' property string clipResource: '' property string mltService: '' property string effectNames property int modelStart property real scrollX: 0 property int inPoint: 0 property int outPoint: 0 property int clipDuration: 0 property bool isAudio: false property int audioChannels property bool showKeyframes: false property bool isGrabbed: false property bool grouped: false property var audioLevels property var markers property var keyframeModel property int clipStatus: 0 property int itemType: 0 property int fadeIn: 0 property int fadeOut: 0 property int binId: 0 + property int positionOffset: 0 property var parentTrack property int trackIndex //Index in track repeater property int clipId //Id of the clip in the model property int trackId: -1 // Id of the parent track in the model property int fakeTid: -1 property int fakePosition: 0 property int originalTrackId: -1 property int originalX: x property int originalDuration: clipDuration property int lastValidDuration: clipDuration property int draggedX: x property bool selected: false property bool isLocked: parentTrack && parentTrack.isLocked == true property bool hasAudio property bool canBeAudio property bool canBeVideo property string hash: 'ccc' //TODO property double speed: 1.0 property color borderColor: 'black' property bool forceReloadThumb width : clipDuration * timeScale; opacity: dragProxyArea.drag.active && dragProxy.draggedItem == clipId ? 
0.8 : 1.0

    signal trimmingIn(var clip, real newDuration, var mouse, bool shiftTrim)
    signal trimmedIn(var clip, bool shiftTrim)
    signal trimmingOut(var clip, real newDuration, var mouse, bool shiftTrim)
    signal trimmedOut(var clip, bool shiftTrim)

    onIsGrabbedChanged: {
        if (clipRoot.isGrabbed) {
            clipRoot.forceActiveFocus();
            mouseArea.focus = true
        }
    }

+    // Clears the timeline selection and moves this clip back by `offset` frames,
+    // realigning it with its partner clip (used by the offset indicator below)
+    function clearAndMove(offset) {
+        controller.requestClearSelection()
+        controller.requestClipMove(clipRoot.clipId, clipRoot.trackId, clipRoot.modelStart - offset, true, true, true);
+    }
+
    onInPointChanged: {
        if (parentTrack && parentTrack.isAudio && thumbsLoader.item) {
            thumbsLoader.item.reload()
        }
    }

    onClipResourceChanged: {
        if (itemType == ProducerType.Color) {
            // Use an assignment (not a binding-style label, which is a no-op here) so the color actually updates
            color = Qt.darker(getColor(), 1.5)
        }
    }

    ToolTip {
        visible: mouseArea.containsMouse && !dragProxyArea.pressed
        font.pixelSize: root.baseUnit
        delay: 1000
        timeout: 5000
        background: Rectangle {
            color: activePalette.alternateBase
            border.color: activePalette.light
        }
        contentItem: Label {
            color: activePalette.text
            text: clipRoot.clipName + ' (' + timeline.timecode(clipRoot.inPoint) + '-' + timeline.timecode(clipRoot.outPoint) + ')'
        }
    }

    onKeyframeModelChanged: {
        console.log('keyframe model changed............')
        if (effectRow.keyframecanvas) {
            effectRow.keyframecanvas.requestPaint()
        }
    }

    onClipDurationChanged: {
        width = clipDuration * timeScale;
    }

    onModelStartChanged: {
        x = modelStart * timeScale;
    }

    onFakePositionChanged: {
        x = fakePosition * timeScale;
    }

    onFakeTidChanged: {
        if (clipRoot.fakeTid > -1 && parentTrack) {
            if (clipRoot.parent != dragContainer) {
                var pos = clipRoot.mapToGlobal(clipRoot.x, clipRoot.y);
                clipRoot.parent = dragContainer
                pos = clipRoot.mapFromGlobal(pos.x, pos.y)
                clipRoot.x = pos.x
                clipRoot.y = pos.y
            }
            clipRoot.y = Logic.getTrackById(clipRoot.fakeTid).y
        }
    }

    onForceReloadThumbChanged: {
        // TODO: find a way to force reload of clip thumbs
        if (thumbsLoader.item) {
            thumbsLoader.item.reload()
        }
    }

    onTimeScaleChanged: {
        x = modelStart * timeScale;
        width = clipDuration * timeScale;
        labelRect.x = scrollX > modelStart * timeScale ? scrollX - modelStart * timeScale : 0
        if (parentTrack && parentTrack.isAudio) {
            thumbsLoader.item.reload();
        }
    }

    onScrollXChanged: {
        labelRect.x = scrollX > modelStart * timeScale ? scrollX - modelStart * timeScale : 0
    }

    border.color: selected ? root.selectionColor : grouped ? root.groupColor : borderColor
    border.width: isGrabbed ? 8 : 1.5

    function updateDrag() {
        var itemPos = mapToItem(tracksContainerArea, 0, 0, clipRoot.width, clipRoot.height)
        initDrag(clipRoot, itemPos, clipRoot.clipId, clipRoot.modelStart, clipRoot.trackId, false)
    }

    function getColor() {
        if (clipStatus == ClipState.Disabled) {
            return 'grey'
        }
        if (itemType == ProducerType.Color) {
            var color = clipResource.substring(clipResource.length - 9)
            if (color[0] == '#') {
                return color
            }
            return '#' + color.substring(color.length - 8, color.length - 2)
        }
        return isAudio? root.audioColor : root.videoColor
    }

/*    function reparent(track) {
        console.log('TrackId: ', trackId)
        parent = track
        height = track.height
        parentTrack = track
        trackId = parentTrack.trackId
        console.log('Reparenting clip to Track: ', trackId)
        //generateWaveform()
    }
*/
    property bool variableThumbs: (isAudio || itemType == ProducerType.Color || mltService === '')
    property bool isImage: itemType == ProducerType.Image
    property string baseThumbPath: variableThumbs ? '' : 'image://thumbnail/' + binId + '/' + (isImage ? '#0' : '#')
    property string inThumbPath: (variableThumbs || isImage ) ?
baseThumbPath : baseThumbPath + Math.floor(inPoint * speed) property string outThumbPath: (variableThumbs || isImage ) ? baseThumbPath : baseThumbPath + Math.floor(outPoint * speed) DropArea { //Drop area for clips anchors.fill: clipRoot keys: 'kdenlive/effect' property string dropData property string dropSource property int dropRow: -1 onEntered: { dropData = drag.getDataAsString('kdenlive/effect') dropSource = drag.getDataAsString('kdenlive/effectsource') } onDropped: { console.log("Add effect: ", dropData) if (dropSource == '') { // drop from effects list controller.addClipEffect(clipRoot.clipId, dropData); } else { controller.copyClipEffect(clipRoot.clipId, dropSource); } dropSource = '' dropRow = -1 drag.acceptProposedAction } } onAudioLevelsChanged: { if (parentTrack && parentTrack.isAudio && thumbsLoader.item) { thumbsLoader.item.reload() } } MouseArea { id: mouseArea visible: root.activeTool === 0 anchors.fill: clipRoot acceptedButtons: Qt.RightButton hoverEnabled: true cursorShape: dragProxyArea.drag.active ? Qt.ClosedHandCursor : Qt.OpenHandCursor onPressed: { root.stopScrolling = true if (mouse.button == Qt.RightButton) { if (timeline.selection.indexOf(clipRoot.clipId) == -1) { controller.requestAddToSelection(clipRoot.clipId, true) } clipMenu.clipId = clipRoot.clipId clipMenu.clipStatus = clipRoot.clipStatus clipMenu.clipFrame = Math.round(mouse.x / timeline.scaleFactor) clipMenu.grouped = clipRoot.grouped clipMenu.trackId = clipRoot.trackId clipMenu.canBeAudio = clipRoot.canBeAudio clipMenu.canBeVideo = clipRoot.canBeVideo clipMenu.popup() } } Keys.onShortcutOverride: event.accepted = clipRoot.isGrabbed && (event.key === Qt.Key_Left || event.key === Qt.Key_Right || event.key === Qt.Key_Up || event.key === Qt.Key_Down) Keys.onLeftPressed: { controller.requestClipMove(clipRoot.clipId, clipRoot.trackId, clipRoot.modelStart - 1, true, true, true); } Keys.onRightPressed: { controller.requestClipMove(clipRoot.clipId, clipRoot.trackId, clipRoot.modelStart + 1, true, true, true); } Keys.onUpPressed: { controller.requestClipMove(clipRoot.clipId, controller.getNextTrackId(clipRoot.trackId), clipRoot.modelStart, true, true, true); } Keys.onDownPressed: { controller.requestClipMove(clipRoot.clipId, controller.getPreviousTrackId(clipRoot.trackId), clipRoot.modelStart, true, true, true); } onPositionChanged: { var mapped = parentTrack.mapFromItem(clipRoot, mouse.x, mouse.y).x root.mousePosChanged(Math.round(mapped / timeline.scaleFactor)) } onEntered: { var itemPos = mapToItem(tracksContainerArea, 0, 0, width, height) initDrag(clipRoot, itemPos, clipRoot.clipId, clipRoot.modelStart, clipRoot.trackId, false) } onExited: { endDrag() } onWheel: zoomByWheel(wheel) } Item { // Thumbs container anchors.fill: parent anchors.leftMargin: 0 anchors.rightMargin: 0 anchors.topMargin: parent.border.width anchors.bottomMargin: parent.border.width clip: true Loader { id: thumbsLoader asynchronous: true visible: status == Loader.Ready anchors.fill: parent source: parentTrack.isAudio ? (timeline.showAudioThumbnails ? "ClipAudioThumbs.qml" : "") : itemType == ProducerType.Color ? "" : timeline.showThumbnails ? "ClipThumbs.qml" : "" onLoaded: { item.reload() } } } Item { // Clipping container id: container anchors.fill: parent anchors.margins: 1.5 clip: true Rectangle { // text background id: labelRect color: clipRoot.selected ? 'darkred' : '#66000000' width: label.width + 2 height: label.height visible: clipRoot.width > width / 2 Text { id: label text: clipName + (clipRoot.speed != 1.0 ? 
' [' + Math.round(clipRoot.speed*100) + '%]': '') font.pixelSize: root.baseUnit * 1.2 anchors { top: labelRect.top left: labelRect.left topMargin: 1 leftMargin: 1 } color: 'white' style: Text.Outline styleColor: 'black' } } + Rectangle { + // Offset info + id: offsetRect + color: 'darkgreen' + width: offsetLabel.width + radius + height: offsetLabel.height + radius: height/3 + x: labelRect.width + 4 + visible: labelRect.visible && positionOffset != 0 + MouseArea { + id: offsetArea + hoverEnabled: true + cursorShape: Qt.PointingHandCursor + anchors.fill: parent + onClicked: { + clearAndMove(positionOffset) + } + ToolTip { + visible: offsetArea.containsMouse + font.pixelSize: root.baseUnit + delay: 1000 + timeout: 5000 + background: Rectangle { + color: activePalette.alternateBase + border.color: activePalette.light + } + contentItem: Label { + color: activePalette.text + text: i18n('Offset') + (positionOffset < 0 ? ( ': -' + timeline.timecode(-positionOffset)) : ': ' + timeline.timecode(positionOffset)) + } + } + Text { + id: offsetLabel + text: positionOffset + font.pixelSize: root.baseUnit * 1.2 + anchors { + horizontalCenter: parent.horizontalCenter + topMargin: 1 + leftMargin: 1 + } + color: 'white' + style: Text.Outline + styleColor: 'black' + } + } + } Rectangle { // effects id: effectsRect color: '#555555' width: effectLabel.width + 2 height: effectLabel.height x: labelRect.x anchors.top: labelRect.bottom visible: labelRect.visible && clipRoot.effectNames != '' Text { id: effectLabel text: clipRoot.effectNames font.pixelSize: root.baseUnit * 1.2 anchors { top: effectsRect.top left: effectsRect.left topMargin: 1 leftMargin: 1 // + ((isAudio || !settings.timelineShowThumbnails) ? 0 : inThumbnail.width) + 1 } color: 'white' //style: Text.Outline styleColor: 'black' } } Repeater { model: markers delegate: Item { anchors.fill: parent Rectangle { id: markerBase width: 1 height: parent.height x: (model.frame - clipRoot.inPoint) * timeScale; color: model.color } Rectangle { visible: mlabel.visible opacity: 0.7 x: markerBase.x radius: 2 width: mlabel.width + 4 height: mlabel.height anchors { bottom: parent.verticalCenter } color: model.color MouseArea { z: 10 anchors.fill: parent acceptedButtons: Qt.LeftButton cursorShape: Qt.PointingHandCursor hoverEnabled: true onDoubleClicked: timeline.editMarker(clipRoot.clipId, clipRoot.modelStart + model.frame - clipRoot.inPoint) onClicked: timeline.position = (clipRoot.x + markerBase.x) / timeline.scaleFactor } } Text { id: mlabel visible: timeline.showMarkers && parent.width > width * 1.5 text: model.comment font.pixelSize: root.baseUnit x: markerBase.x anchors { bottom: parent.verticalCenter topMargin: 2 leftMargin: 2 } color: 'white' } } } KeyframeView { id: effectRow visible: clipRoot.showKeyframes && clipRoot.keyframeModel selected: clipRoot.selected inPoint: clipRoot.inPoint outPoint: clipRoot.outPoint masterObject: clipRoot kfrModel: clipRoot.keyframeModel } } states: [ State { name: 'locked' when: isLocked PropertyChanges { target: clipRoot color: root.lockedColor opacity: 0.8 z: 0 } }, State { name: 'normal' when: clipRoot.selected === false PropertyChanges { target: clipRoot color: Qt.darker(getColor(), 1.5) z: 0 } }, State { name: 'selected' when: clipRoot.selected === true PropertyChanges { target: clipRoot color: getColor() z: 3 } } ] Rectangle { id: compositionIn anchors.left: parent.left anchors.bottom: parent.bottom anchors.bottomMargin: 2 anchors.leftMargin: 4 width: root.baseUnit * 1.2 height: width radius: 2 color: 
Qt.darker('mediumpurple') border.width: 2 border.color: 'green' opacity: 0 enabled: !clipRoot.isAudio && !dragProxy.isComposition visible: clipRoot.width > 4 * width MouseArea { id: compInArea anchors.fill: parent hoverEnabled: true cursorShape: Qt.PointingHandCursor onEntered: parent.opacity = 0.7 onExited: { if (!pressed) { parent.opacity = 0 } } onPressed: { timeline.addCompositionToClip('', clipRoot.clipId, 0) } onReleased: { parent.opacity = 0 } ToolTip { visible: compInArea.containsMouse && !dragProxyArea.pressed font.pixelSize: root.baseUnit delay: 1000 timeout: 5000 background: Rectangle { color: activePalette.alternateBase border.color: activePalette.light } contentItem: Label { color: activePalette.text - text: 'Click to add composition' + text: i18n('Click to add composition') } } } } Rectangle { id: compositionOut anchors.right: parent.right anchors.bottom: parent.bottom anchors.bottomMargin: 2 anchors.rightMargin: 4 width: root.baseUnit * 1.2 height: width radius: 2 color: Qt.darker('mediumpurple') border.width: 2 border.color: 'green' opacity: 0 enabled: !clipRoot.isAudio visible: clipRoot.width > 4 * width MouseArea { id: compOutArea anchors.fill: parent hoverEnabled: true cursorShape: Qt.PointingHandCursor onEntered: { parent.opacity = 0.7 } onExited: { if (!pressed) { parent.opacity = 0 } } onPressed: { timeline.addCompositionToClip('', clipRoot.clipId, clipRoot.clipDuration - 1) } onReleased: { parent.opacity = 0 } ToolTip { visible: compOutArea.containsMouse && !dragProxyArea.pressed font.pixelSize: root.baseUnit delay: 1000 timeout: 5000 background: Rectangle { color: activePalette.alternateBase border.color: activePalette.light } contentItem: Label { color: activePalette.text - text: 'Click to add composition' + text: i18n('Click to add composition') } } } } TimelineTriangle { id: fadeInTriangle fillColor: 'green' width: Math.min(clipRoot.fadeIn * timeScale, clipRoot.width) height: clipRoot.height - clipRoot.border.width * 2 anchors.left: clipRoot.left anchors.top: clipRoot.top anchors.margins: clipRoot.border.width opacity: 0.3 } Rectangle { id: fadeInControl anchors.left: fadeInTriangle.width > radius? undefined : fadeInTriangle.left anchors.horizontalCenter: fadeInTriangle.width > radius? 
fadeInTriangle.right : undefined anchors.top: fadeInTriangle.top anchors.topMargin: -10 width: root.baseUnit * 2 height: width radius: width / 2 color: '#FF66FFFF' border.width: 2 border.color: 'green' enabled: !isLocked && !dragProxy.isComposition opacity: 0 visible : clipRoot.width > 3 * width Drag.active: fadeInMouseArea.drag.active MouseArea { id: fadeInMouseArea anchors.fill: parent hoverEnabled: true cursorShape: Qt.PointingHandCursor drag.target: parent drag.minimumX: -root.baseUnit drag.maximumX: container.width drag.axis: Drag.XAxis drag.smoothed: false property int startX property int startFadeIn onEntered: parent.opacity = 0.7 onExited: { if (!pressed) { parent.opacity = 0 } } onPressed: { root.stopScrolling = true startX = Math.round(parent.x / timeScale) startFadeIn = clipRoot.fadeIn parent.anchors.left = undefined parent.anchors.horizontalCenter = undefined parent.opacity = 1 fadeInTriangle.opacity = 0.5 // parentTrack.clipSelected(clipRoot, parentTrack) TODO } onReleased: { root.stopScrolling = false fadeInTriangle.opacity = 0.3 parent.opacity = 0 if (fadeInTriangle.width > parent.radius) parent.anchors.horizontalCenter = fadeInTriangle.right else parent.anchors.left = fadeInTriangle.left console.log('released fade: ', clipRoot.fadeIn) timeline.adjustFade(clipRoot.clipId, 'fadein', clipRoot.fadeIn, startFadeIn) bubbleHelp.hide() } onPositionChanged: { if (mouse.buttons === Qt.LeftButton) { var delta = Math.round(parent.x / timeScale) - startX var duration = Math.max(0, startFadeIn + delta) duration = Math.min(duration, clipRoot.clipDuration) if (duration != clipRoot.fadeIn) { timeline.adjustFade(clipRoot.clipId, 'fadein', duration, -1) // Show fade duration as time in a "bubble" help. var s = timeline.timecode(Math.max(duration, 0)) bubbleHelp.show(clipRoot.x, parentTrack.y + clipRoot.height, s) } } } } SequentialAnimation on scale { loops: Animation.Infinite running: fadeInMouseArea.containsMouse && !fadeInMouseArea.pressed NumberAnimation { from: 1.0 to: 0.7 duration: 250 easing.type: Easing.InOutQuad } NumberAnimation { from: 0.7 to: 1.0 duration: 250 easing.type: Easing.InOutQuad } } } TimelineTriangle { id: fadeOutCanvas fillColor: 'red' width: Math.min(clipRoot.fadeOut * timeScale, clipRoot.width) height: clipRoot.height - clipRoot.border.width * 2 anchors.right: clipRoot.right anchors.top: clipRoot.top anchors.margins: clipRoot.border.width opacity: 0.3 transform: Scale { xScale: -1; origin.x: fadeOutCanvas.width / 2} } Rectangle { id: fadeOutControl anchors.right: fadeOutCanvas.width > radius? undefined : fadeOutCanvas.right anchors.horizontalCenter: fadeOutCanvas.width > radius? 
fadeOutCanvas.left : undefined anchors.top: fadeOutCanvas.top anchors.topMargin: -10 width: root.baseUnit * 2 height: width radius: width / 2 color: '#66FFFFFF' border.width: 2 border.color: 'red' opacity: 0 enabled: !isLocked && !dragProxy.isComposition Drag.active: fadeOutMouseArea.drag.active visible : clipRoot.width > 3 * width MouseArea { id: fadeOutMouseArea anchors.fill: parent hoverEnabled: true cursorShape: Qt.PointingHandCursor drag.target: parent drag.axis: Drag.XAxis drag.minimumX: -root.baseUnit drag.maximumX: container.width property int startX property int startFadeOut onEntered: parent.opacity = 0.7 onExited: { if (!pressed) { parent.opacity = 0 } } drag.smoothed: false onPressed: { root.stopScrolling = true startX = Math.round(parent.x / timeScale) startFadeOut = clipRoot.fadeOut parent.anchors.right = undefined parent.anchors.horizontalCenter = undefined parent.opacity = 1 fadeOutCanvas.opacity = 0.5 } onReleased: { fadeOutCanvas.opacity = 0.3 parent.opacity = 0 root.stopScrolling = false if (fadeOutCanvas.width > parent.radius) parent.anchors.horizontalCenter = fadeOutCanvas.left else parent.anchors.right = fadeOutCanvas.right timeline.adjustFade(clipRoot.clipId, 'fadeout', clipRoot.fadeOut, startFadeOut) bubbleHelp.hide() } onPositionChanged: { if (mouse.buttons === Qt.LeftButton) { var delta = startX - Math.round(parent.x / timeScale) var duration = Math.max(0, startFadeOut + delta) duration = Math.min(duration, clipRoot.clipDuration) if (clipRoot.fadeOut != duration) { timeline.adjustFade(clipRoot.clipId, 'fadeout', duration, -1) // Show fade duration as time in a "bubble" help. var s = timeline.timecode(Math.max(duration, 0)) bubbleHelp.show(clipRoot.x + clipRoot.width, parentTrack.y + clipRoot.height, s) } } } } SequentialAnimation on scale { loops: Animation.Infinite running: fadeOutMouseArea.containsMouse && !fadeOutMouseArea.pressed NumberAnimation { from: 1.0 to: 0.7 duration: 250 easing.type: Easing.InOutQuad } NumberAnimation { from: 0.7 to: 1.0 duration: 250 easing.type: Easing.InOutQuad } } } Rectangle { id: trimIn anchors.left: clipRoot.left anchors.leftMargin: 0 height: parent.height enabled: !isLocked width: 5 color: isAudio? 'green' : 'lawngreen' opacity: 0 Drag.active: trimInMouseArea.drag.active Drag.proposedAction: Qt.MoveAction visible: trimInMouseArea.pressed || (root.activeTool === 0 && !mouseArea.drag.active && clipRoot.width > 4 * width) MouseArea { id: trimInMouseArea anchors.fill: parent hoverEnabled: true drag.target: parent drag.axis: Drag.XAxis drag.smoothed: false property bool shiftTrim: false property bool sizeChanged: false cursorShape: (containsMouse ? 
Qt.SizeHorCursor : Qt.ClosedHandCursor); onPressed: { root.stopScrolling = true clipRoot.originalX = clipRoot.x clipRoot.originalDuration = clipDuration parent.anchors.left = undefined shiftTrim = mouse.modifiers & Qt.ShiftModifier parent.opacity = 0 } onReleased: { root.stopScrolling = false parent.anchors.left = clipRoot.left if (sizeChanged) { clipRoot.trimmedIn(clipRoot, shiftTrim) sizeChanged = false } } onPositionChanged: { if (mouse.buttons === Qt.LeftButton) { var delta = Math.round((trimIn.x) / timeScale) if (delta !== 0) { if (delta < -modelStart) { delta = -modelStart } var newDuration = clipDuration - delta sizeChanged = true clipRoot.trimmingIn(clipRoot, newDuration, mouse, shiftTrim) } } } onEntered: { if (!pressed) { parent.opacity = 0.5 } } onExited: { parent.opacity = 0 } } } Rectangle { id: trimOut anchors.right: clipRoot.right anchors.rightMargin: 0 height: parent.height width: 5 color: 'red' opacity: 0 enabled: !isLocked Drag.active: trimOutMouseArea.drag.active Drag.proposedAction: Qt.MoveAction visible: trimOutMouseArea.pressed || (root.activeTool === 0 && !mouseArea.drag.active && clipRoot.width > 4 * width) MouseArea { id: trimOutMouseArea anchors.fill: parent hoverEnabled: true property bool shiftTrim: false property bool sizeChanged: false cursorShape: (containsMouse ? Qt.SizeHorCursor : Qt.ClosedHandCursor); drag.target: parent drag.axis: Drag.XAxis drag.smoothed: false onPressed: { root.stopScrolling = true clipRoot.originalDuration = clipDuration parent.anchors.right = undefined shiftTrim = mouse.modifiers & Qt.ShiftModifier parent.opacity = 0 } onReleased: { root.stopScrolling = false parent.anchors.right = clipRoot.right if (sizeChanged) { clipRoot.trimmedOut(clipRoot, shiftTrim) sizeChanged = false } } onPositionChanged: { if (mouse.buttons === Qt.LeftButton) { var newDuration = Math.round((parent.x + parent.width) / timeScale) if (newDuration != clipDuration) { sizeChanged = true clipRoot.trimmingOut(clipRoot, newDuration, mouse, shiftTrim) } } } onEntered: { if (!pressed) { parent.opacity = 0.5 } } onExited: parent.opacity = 0 } } /*MenuItem { id: mergeItem text: i18n('Merge with next clip') onTriggered: timeline.mergeClipWithNext(trackIndex, index, false) } MenuItem { text: i18n('Rebuild Audio Waveform') onTriggered: timeline.remakeAudioLevels(trackIndex, index) }*/ /*onPopupVisibleChanged: { if (visible && application.OS !== 'OS X' && __popupGeometry.height > 0) { // Try to fix menu running off screen. This only works intermittently. menu.__yOffset = Math.min(0, Screen.height - (__popupGeometry.y + __popupGeometry.height + 40)) menu.__xOffset = Math.min(0, Screen.width - (__popupGeometry.x + __popupGeometry.width)) } }*/ } diff --git a/src/timeline2/view/qml/Track.qml b/src/timeline2/view/qml/Track.qml index 694f06993..545d760c8 100644 --- a/src/timeline2/view/qml/Track.qml +++ b/src/timeline2/view/qml/Track.qml @@ -1,346 +1,352 @@ /* * Copyright (c) 2013-2016 Meltytech, LLC * Author: Dan Dennedy * * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. 
* * You should have received a copy of the GNU General Public License * along with this program. If not, see . */ import QtQuick 2.6 import QtQml.Models 2.2 import com.enums 1.0 Column{ id: trackRoot property alias trackModel: trackModel.model property alias rootIndex : trackModel.rootIndex property bool isAudio property bool isMute property bool isHidden property real timeScale: 1.0 property bool isCurrentTrack: false property bool isLocked: false property int trackInternalId : -42 property int trackThumbsFormat property int itemType: 0 /*function redrawWaveforms() { for (var i = 0; i < repeater.count; i++) repeater.itemAt(i).generateWaveform() }*/ function clipAt(index) { return repeater.itemAt(index) } function isClip(type) { return type != ProducerType.Composition && type != ProducerType.Track; } width: clipRow.width DelegateModel { id: trackModel delegate: Item { property var itemModel : model z: model.clipType == ProducerType.Composition ? 5 : 0 Loader { id: loader Binding { target: loader.item property: "timeScale" value: trackRoot.timeScale when: loader.status == Loader.Ready && loader.item } Binding { target: loader.item property: "fakeTid" value: model.fakeTrackId when: loader.status == Loader.Ready && loader.item } Binding { target: loader.item property: "fakePosition" value: model.fakePosition when: loader.status == Loader.Ready && loader.item } Binding { target: loader.item property: "selected" value: loader.item ? root.timelineSelection.indexOf(loader.item.clipId) !== -1 : false when: loader.status == Loader.Ready && model.clipType != ProducerType.Track } Binding { target: loader.item property: "mltService" value: model.mlt_service when: loader.status == Loader.Ready && loader.item } Binding { target: loader.item property: "modelStart" value: model.start when: loader.status == Loader.Ready && loader.item } Binding { target: loader.item property: "scrollX" value: scrollView.flickableItem.contentX when: loader.status == Loader.Ready && loader.item } Binding { target: loader.item property: "fadeIn" value: model.fadeIn when: loader.status == Loader.Ready && isClip(model.clipType) } + Binding { + target: loader.item + property: "positionOffset" + value: model.positionOffset + when: loader.status == Loader.Ready && isClip(model.clipType) + } Binding { target: loader.item property: "effectNames" value: model.effectNames when: loader.status == Loader.Ready && isClip(model.clipType) } Binding { target: loader.item property: "clipStatus" value: model.clipStatus when: loader.status == Loader.Ready && isClip(model.clipType) } Binding { target: loader.item property: "fadeOut" value: model.fadeOut when: loader.status == Loader.Ready && isClip(model.clipType) } Binding { target: loader.item property: "audioLevels" value: model.audioLevels when: loader.status == Loader.Ready && isClip(model.clipType) } Binding { target: loader.item property: "showKeyframes" value: model.showKeyframes when: loader.status == Loader.Ready && loader.item } Binding { target: loader.item property: "isGrabbed" value: model.isGrabbed when: loader.status == Loader.Ready && loader.item } Binding { target: loader.item property: "keyframeModel" value: model.keyframeModel when: loader.status == Loader.Ready && loader.item } Binding { target: loader.item property: "aTrack" value: model.a_track when: loader.status == Loader.Ready && loader.item.clipType == ProducerType.Composition } Binding { target: loader.item property: "trackHeight" value: root.trackHeight when: loader.status == Loader.Ready && loader.item.clipType == 
ProducerType.Composition } Binding { target: loader.item property: "clipDuration" value: model.duration when: loader.status == Loader.Ready && loader.item } Binding { target: loader.item property: "inPoint" value: model.in when: loader.status == Loader.Ready && loader.item } Binding { target: loader.item property: "outPoint" value: model.out when: loader.status == Loader.Ready && loader.item } Binding { target: loader.item property: "grouped" value: model.grouped when: loader.status == Loader.Ready && loader.item } Binding { target: loader.item property: "clipName" value: model.name when: loader.status == Loader.Ready && loader.item } Binding { target: loader.item property: "clipResource" value: model.resource when: loader.status == Loader.Ready && isClip(model.clipType) } Binding { target: loader.item property: "speed" value: model.speed when: loader.status == Loader.Ready && isClip(model.clipType) } Binding { target: loader.item property: "forceReloadThumb" value: model.reloadThumb when: loader.status == Loader.Ready && isClip(model.clipType) } Binding { target: loader.item property: "binId" value: model.binId when: loader.status == Loader.Ready && isClip(model.clipType) } sourceComponent: { if (isClip(model.clipType)) { return clipDelegate } else if (model.clipType == ProducerType.Composition) { return compositionDelegate } else { // Track return undefined } } onLoaded: { item.clipId= model.item item.parentTrack = trackRoot if (isClip(model.clipType)) { console.log('loaded clip: ', model.start, ', ID: ', model.item, ', index: ', trackRoot.DelegateModel.itemsIndex,', TYPE:', model.clipType) item.isAudio= model.audio item.markers= model.markers item.hasAudio = model.hasAudio item.canBeAudio = model.canBeAudio item.canBeVideo = model.canBeVideo item.itemType = model.clipType item.audioChannels = model.audioChannels //item.binId= model.binId } else if (model.clipType == ProducerType.Composition) { console.log('loaded composition: ', model.start, ', ID: ', model.item, ', index: ', trackRoot.DelegateModel.itemsIndex) //item.aTrack = model.a_track } else { console.log('loaded unwanted element: ', model.item, ', index: ', trackRoot.DelegateModel.itemsIndex) } item.trackId = model.trackId //item.selected= trackRoot.selection.indexOf(item.clipId) !== -1 //console.log(width, height); } } } } Item { id: clipRow height: trackRoot.height Repeater { id: repeater; model: trackModel } } Component { id: clipDelegate Clip { height: trackRoot.height onTrimmingIn: { var new_duration = controller.requestItemResize(clip.clipId, newDuration, false, false, root.snapping, shiftTrim) if (new_duration > 0) { clip.lastValidDuration = new_duration clip.originalX = clip.draggedX // Show amount trimmed as a time in a "bubble" help. var delta = new_duration - clip.originalDuration var s = timeline.timecode(Math.abs(delta)) s = '%1%2 = %3'.arg((delta < 0)? '+' : (delta > 0)? '-' : '') .arg(s) .arg(timeline.timecode(clipDuration)) bubbleHelp.show(clip.x - 20, trackRoot.y + trackRoot.height, s) } } onTrimmedIn: { bubbleHelp.hide() controller.requestItemResize(clip.clipId, clip.originalDuration, false, false, root.snapping, shiftTrim) controller.requestItemResize(clip.clipId, clip.lastValidDuration, false, true, root.snapping, shiftTrim) } onTrimmingOut: { var new_duration = controller.requestItemResize(clip.clipId, newDuration, true, false, root.snapping, shiftTrim) if (new_duration > 0) { clip.lastValidDuration = new_duration // Show amount trimmed as a time in a "bubble" help. 
var delta = clip.originalDuration - new_duration var s = timeline.timecode(Math.abs(delta)) s = '%1%2 = %3'.arg((delta < 0)? '+' : (delta > 0)? '-' : '') .arg(s) .arg(timeline.timecode(new_duration)) bubbleHelp.show(clip.x + clip.width - 20, trackRoot.y + trackRoot.height, s) } } onTrimmedOut: { bubbleHelp.hide() controller.requestItemResize(clip.clipId, clip.originalDuration, true, false, root.snapping, shiftTrim) controller.requestItemResize(clip.clipId, clip.lastValidDuration, true, true, root.snapping, shiftTrim) } } } Component { id: compositionDelegate Composition { displayHeight: trackRoot.height / 2 opacity: 0.8 selected: root.timelineSelection.indexOf(clipId) !== -1 onTrimmingIn: { var new_duration = controller.requestItemResize(clip.clipId, newDuration, false, false, root.snapping) if (new_duration > 0) { clip.lastValidDuration = newDuration clip.originalX = clip.draggedX // Show amount trimmed as a time in a "bubble" help. var delta = newDuration - clip.originalDuration var s = timeline.timecode(Math.abs(delta)) s = '%1%2 = %3'.arg((delta < 0)? '+' : (delta > 0)? '-' : '') .arg(s) .arg(timeline.timecode(clipDuration)) bubbleHelp.show(clip.x + clip.width, trackRoot.y + trackRoot.height, s) } } onTrimmedIn: { bubbleHelp.hide() controller.requestItemResize(clip.clipId, clip.originalDuration, false, false, root.snapping) controller.requestItemResize(clip.clipId, clip.lastValidDuration, false, true, root.snapping) } onTrimmingOut: { var new_duration = controller.requestItemResize(clip.clipId, newDuration, true, false, root.snapping) if (new_duration > 0) { clip.lastValidDuration = newDuration // Show amount trimmed as a time in a "bubble" help. var delta = newDuration - clip.originalDuration var s = timeline.timecode(Math.abs(delta)) s = '%1%2 = %3'.arg((delta < 0)? '+' : (delta > 0)? '-' : '') .arg(s) .arg(timeline.timecode(clipDuration)) bubbleHelp.show(clip.x + clip.width, trackRoot.y + trackRoot.height, s) } } onTrimmedOut: { bubbleHelp.hide() controller.requestItemResize(clip.clipId, clip.originalDuration, true, false, root.snapping) controller.requestItemResize(clip.clipId, clip.lastValidDuration, true, true, root.snapping) } } } }