Adapt to Windows and change Name

Change-Id: I875ce8a6c90463021c0ee5979fe11a7f1f3a3869
diff --git a/GreenScreen/CMakeLists.txt b/GreenScreen/CMakeLists.txt
new file mode 100644
index 0000000..1f61314
--- /dev/null
+++ b/GreenScreen/CMakeLists.txt
@@ -0,0 +1,157 @@
+cmake_minimum_required(VERSION 3.13)
+
+# set the project name
+set (ProjectName GreenScreen)
+set (Version 1.0)
+
+project(${ProjectName} VERSION ${Version})
+
+set (DAEMON ./../../daemon)
+set (PLUGIN_NAME GreenScreen)
+set (JPL_FILE_NAME ${PLUGIN_NAME}.jpl)
+set (DAEMON_SRC ${DAEMON}/src)
+set (CONTRIB_PATH ${DAEMON}/contrib)
+set (DESTINATION_PATH ./../build/)
+set (PLUGINS_LIB ../lib)
+set (JPL_DIRECTORY ${PROJECT_BINARY_DIR}/jpl)
+
+if(WIN32)
+	message(OS:\  WINDOWS\ ${CMAKE_SYSTEM_PROCESSOR})
+	set (CONTRIB_PLATFORM_CURT ${CMAKE_SYSTEM_PROCESSOR})
+	set (CONTRIB_PLATFORM ${CONTRIB_PLATFORM_CURT}-windows)
+	set (LIBRARY_FILE_NAME lib${PLUGIN_NAME}.dll)
+	set (LIBS_DIR $ENV{HOME}/Documents/GITHUB/Libs)
+	set (OPENCV $ENV{HOME}/Documents/GITHUB/opencv/build-bash/)
+	set (FFMPEG ${CONTRIB_PATH}/build/ffmpeg/Build/win32/x64)
+endif()
+
+if(UNIX)
+	message( FATAL_ERROR "\nUse CMake only for Windows! For Linux or Android (Linux host), use our bash scripts.\nPlease refer to the documentation for more information." )
+	# NOTE: the settings below are unreachable while the FATAL_ERROR above aborts
+	# configuration; they are kept for reference only.
+	message(OS:\  LINUX\ ${CMAKE_SYSTEM_PROCESSOR})
+	set (CONTRIB_PLATFORM_CURT ${CMAKE_SYSTEM_PROCESSOR})
+	set (CONTRIB_PLATFORM ${CONTRIB_PLATFORM_CURT}-linux-gnu)
+	set (LIBRARY_FILE_NAME lib${PLUGIN_NAME}.so)
+	set (LIBS_DIR /home/${USER}/Libs)
+endif()
+
+
+message(Building:\   ${ProjectName}\   ${Version})
+message(Build\ path:\ ${PROJECT_BINARY_DIR})
+message(JPL\ assembling\ path:\ ${JPL_DIRECTORY})
+message(JPL\ path:\ ${JPL_DIRECTORY}/../../../build/${ProjectName}/${JPL_FILE_NAME})
+
+set(TENSORFLOW _tensorflow_cc)
+set(model mModel-resnet50float.pb)
+set(modelType .pb)
+set(preferencesFile preferences-tfcc.json)
+set(TFLIB libtensorflow_cc)
+set(GPU -gpu61)
+
+if (CPU)
+	set(GPU )
+	add_definitions(-DCPU)
+	message(CPU\ BUILDING!)
+endif()
+
+if (TFLITE)
+	add_definitions(-DTFLITE)
+	set(TENSORFLOW _tensorflowLite)
+	set(model mobilenet_v2_deeplab_v3_256_myquant.tflite)
+	set(modelType .tflite)
+	set(preferencesFile preferences-tflite.json)
+	set(TFLIB libtensorflowlite)
+	message(TFLITE\ BUILDING!)
+endif()
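+# Example configure commands (illustrative; an out-of-tree build directory is assumed):
+#   cmake ..             # TensorFlow C++ GPU build (default)
+#   cmake .. -DCPU=ON    # TensorFlow C++ CPU-only build
+#   cmake .. -DTFLITE=ON # TensorFlow Lite build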
+
+set(CMAKE_CXX_STANDARD 17)
+set(CMAKE_CXX_STANDARD_REQUIRED True)
+set(CMAKE_CXX_FLAGS_RELEASE "${CMAKE_CXX_FLAGS_RELEASE} /MT")
+set(CMAKE_CXX_FLAGS_DEBUG "${CMAKE_CXX_FLAGS_DEBUG} /MTd")
+
+set(plugin_SRC main.cpp
+			   pluginInference.cpp
+			   pluginMediaHandler.cpp
+			   pluginParameters.cpp
+			   pluginProcessor.cpp
+			   TFInference.cpp
+			   videoSubscriber.cpp
+			   )
+
+set(plugin_HDR ${ProjectName}Config.h
+			   pluginInference.h
+			   pluginMediaHandler.h
+			   pluginParameters.h
+			   pluginProcessor.h
+			   TFInference.h
+			   TFModels.h
+			   videoSubscriber.h
+			   ../lib/accel.h
+			   ../lib/framescaler.h
+			   ../lib/pluglog.h
+			   )
+
+
+
+# add the library
+add_library(${ProjectName} SHARED ${plugin_SRC}
+								  ${plugin_HDR}
+								  )
+
+target_include_directories(${ProjectName} PUBLIC ${PROJECT_BINARY_DIR}
+												 ${PROJECT_SOURCE_DIR}
+												 ${DAEMON_SRC}
+												 ${CONTRIB_PATH}
+												 ${CONTRIB_PATH}/${CONTRIB_PLATFORM}/include
+												 ${CONTRIB_PATH}/${CONTRIB_PLATFORM}/include/opencv4
+												 ${OPENCV}/install/include
+												 ${FFMPEG}/include
+												 ${PLUGINS_LIB}
+												 ${LIBS_DIR}
+												 ${LIBS_DIR}/${TENSORFLOW}/include
+												 ${LIBS_DIR}/${TENSORFLOW}/include/third_party/eigen3
+												 ${LIBS_DIR}/${TENSORFLOW}/include/flatbuffers
+												 )
+
+target_link_directories(${ProjectName} PUBLIC	${CONTRIB_PATH}
+										${LIBS_DIR}/${TENSORFLOW}/lib/${CONTRIB_PLATFORM}
+										${OPENCV}/install/x64/vc16/staticlib
+										${FFMPEG}/bin
+										${LIBS_DIR}
+										${CONTRIB_PATH}/${CONTRIB_PLATFORM}/lib
+										${CONTRIB_PATH}/${CONTRIB_PLATFORM}/lib/opencv4/3rdparty
+										)
+if (WIN32)
+target_link_libraries(${ProjectName} PUBLIC swscale avutil libpng opencv_imgcodecs411 opencv_imgproc411 opencv_core411 ${TFLIB} zlib)
+endif()
+
+if (UNIX)
+target_link_libraries(${ProjectName} PUBLIC swscale avutil libpng opencv_imgcodecs opencv_imgproc opencv_core ${TFLIB})
+endif()
+
+add_custom_command(
+	TARGET ${ProjectName}
+	PRE_BUILD
+	COMMAND ${CMAKE_COMMAND} -E remove_directory ${JPL_DIRECTORY}
+	COMMAND ${CMAKE_COMMAND} -E remove_directory ${JPL_DIRECTORY}/../../../build/${ProjectName}
+	COMMAND ${CMAKE_COMMAND} -E make_directory ${JPL_DIRECTORY}/../../../build/${ProjectName}
+	COMMAND ${CMAKE_COMMAND} -E copy_directory ${CMAKE_SOURCE_DIR}/data ${JPL_DIRECTORY}/data
+	COMMAND ${CMAKE_COMMAND} -E copy_directory ${LIBS_DIR}/${TENSORFLOW}/lib/ ${JPL_DIRECTORY}/lib
+	COMMAND ${CMAKE_COMMAND} -E copy ${CMAKE_SOURCE_DIR}/modelsSRC/${model} ${JPL_DIRECTORY}/data/models
+	COMMAND ${CMAKE_COMMAND} -E rename ${JPL_DIRECTORY}/data/models/${model} ${JPL_DIRECTORY}/data/models/mModel${modelType}
+	COMMAND ${CMAKE_COMMAND} -E copy ${CMAKE_SOURCE_DIR}/manifest.json ${JPL_DIRECTORY}
+	COMMAND ${CMAKE_COMMAND} -E copy ${CMAKE_SOURCE_DIR}/${preferencesFile} ${JPL_DIRECTORY}/data
+	COMMAND ${CMAKE_COMMAND} -E rename ${JPL_DIRECTORY}/data/${preferencesFile} ${JPL_DIRECTORY}/data/preferences.json
+	COMMENT "Assembling Plugin files"
+)
+
+add_custom_command(
+	TARGET ${ProjectName}
+	POST_BUILD
+	COMMAND ${CMAKE_COMMAND} -E copy ${PROJECT_BINARY_DIR}/Release/${LIBRARY_FILE_NAME} ${JPL_DIRECTORY}/lib/
+	# if() is not valid inside add_custom_command; this CMake file is Windows-only
+	# (see the FATAL_ERROR above), so the import library is copied unconditionally.
+	COMMAND ${CMAKE_COMMAND} -E copy ${PROJECT_BINARY_DIR}/Release/${ProjectName}.lib ${JPL_DIRECTORY}/lib/
+	COMMAND python ${PROJECT_SOURCE_DIR}/../assemblePlugin.py --plugins=GreenScreen
+	COMMENT "Generating JPL archive"
+)
diff --git a/GreenScreen/TFInference.cpp b/GreenScreen/TFInference.cpp
new file mode 100644
index 0000000..f7053f5
--- /dev/null
+++ b/GreenScreen/TFInference.cpp
@@ -0,0 +1,290 @@
+/**
+ *  Copyright (C) 2020 Savoir-faire Linux Inc.
+ *
+ *  Author: Aline Gondim Santos <aline.gondimsantos@savoirfairelinux.com>
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; either version 3 of the License, or
+ *  (at your option) any later version.
+ *
+ *  This program is distributed in the hope that it will be useful,
+ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *  GNU General Public License for more details.
+ *
+ *  You should have received a copy of the GNU General Public License
+ *  along with this program; if not, write to the Free Software
+ *  Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301 USA.
+ */
+
+#include "TFInference.h"
+// Std libraries
+#include <fstream>
+#include <numeric>
+#include <iostream>
+#include <stdlib.h>
+
+
+#ifdef TFLITE
+// Tensorflow headers
+#include <tensorflow/lite/builtin_op_data.h>
+#include <tensorflow/lite/interpreter.h>
+#include <tensorflow/lite/kernels/register.h>
+#include <tensorflow/lite/model.h>
+#include <tensorflow/lite/optional_debug_tools.h>
+#else
+#ifdef WIN32
+#include <WinBase.h>
+#endif
+#include <tensorflow/core/graph/graph.h>
+//#include <tensorflow/core/graph/default_device.h>
+#include <tensorflow/core/platform/env.h>
+#endif // TFLITE
+
+#include <pluglog.h>
+
+const char sep = separator();
+const std::string TAG = "FORESEG";
+
+namespace jami {
+TensorflowInference::TensorflowInference(TFModel tfModel) : tfModel(tfModel) {}
+
+TensorflowInference::~TensorflowInference() {}
+
+bool
+TensorflowInference::isAllocated() const
+{
+    return allocated;
+}
+
+#ifdef TFLITE
+
+void
+TensorflowInference::loadModel()
+{
+    Plog::log(Plog::LogPriority::INFO, TAG, "inside loadModel()");
+    flatbufferModel = tflite::FlatBufferModel::BuildFromFile(tfModel.modelPath.c_str());
+    if (!flatbufferModel) {
+        throw std::runtime_error("Failed to load the model file");
+    }
+    Plog::log(Plog::LogPriority::INFO, "TENSOR", "MODEL LOADED" );
+
+}
+void
+TensorflowInference::buildInterpreter()
+{
+    Plog::log(Plog::LogPriority::INFO, TAG, "inside buildInterpreter()");
+    // Build the interpreter
+    tflite::ops::builtin::BuiltinOpResolver resolver;
+    tflite::InterpreterBuilder builder(*flatbufferModel, resolver);
+    builder(&interpreter);
+    if(interpreter) {
+        setInterpreterSettings();
+        Plog::log(Plog::LogPriority::INFO, "TENSOR", "INTERPRETER BUILT" );
+
+        if (tfModel.useNNAPI) {
+                TfLiteDelegate* optionalNnApiDelegate = tflite::NnApiDelegate();
+
+                if (interpreter->ModifyGraphWithDelegate(optionalNnApiDelegate) != kTfLiteOk) {
+                    Plog::log(Plog::LogPriority::INFO, "TENSOR", "INTERPRETER ERROR!!!" );
+                }
+                else {
+                    Plog::log(Plog::LogPriority::INFO, "TENSOR", "INTERPRETER SET" );
+                    allocateTensors();
+                }
+        }
+        else {
+            allocateTensors();
+        }
+    }
+}
+
+void
+TensorflowInference::allocateTensors()
+{
+    if (interpreter->AllocateTensors() != kTfLiteOk) {
+        throw std::runtime_error("Failed to allocate tensors!");
+    } else {
+        Plog::log(Plog::LogPriority::INFO, "TENSOR", "TENSORS ALLOCATED" );
+        allocated = true;
+    }
+}
+
+void
+TensorflowInference::describeModelTensors() const
+{
+    // PrintInterpreterState(interpreter.get());
+    std::ostringstream oss;
+    oss << "=============== inputs/outputs dimensions ==============="
+            << "\n";
+    const std::vector<int> inputs = interpreter->inputs();
+    const std::vector<int> outputs = interpreter->outputs();
+    oss << "number of inputs: " << inputs.size() << std::endl;
+    oss << "number of outputs: " << outputs.size() << std::endl;
+
+    Plog::log(Plog::LogPriority::INFO, "TENSOR", oss.str() );
+    int input = interpreter->inputs()[0];
+    int output = interpreter->outputs()[0];
+    oss << "input 0 index: " << input << std::endl;
+    oss << "output 0 index: " << output << std::endl;
+    oss << "=============== input dimensions ==============="
+            << std::endl;
+    Plog::log(Plog::LogPriority::INFO, "TENSOR", oss.str() );
+    // get input dimension from the input tensor metadata
+    // assuming one input only
+
+    for (size_t i = 0; i < inputs.size(); i++) {
+        std::stringstream ss;
+        ss << "Input  " << i << "   ➛ ";
+        describeTensor(ss.str(), interpreter->inputs()[i]);
+    }
+    oss.str("");
+    oss << "=============== output dimensions ==============="
+            << "\n";
+    Plog::log(Plog::LogPriority::INFO, "TENSOR", oss.str() );
+    // get input dimension from the input tensor metadata
+    // assuming one input only
+    for (size_t i = 0; i < outputs.size(); i++) {
+        std::stringstream ss;
+        ss << "Output " << i << "   ➛ ";
+        describeTensor(ss.str(), interpreter->outputs()[i]);
+    }
+}
+
+void
+TensorflowInference::setInterpreterSettings()
+{
+    // interpreter->UseNNAPI(tfModel.useNNAPI);
+    interpreter->SetAllowFp16PrecisionForFp32(tfModel.allowFp16PrecisionForFp32);
+
+    interpreter->SetNumThreads(static_cast<int>(tfModel.numberOfThreads));
+}
+
+void
+TensorflowInference::describeTensor(std::string prefix, int index) const
+{
+    std::vector<int> dimensions = getTensorDimensions(index);
+    size_t nbDimensions = dimensions.size();
+
+    std::ostringstream tensorDescription;
+    tensorDescription << prefix;
+    for (size_t i = 0; i < nbDimensions; i++) {
+        if (i == dimensions.size() - 1) {
+            tensorDescription << dimensions[i];
+        }
+        else {
+            tensorDescription << dimensions[i] << " x ";
+        }
+    }
+    tensorDescription << std::endl;
+    Plog::log(Plog::LogPriority::INFO, "TENSOR", tensorDescription.str() );
+}
+
+std::vector<int>
+TensorflowInference::getTensorDimensions(int index) const
+{
+    TfLiteIntArray *dims = interpreter->tensor(index)->dims;
+    size_t size = static_cast<size_t>(interpreter->tensor(index)->dims->size);
+    std::vector<int> result;
+    result.reserve(size);
+    for (size_t i = 0; i != size; i++) {
+        result.push_back(dims->data[i]);
+    }
+
+    dims = nullptr;
+
+    return result;
+}
+
+void
+TensorflowInference::runGraph()
+{
+    for (size_t i = 0; i < tfModel.numberOfRuns; i++) {
+        if (interpreter->Invoke() != kTfLiteOk) {
+            Plog::log(Plog::LogPriority::INFO, "RUN GRAPH", "A problem occurred when running the graph");
+        }
+    }
+}
+
+void
+TensorflowInference::init()
+{
+    /// Loading the model
+    loadModel();
+    buildInterpreter();
+    describeModelTensors();
+}
+
+#else
+// Reads a model graph definition from disk, and creates a session object you
+// can use to run it.
+void
+TensorflowInference::LoadGraph()
+{
+    tensorflow::GraphDef graph_def;
+    tensorflow::Status load_graph_status = tensorflow::ReadBinaryProto(tensorflow::Env::Default(), tfModel.modelPath, &graph_def);
+    if (!load_graph_status.ok()) {
+        allocated = false;
+        Plog::log(Plog::LogPriority::INFO, "LOAD GRAPH", "A problem occurred when loading the graph");
+        return;
+    }
+    Plog::log(Plog::LogPriority::INFO, "LOAD GRAPH", "graph loaded");
+
+    // Plog::log(Plog::LogPriority::INFO, "GRAPH SIZE: ", std::to_string(graph_def.node_size()));
+    // for (auto& node : *graph_def.mutable_node())
+    // {
+    //     Plog::log(Plog::LogPriority::INFO, "GRAPH NODE: ", node.name().c_str());
+    //     // Plog::log(Plog::LogPriority::INFO, "\tNODE SIZE: ", node.().c_str());
+    // }
+
+    PluginParameters* parameters = getGlobalPluginParameters();
+
+    tensorflow::SessionOptions options;
+    if(parameters->useGPU) {
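+        // Let the GPU allocator grow on demand and cap TensorFlow at ~30% of device memory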
+        options.config.mutable_gpu_options()->set_allow_growth(true);
+        options.config.mutable_gpu_options()->set_per_process_gpu_memory_fraction(0.3);
+    }
+    else {
+#ifdef WIN32
+        options.config.mutable_device_count()->insert({ "CPU", 1 });
+        options.config.mutable_device_count()->insert({ "GPU", 0 });
+#else
+	setenv("CUDA_VISIBLE_DEVICES", "", 1);
+#endif
+    }
+    
+    session.reset(tensorflow::NewSession(options));
+    tensorflow::Status session_create_status = session->Create(graph_def);
+    if (!session_create_status.ok()) {
+        Plog::log(Plog::LogPriority::INFO, "INIT SESSION", "A problem occurred when initializing the session");
+        allocated = false;
+        return;
+    }
+    Plog::log(Plog::LogPriority::INFO, "INIT SESSION", "session initialized");
+
+    allocated = true;
+}
+
+void
+TensorflowInference::runGraph()
+{
+    for (size_t i = 0; i < tfModel.numberOfRuns; i++) {
+        // Actually run the image through the model.
+        tensorflow::Status run_status = session->Run({{tfModel.inputLayer, imageTensor}}, {tfModel.outputLayer}, {}, &outputs);
+        if (!run_status.ok()) {
+            Plog::log(Plog::LogPriority::INFO, "RUN GRAPH", "A problem occurred when running the graph");
+        }
+    }
+}
+
+void
+TensorflowInference::init()
+{
+    // Loading the model
+    LoadGraph();
+}
+#endif
+
+}
diff --git a/GreenScreen/TFInference.h b/GreenScreen/TFInference.h
new file mode 100644
index 0000000..9741119
--- /dev/null
+++ b/GreenScreen/TFInference.h
@@ -0,0 +1,153 @@
+/**
+ *  Copyright (C) 2020 Savoir-faire Linux Inc.
+ *
+ *  Author: Aline Gondim Santos <aline.gondimsantos@savoirfairelinux.com>
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; either version 3 of the License, or
+ *  (at your option) any later version.
+ *
+ *  This program is distributed in the hope that it will be useful,
+ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *  GNU General Public License for more details.
+ *
+ *  You should have received a copy of the GNU General Public License
+ *  along with this program; if not, write to the Free Software
+ *  Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301 USA.
+ */
+
+#pragma once
+
+// Library headers
+#include "TFModels.h"
+
+// STL
+#include <memory>
+#include <string>
+#include <vector>
+
+#ifdef TFLITE
+#include <tensorflow/lite/interpreter.h>
+#include <tensorflow/lite/delegates/nnapi/nnapi_delegate.h>
+
+namespace tflite {
+class FlatBufferModel;
+class Interpreter;
+class StatefulNnApiDelegate;
+} // namespace tflite
+
+#else
+#ifdef WIN32
+#define NOMINMAX
+#undef min
+#undef max
+#endif
+
+#include <tensorflow/core/lib/core/status.h>
+#include <tensorflow/core/public/session.h>
+#include <tensorflow/core/framework/tensor.h>
+#include <tensorflow/core/framework/types.pb.h>
+#include <tensorflow/core/platform/init_main.h>
+#include <tensorflow/core/protobuf/config.pb.h>
+
+namespace tensorflow {
+class Tensor;
+class Status;
+class GraphDef;
+class Session;
+struct SessionOptions;
+class TensorShape;
+class Env;
+enum DataType:int;
+} // namespace tensorflow
+
+#endif
+
+
+namespace jami
+{
+class TensorflowInference {
+public:
+    /**
+     * @brief TensorflowInference
+     * Takes a supervised model where the model and labels files are defined
+     * @param model
+     */
+    TensorflowInference(TFModel model);
+    ~TensorflowInference();
+
+#ifdef TFLITE
+    /**
+     * @brief loadModel
+     * Load the model from the file described in the Supervised Model
+     */
+    void loadModel();
+    void buildInterpreter();
+    void setInterpreterSettings();
+
+    /**
+     * @brief allocateTensors
+     * Tries to allocate space for the tensors
+     * In case of success isAllocated() should return true
+     */
+    void allocateTensors();
+
+    // Debug methods
+    void describeModelTensors() const;
+    void describeTensor(std::string prefix, int index) const;
+
+#else
+    void LoadGraph();
+    tensorflow::Tensor imageTensor;
+
+#endif //TFLITE
+
+    /**
+     * @brief runGraph
+     * Runs the underlying graph model.numberOfRuns times,
+     * where numberOfRuns is defined in the model
+     */
+    void runGraph();
+
+    /**
+     * @brief init
+     * Inits the model and interpreter, allocates tensors, and loads the labels
+     */
+    void init();
+    // Getters
+    bool isAllocated() const;
+
+protected:
+#ifdef TFLITE
+    /**
+     * @brief getTensorDimensions
+     * Utility method to get Tensorflow Tensor dimensions
+     * Given the index of the tensor, the function gives back a vector
+     * where each element is the size of one tensor dimension; thus,
+     * vector.size() is the tensor's rank (number of dimensions)
+     * @param index
+     * @return
+     */
+    std::vector<int> getTensorDimensions(int index) const;
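+    // e.g. a 1 x 256 x 256 x 3 input tensor yields {1, 256, 256, 3}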
+
+    // Tensorflow model and interpreter
+    std::unique_ptr<tflite::FlatBufferModel> flatbufferModel;
+    std::unique_ptr<tflite::Interpreter> interpreter;
+#else
+    std::unique_ptr<tensorflow::Session> session;
+    std::vector<tensorflow::Tensor> outputs;
+#endif
+    TFModel tfModel;
+    std::vector<std::string> labels;
+
+    /**
+     * @brief nbLabels
+     * The real number of labels may not match labels.size() because of padding
+     */
+    size_t nbLabels;
+
+    bool allocated = false;
+};
+}
diff --git a/GreenScreen/TFModels.h b/GreenScreen/TFModels.h
new file mode 100644
index 0000000..be0b4ae
--- /dev/null
+++ b/GreenScreen/TFModels.h
@@ -0,0 +1,62 @@
+/*
+ *  Copyright (C) 2004-2020 Savoir-faire Linux Inc.
+ *
+ *  Author: Aline Gondim Santos <aline.gondimsantos@savoirfairelinux.com>
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; either version 3 of the License, or
+ *  (at your option) any later version.
+ *
+ *  This program is distributed in the hope that it will be useful,
+ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *  GNU General Public License for more details.
+ *
+ *  You should have received a copy of the GNU General Public License
+ *  along with this program; if not, write to the Free Software
+ *  Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301 USA.
+ */
+
+#pragma once
+
+// Std libraries
+#include <string>
+#include <vector>
+#include "pluginParameters.h"
+
+struct TFModelConfiguration {
+    TFModelConfiguration (std::string& model): modelPath{model} {}
+    std::string modelPath;
+    std::vector<unsigned int> normalizationValues;
+    std::vector<int> dims = {1, 385, 385, 3}; // model input dimensions
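+    // read by PluginInference::setExpectedImageDimensions() as {batch, width, height, channels}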
+    unsigned int numberOfRuns = 1;
+    // TensorflowLite specific settings
+
+#ifdef TFLITE
+#ifdef __ANDROID__
+    bool useNNAPI = true;
+#else
+    bool useNNAPI = false;
+#endif //__ANDROID__
+    bool allowFp16PrecisionForFp32 = true;
+    unsigned int numberOfThreads = 1;
+
+    // User defined details
+    bool inputFloating = false;
+#else
+    std::string inputLayer = "sub_2";
+    std::string outputLayer = "float_segments";
+#endif // TFLITE
+
+};
+
+struct TFModel : TFModelConfiguration {
+    TFModel(std::string&& model, std::string&& labels): TFModelConfiguration(model), labelsPath{labels}{}
+    TFModel(std::string& model, std::string& labels): TFModelConfiguration(model), labelsPath{labels}{}
+    TFModel(std::string&& model): TFModelConfiguration(model) {}
+    TFModel(std::string& model): TFModelConfiguration(model) {}
+
+    std::string labelsPath = " ";
+    unsigned int labelsPadding = 16;
+};
diff --git a/GreenScreen/build.sh b/GreenScreen/build.sh
new file mode 100644
index 0000000..d9b6fbf
--- /dev/null
+++ b/GreenScreen/build.sh
@@ -0,0 +1,78 @@
+#! /bin/bash
+# Build the plugin for the project
+if [ -z "$DAEMON" ]; then
+    DAEMON="./../../daemon"
+    echo "DAEMON not provided, building for ${DAEMON}"
+fi
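+# Usage sketch (example path): DAEMON=./../../daemon bash build.sh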
+
+PLUGIN_NAME="GreenScreen"
+JPL_FILE_NAME=${PLUGIN_NAME}".jpl"
+SO_FILE_NAME="lib"${PLUGIN_NAME}".so"
+DAEMON_SRC="${DAEMON}/src"
+CONTRIB_PATH="${DAEMON}/contrib"
+DESTINATION_PATH="./../build/"
+PLUGINS_LIB="../lib"
+LIBS_DIR="/home/${USER}/Libs"
+
+
+CONTRIB_PLATFORM_CURT=x86_64
+CONTRIB_PLATFORM=${CONTRIB_PLATFORM_CURT}-linux-gnu
+
+mkdir -p lib/${CONTRIB_PLATFORM}
+mkdir -p ${DESTINATION_PATH}/jpl
+
+# Compile
+clang++ -std=c++17 -shared -fPIC \
+-Wl,-Bsymbolic,-rpath,"\${ORIGIN}" \
+-Wall -Wextra \
+-Wno-unused-variable \
+-Wno-unused-function \
+-Wno-unused-parameter \
+-DTFLITE \
+-I"." \
+-I${DAEMON_SRC} \
+-I"${CONTRIB_PATH}/${CONTRIB_PLATFORM}/include" \
+-I"${CONTRIB_PATH}/${CONTRIB_PLATFORM}/include/opencv4" \
+-I${LIBS_DIR}/_tensorflow_distribution/include/flatbuffers \
+-I${LIBS_DIR}/_tensorflow_distribution/include \
+-I${PLUGINS_LIB} \
+main.cpp \
+videoSubscriber.cpp \
+pluginProcessor.cpp \
+pluginMediaHandler.cpp \
+TFInference.cpp \
+pluginInference.cpp \
+pluginParameters.cpp \
+-L${CONTRIB_PATH}/${CONTRIB_PLATFORM}/lib/ \
+-L${CONTRIB_PATH}/${CONTRIB_PLATFORM}/lib/opencv4/3rdparty/ \
+-L${LIBS_DIR}/_tensorflow_distribution/lib/${CONTRIB_PLATFORM}/ \
+-lswscale \
+-lavutil \
+-lopencv_imgcodecs \
+-lopencv_imgproc \
+-lopencv_core \
+-ltensorflowlite \
+-lpng \
+-o lib/${CONTRIB_PLATFORM}/${SO_FILE_NAME}
+# (above) Always put opencv_core after all other opencv libs
+# (above) Always put avutil after all other ffmpeg libs
+# (above) Always put png after all other libs
+
+mkdir -p ./data/models
+cp ${LIBS_DIR}/_tensorflow_distribution/lib/${CONTRIB_PLATFORM}/libtensorflowlite.so lib/$CONTRIB_PLATFORM
+cp /usr/lib/${CONTRIB_PLATFORM}/libswscale.so.4 lib/$CONTRIB_PLATFORM
+cp /usr/lib/${CONTRIB_PLATFORM}/libavutil.so.55 lib/$CONTRIB_PLATFORM
+cp /usr/lib/${CONTRIB_PLATFORM}/libpng16.so.16 lib/$CONTRIB_PLATFORM
+
+cp ./modelsSRC/mobilenet_v2_deeplab_v3_256_myquant.tflite ./data/models/mModel.tflite
+cp ./preferences-tflite.json ./data/preferences.json
+
+zip -r ${JPL_FILE_NAME} data manifest.json lib
+mv ${JPL_FILE_NAME} ${DESTINATION_PATH}/jpl/
+
+# Cleanup
+# Remove lib after compilation
+rm -rf lib
+rm -r ./data/models/
+rm ./data/preferences.json
diff --git a/GreenScreen/buildandroid.sh b/GreenScreen/buildandroid.sh
new file mode 100644
index 0000000..c0e2343
--- /dev/null
+++ b/GreenScreen/buildandroid.sh
@@ -0,0 +1,188 @@
+#! /bin/bash
+# Build the plugin for the project
+if [ -z "$DAEMON" ]; then
+    DAEMON="./../../daemon"
+    echo "DAEMON not provided, building for ${DAEMON}"
+fi
+if [ -z "$ANDROID_NDK" ]; then
+    ANDROID_NDK=/home/${USER}/Android/Sdk/ndk/21.1.6352462
+    echo "ANDROID_NDK not provided, building with ${ANDROID_NDK}"
+fi
+
+PLUGIN_NAME="GreenScreen"
+JPL_FILE_NAME=${PLUGIN_NAME}".jpl"
+SO_FILE_NAME="lib"${PLUGIN_NAME}".so"
+LIBS_DIR="/home/${USER}/Libs"
+DAEMON_SRC="${DAEMON}/src"
+CONTRIB_PATH="${DAEMON}/contrib"
+DESTINATION_PATH="./../build/"
+PLUGINS_LIB="../lib"
+
+#=========================================================
+#	Check if the ANDROID_ABI was provided
+#	if not, set default
+#=========================================================
+if [ -z "$ANDROID_ABI" ]; then
+    ANDROID_ABI="armeabi-v7a arm64-v8a"
+    echo "ANDROID_ABI not provided, building for ${ANDROID_ABI}"
+fi
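+# Usage sketch (example values): ANDROID_ABI="arm64-v8a" ANDROID_NDK=$HOME/Android/Sdk/ndk/21.1.6352462 bash buildandroid.sh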
+
+buildlib() {
+	echo $CURRENT_ABI
+	#=========================================================
+	#	ANDROID TOOLS
+	#=========================================================
+	export HOST_TAG=linux-x86_64
+	export TOOLCHAIN=$ANDROID_NDK/toolchains/llvm/prebuilt/$HOST_TAG
+
+	if [ $CURRENT_ABI = armeabi-v7a ]
+	then
+	export AR=$TOOLCHAIN/bin/arm-linux-androideabi-ar
+	export AS=$TOOLCHAIN/bin/arm-linux-androideabi-as
+	export CC=$TOOLCHAIN/bin/armv7a-linux-androideabi21-clang
+	export CXX=$TOOLCHAIN/bin/armv7a-linux-androideabi21-clang++
+	export LD=$TOOLCHAIN/bin/arm-linux-androideabi-ld
+	export RANLIB=$TOOLCHAIN/bin/arm-linux-androideabi-ranlib
+	export STRIP=$TOOLCHAIN/bin/arm-linux-androideabi-strip
+	export ANDROID_SYSROOT=./../../client-android/android-toolchain-21-arm/sysroot
+
+	elif [ $CURRENT_ABI = arm64-v8a ]
+	then
+	export AR=$TOOLCHAIN/bin/aarch64-linux-android-ar
+	export AS=$TOOLCHAIN/bin/aarch64-linux-android-as
+	export CC=$TOOLCHAIN/bin/aarch64-linux-android21-clang
+	export CXX=$TOOLCHAIN/bin/aarch64-linux-android21-clang++
+	export LD=$TOOLCHAIN/bin/aarch64-linux-android-ld
+	export RANLIB=$TOOLCHAIN/bin/aarch64-linux-android-ranlib
+	export STRIP=$TOOLCHAIN/bin/aarch64-linux-android-strip
+	export ANDROID_SYSROOT=./../../client-android/android-toolchain-21-arm64/sysroot
+
+	elif [ $CURRENT_ABI = x86_64 ]
+	then
+	export AR=$TOOLCHAIN/bin/x86_64-linux-android-ar
+	export AS=$TOOLCHAIN/bin/x86_64-linux-android-as
+	export CC=$TOOLCHAIN/bin/x86_64-linux-android21-clang
+	export CXX=$TOOLCHAIN/bin/x86_64-linux-android21-clang++
+	export LD=$TOOLCHAIN/bin/x86_64-linux-android-ld
+	export RANLIB=$TOOLCHAIN/bin/x86_64-linux-android-ranlib
+	export STRIP=$TOOLCHAIN/bin/x86_64-linux-android-strip
+	export ANDROID_SYSROOT=./../../client-android/android-toolchain-21-x86_64/sysroot
+
+	else
+	echo "ABI NOT OK" >&2
+	exit 1
+	fi
+
+	#=========================================================
+	#	CONTRIBS
+	#=========================================================
+	if [ $CURRENT_ABI = armeabi-v7a ]
+	then
+	CONTRIB_PLATFORM=arm-linux-androideabi
+
+	elif [ $CURRENT_ABI = arm64-v8a ]
+	then
+	CONTRIB_PLATFORM=aarch64-linux-android
+
+	elif [ $CURRENT_ABI = x86_64 ]
+	then
+	CONTRIB_PLATFORM=x86_64-linux-android
+	fi
+
+	# ASSETS
+	ANDROID_PROJECT_ASSETS=./../../client-android/ring-android/app/src/main/assets
+	# LIBS FOLDER
+	ANDROID_PROJECT_LIBS=./../../client-android/ring-android/app/src/main/libs/$CURRENT_ABI
+	#NDK SOURCES FOR cpufeatures
+	NDK_SOURCES=${ANDROID_NDK}/sources/android
+
+	#=========================================================
+	#	LD_FLAGS
+	#=========================================================
+	if [ $CURRENT_ABI = armeabi-v7a ]
+	then
+	export EXTRA_LDFLAGS="${EXTRA_LDFLAGS} -L${ANDROID_SYSROOT}/usr/lib/arm-linux-androideabi -L${ANDROID_SYSROOT}/usr/lib/arm-linux-androideabi/21"
+	elif [ $CURRENT_ABI = arm64-v8a ]
+	then
+	export EXTRA_LDFLAGS="${EXTRA_LDFLAGS} -L${ANDROID_SYSROOT}/usr/lib/aarch64-linux-android -L${ANDROID_SYSROOT}/usr/lib/aarch64-linux-android/21"
+	elif [ $CURRENT_ABI = x86_64 ]
+	then
+	export EXTRA_LDFLAGS="${EXTRA_LDFLAGS} -L${ANDROID_SYSROOT}/usr/lib/x86_64-linux-android -L${ANDROID_SYSROOT}/usr/lib/x86_64-linux-android/21"
+	fi
+
+	#=========================================================
+	#	Compile CPU FEATURES, NEEDED FOR OPENCV
+	#=========================================================
+	$CC -c $NDK_SOURCES/cpufeatures/cpu-features.c -o cpu-features.o --sysroot=$ANDROID_SYSROOT
+
+	#=========================================================
+	#	Compile the plugin
+	#=========================================================
+
+	# Create so destination folder
+	mkdir -p lib/$CURRENT_ABI
+
+	# Create so destination folder
+    $CXX --std=c++14 -O3 -g -fPIC \
+	-Wl,-Bsymbolic,-rpath,"\${ORIGIN}" \
+	-shared \
+	-Wall -Wextra \
+	-Wno-unused-variable \
+	-Wno-unused-function \
+	-Wno-unused-parameter \
+	-DTFLITE \
+	-I"." \
+	-I${DAEMON_SRC} \
+	-I"${CONTRIB_PATH}/${CONTRIB_PLATFORM}/include" \
+    -I"${CONTRIB_PATH}/${CONTRIB_PLATFORM}/include/opencv4" \
+    -I${LIBS_DIR}/_tensorflow_distribution/include/flatbuffers \
+	-I${LIBS_DIR}/_tensorflow_distribution/include \
+	-I${PLUGINS_LIB} \
+	main.cpp \
+	videoSubscriber.cpp \
+	pluginProcessor.cpp \
+    pluginMediaHandler.cpp \
+	TFInference.cpp \
+	pluginInference.cpp \
+	pluginParameters.cpp \
+	cpu-features.o \
+	-L${CONTRIB_PATH}/${CONTRIB_PLATFORM}/lib/ \
+	-L${LIBS_DIR}/_tensorflow_distribution/lib/${CURRENT_ABI}/ \
+	-lswscale \
+	-lavutil \
+	-lopencv_imgcodecs \
+	-lopencv_imgproc \
+	-lopencv_core \
+	-lpng \
+	-ltensorflowlite \
+	-llog -lz \
+	--sysroot=$ANDROID_SYSROOT \
+	-o lib/$CURRENT_ABI/${SO_FILE_NAME}
+	# (above) Always put opencv_core after all other opencv libs when linking statically
+	# (above) Put libavutil after other ffmpeg libraries
+
+	cp ${LIBS_DIR}/_tensorflow_distribution/lib/${CURRENT_ABI}/libtensorflowlite.so lib/$CURRENT_ABI
+}
+
+
+mkdir -p ./data/models
+cp ./modelsSRC/mobilenet_v2_deeplab_v3_256_myquant.tflite ./data/models/mModel.tflite
+cp ./preferences-tflite.json ./data/preferences.json
+
+# Build the so
+for i in ${ANDROID_ABI}; do
+	CURRENT_ABI=$i
+	buildlib
+done
+
+#Export the plugin data folder
+mkdir -p ${DESTINATION_PATH}/jpl/${PLUGIN_NAME}/
+zip -r ${JPL_FILE_NAME} data manifest.json lib
+mv ${JPL_FILE_NAME} ${DESTINATION_PATH}/jpl/${PLUGIN_NAME}/
+
+# Cleanup
+# Remove cpu-features object after compilation
+rm cpu-features.o
+rm -rf lib
+rm -r ./data/models
+rm ./data/preferences.json
diff --git a/GreenScreen/buildtfcc.sh b/GreenScreen/buildtfcc.sh
new file mode 100644
index 0000000..2eef434
--- /dev/null
+++ b/GreenScreen/buildtfcc.sh
@@ -0,0 +1,94 @@
+#! /bin/bash
+# Build the plugin for the project
+if [ -z "$DAEMON" ]; then
+    DAEMON="./../../daemon"
+    echo "DAEMON not provided, building for ${DAEMON}"
+fi
+
+if [ -z "$CUDALIBS" ]; then
+    CUDALIBS=~/anaconda3/envs/tf114/lib/
+    echo "CUDALIBS not provided, building for ${CUDALIBS}"
+fi
+
+if [ -z "$PROCESSOR" ]; then
+    PROCESSOR=GPU
+    echo "PROCESSOR not defined, building for GPU"
+fi
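+# Usage sketch (example values): PROCESSOR=GPU CUDALIBS=~/anaconda3/envs/tf114/lib/ bash buildtfcc.sh
+# Note: CUDALIBS must keep its trailing slash; it is concatenated directly in the cp commands below.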
+
+PLUGIN_NAME="GreenScreen"
+JPL_FILE_NAME=${PLUGIN_NAME}".jpl"
+SO_FILE_NAME="lib"${PLUGIN_NAME}".so"
+DAEMON_SRC="${DAEMON}/src"
+CONTRIB_PATH="${DAEMON}/contrib"
+DESTINATION_PATH="./../build/"
+PLUGINS_LIB="../lib"
+LIBS_DIR="/home/${USER}/Libs"
+
+
+CONTRIB_PLATFORM_CURT=x86_64
+CONTRIB_PLATFORM=${CONTRIB_PLATFORM_CURT}-linux-gnu
+
+mkdir -p lib/${CONTRIB_PLATFORM}
+mkdir -p ${DESTINATION_PATH}/jpl
+
+# Compile
+clang++ -std=c++17 -shared -fPIC \
+-Wl,-Bsymbolic,-rpath,"\${ORIGIN}" \
+-Wall -Wextra \
+-Wno-unused-variable \
+-Wno-unused-function \
+-Wno-unused-parameter \
+-D${PROCESSOR} \
+-I"." \
+-I${DAEMON_SRC} \
+-I"${CONTRIB_PATH}/${CONTRIB_PLATFORM}/include" \
+-I"${CONTRIB_PATH}/${CONTRIB_PLATFORM}/include/opencv4" \
+-I${LIBS_DIR}/_tensorflow_cc/include \
+-I${LIBS_DIR}/_tensorflow_cc/include/third_party/eigen3 \
+-I${PLUGINS_LIB} \
+main.cpp \
+videoSubscriber.cpp \
+pluginProcessor.cpp \
+pluginMediaHandler.cpp \
+TFInference.cpp \
+pluginInference.cpp \
+pluginParameters.cpp \
+-L${CONTRIB_PATH}/${CONTRIB_PLATFORM}/lib/ \
+-L${CONTRIB_PATH}/${CONTRIB_PLATFORM}/lib/opencv4/3rdparty/ \
+-L${LIBS_DIR}/_tensorflow_cc/lib/${CONTRIB_PLATFORM}-gpu61/ \
+-lswscale \
+-lavutil \
+-lopencv_imgcodecs \
+-lopencv_imgproc \
+-lopencv_core \
+-ltensorflow_cc \
+-lpng \
+-o lib/${CONTRIB_PLATFORM}/${SO_FILE_NAME}
+# (above) Always put opencv_core after all other opencv libs
+# (above) Always put avutil after all other ffmpeg libs
+# (above) Always put png after all other libs
+
+cp ${LIBS_DIR}/_tensorflow_cc/lib/${CONTRIB_PLATFORM}-gpu61/libtensorflow_cc.so lib/$CONTRIB_PLATFORM/libtensorflow_cc.so.2
+cp /usr/lib/${CONTRIB_PLATFORM}/libswscale.so.4 lib/$CONTRIB_PLATFORM
+cp /usr/lib/${CONTRIB_PLATFORM}/libavutil.so.55 lib/$CONTRIB_PLATFORM
+cp /usr/lib/${CONTRIB_PLATFORM}/libpng16.so.16 lib/$CONTRIB_PLATFORM
+cp ${CUDALIBS}libcudart.so.10.0 lib/$CONTRIB_PLATFORM
+cp ${CUDALIBS}libcublas.so.10.0 lib/$CONTRIB_PLATFORM
+cp ${CUDALIBS}libcufft.so.10.0 lib/$CONTRIB_PLATFORM
+cp ${CUDALIBS}libcurand.so.10.0 lib/$CONTRIB_PLATFORM
+cp ${CUDALIBS}libcusolver.so.10.0 lib/$CONTRIB_PLATFORM
+cp ${CUDALIBS}libcusparse.so.10.0 lib/$CONTRIB_PLATFORM
+cp ${CUDALIBS}libcudnn.so.7 lib/$CONTRIB_PLATFORM
+
+mkdir -p ./data/models
+cp ./modelsSRC/mModel-resnet50float.pb ./data/models/mModel.pb
+cp ./preferences-tfcc.json ./data/preferences.json
+
+zip -r ${JPL_FILE_NAME} data manifest.json lib
+mv ${JPL_FILE_NAME} ${DESTINATION_PATH}/jpl/
+
+# Cleanup
+# Remove lib after compilation
+rm -rf lib
+rm ./data/models/mModel.pb
+rm ./data/preferences.json
diff --git a/GreenScreen/data/backgrounds/background1.png b/GreenScreen/data/backgrounds/background1.png
new file mode 100644
index 0000000..849251c
--- /dev/null
+++ b/GreenScreen/data/backgrounds/background1.png
Binary files differ
diff --git a/GreenScreen/data/backgrounds/background2.png b/GreenScreen/data/backgrounds/background2.png
new file mode 100644
index 0000000..1183a25
--- /dev/null
+++ b/GreenScreen/data/backgrounds/background2.png
Binary files differ
diff --git a/GreenScreen/data/icon.png b/GreenScreen/data/icon.png
new file mode 100644
index 0000000..f44370f
--- /dev/null
+++ b/GreenScreen/data/icon.png
Binary files differ
diff --git a/GreenScreen/main.cpp b/GreenScreen/main.cpp
new file mode 100644
index 0000000..658eba0
--- /dev/null
+++ b/GreenScreen/main.cpp
@@ -0,0 +1,64 @@
+/**
+ *  Copyright (C) 2020 Savoir-faire Linux Inc.
+ *
+ *  Author: Aline Gondim Santos <aline.gondimsantos@savoirfairelinux.com>
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; either version 3 of the License, or
+ *  (at your option) any later version.
+ *
+ *  This program is distributed in the hope that it will be useful,
+ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *  GNU General Public License for more details.
+ *
+ *  You should have received a copy of the GNU General Public License
+ *  along with this program; if not, write to the Free Software
+ *  Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301 USA.
+ */
+
+#include <iostream>
+#include <string.h>
+#include <thread>
+#include <memory>
+#include <plugin/jamiplugin.h>
+#include "pluginMediaHandler.h"
+
+#ifdef WIN32
+#define EXPORT_PLUGIN __declspec (dllexport)
+#else
+#define EXPORT_PLUGIN
+#endif
+
+#define GreenScreen_VERSION_MAJOR 1
+#define GreenScreen_VERSION_MINOR 0
+
+extern "C" 
+{
+void pluginExit(void) { }
+
+EXPORT_PLUGIN JAMI_PluginExitFunc
+JAMI_dynPluginInit(const JAMI_PluginAPI* api)
+{
+    std::cout << "**************************" << std::endl << std::endl;
+    std::cout << "**  GREENSCREEN PLUGIN  **" << std::endl;
+    std::cout << "**************************" << std::endl << std::endl;
+    std::cout << " Version " << GreenScreen_VERSION_MAJOR << "." << GreenScreen_VERSION_MINOR << std::endl;
+
+    // Register the media handler only if the plugin API is available
+    if(api) 
+    {
+        std::map<std::string, std::string> ppm;
+        api->invokeService(api, "getPluginPreferences", &ppm);
+        std::string dataPath;
+        api->invokeService(api, "getPluginDataPath", &dataPath);
+        auto fmp = std::make_unique<jami::PluginMediaHandler>(std::move(ppm), std::move(dataPath));
+
+        if(!api->manageComponent(api,"CallMediaHandlerManager", fmp.release())) {
+            return pluginExit;
+        }
+    }
+    return nullptr;
+}
+}
diff --git a/GreenScreen/manifest.json b/GreenScreen/manifest.json
new file mode 100644
index 0000000..d534b0d
--- /dev/null
+++ b/GreenScreen/manifest.json
@@ -0,0 +1,5 @@
+{
+	"name": "GreenScreen",
+	"description" : "GreenScreen Plugin with Tensorflow 2.1.1",
+	"version" : "1.0"
+}
diff --git a/GreenScreen/modelsSRC/mModel-resnet50float.pb b/GreenScreen/modelsSRC/mModel-resnet50float.pb
new file mode 100644
index 0000000..b6c95a0
--- /dev/null
+++ b/GreenScreen/modelsSRC/mModel-resnet50float.pb
Binary files differ
diff --git a/GreenScreen/modelsSRC/mobilenet_v2_deeplab_v3_256_myquant.tflite b/GreenScreen/modelsSRC/mobilenet_v2_deeplab_v3_256_myquant.tflite
new file mode 100644
index 0000000..6a7f99c
--- /dev/null
+++ b/GreenScreen/modelsSRC/mobilenet_v2_deeplab_v3_256_myquant.tflite
Binary files differ
diff --git a/GreenScreen/modelsSRC/model_256_F_16.tflite b/GreenScreen/modelsSRC/model_256_F_16.tflite
new file mode 100644
index 0000000..f022db6
--- /dev/null
+++ b/GreenScreen/modelsSRC/model_256_F_16.tflite
Binary files differ
diff --git a/GreenScreen/modelsSRC/model_256_Qlatency.tflite b/GreenScreen/modelsSRC/model_256_Qlatency.tflite
new file mode 100644
index 0000000..3d54e05
--- /dev/null
+++ b/GreenScreen/modelsSRC/model_256_Qlatency.tflite
Binary files differ
diff --git a/GreenScreen/modelsSRC/model_256_Qlatency_16.tflite b/GreenScreen/modelsSRC/model_256_Qlatency_16.tflite
new file mode 100644
index 0000000..9fb7771
--- /dev/null
+++ b/GreenScreen/modelsSRC/model_256_Qlatency_16.tflite
Binary files differ
diff --git a/GreenScreen/pluginInference.cpp b/GreenScreen/pluginInference.cpp
new file mode 100644
index 0000000..cb9d32a
--- /dev/null
+++ b/GreenScreen/pluginInference.cpp
@@ -0,0 +1,222 @@
+/**
+ *  Copyright (C) 2020 Savoir-faire Linux Inc.
+ *
+ *  Author: Aline Gondim Santos <aline.gondimsantos@savoirfairelinux.com>
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; either version 3 of the License, or
+ *  (at your option) any later version.
+ *
+ *  This program is distributed in the hope that it will be useful,
+ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *  GNU General Public License for more details.
+ *
+ *  You should have received a copy of the GNU General Public License
+ *  along with this program; if not, write to the Free Software
+ *  Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301 USA.
+ */
+
+#include "pluginInference.h"
+// Std libraries
+#include <cstring>
+#include <numeric>
+#include "pluglog.h"
+
+const char sep = separator();
+const std::string TAG = "FORESEG";
+
+namespace jami {
+
+PluginInference::PluginInference(TFModel model) : TensorflowInference(model)
+{
+#ifndef TFLITE
+	//Initialize TENSORFLOW_CC lib
+	static const char* kFakeName = "fake program name";
+	int argc = 1;
+	char* fake_name_copy = strdup(kFakeName);
+	char** argv = &fake_name_copy;
+	tensorflow::port::InitMain(kFakeName, &argc, &argv);
+	if (argc > 1) {
+		Plog::log(Plog::LogPriority::INFO, "TENSORFLOW INIT", "Unknown argument " );
+	}
+	free(fake_name_copy);
+#endif	//TFLITE
+}
+
+PluginInference::~PluginInference(){}
+
+#ifdef TFLITE
+std::pair<uint8_t*, std::vector<int>>
+PluginInference::getInput()
+{
+	// We assume that we have only one input
+	// Get the input index
+	int input = interpreter->inputs()[0];
+
+	uint8_t *inputDataPointer = interpreter->typed_tensor<uint8_t>(input);
+	// Get the input dimensions vector
+	std::vector<int> dims = getTensorDimensions(input);
+
+	return std::make_pair(inputDataPointer, dims);
+}
+
+// // Types returned by tensorflow
+// int type = interpreter->tensor(outputIndex)->type
+// typedef enum {
+// kTfLiteNoType = 0,
+// kTfLiteFloat32 = 1, float
+// kTfLiteInt32 = 2, int // int32_t
+// kTfLiteUInt8 = 3, uint8_t
+// kTfLiteInt64 = 4, int64_t
+// kTfLiteString = 5,
+// kTfLiteBool = 6,
+// kTfLiteInt16 = 7, int16_t
+// kTfLiteComplex64 = 8,
+// kTfLiteInt8 = 9, int8_t
+// kTfLiteFloat16 = 10, float16_t
+// } TfLiteType;
+
+std::vector<float>
+PluginInference::masksPredictions() const
+{
+	int outputIndex = interpreter->outputs()[0];
+	std::vector<int> dims = getTensorDimensions(outputIndex);
+	int totalDimensions = 1;
+	for (size_t i = 0; i < dims.size(); i++)
+	{
+		totalDimensions *= dims[i];
+	}
+	std::vector<float> out;
+
+	TfLiteType type = interpreter->tensor(outputIndex)->type;
+	switch(type) {
+		case kTfLiteFloat32:
+		{
+			float* outputDataPointer = interpreter->typed_tensor<float>(outputIndex);
+			out = std::vector<float>(outputDataPointer, outputDataPointer + totalDimensions);
+			break;
+		}
+		case kTfLiteInt32:
+		{
+			int* outputDataPointer = interpreter->typed_tensor<int>(outputIndex);
+			out = std::vector<float>(outputDataPointer, outputDataPointer + totalDimensions);
+			break;
+		}
+		case kTfLiteInt64:
+		{
+			int64_t* outputDataPointer = interpreter->typed_tensor<int64_t>(outputIndex);
+			out = std::vector<float>(outputDataPointer, outputDataPointer + totalDimensions);
+			break;
+		}
+		default:
+			break;
+	}
+
+	return out;
+}
+
+void
+PluginInference::setExpectedImageDimensions()
+{
+	// We assume that we have only one input
+	// Get the input index
+	int input = interpreter->inputs()[0];
+	// Get the input dimensions vector
+	std::vector<int> dims = getTensorDimensions(input);
+	
+	imageWidth = dims.at(1);
+	imageHeight = dims.at(2);
+	imageNbChannels = dims.at(3);
+}
+#else //TFLITE
+// Given an image file name, read in the data, try to decode it as an image,
+// resize it to the requested size, and then scale the values as desired.
+void
+PluginInference::ReadTensorFromMat(const cv::Mat& image)
+{
+	imageTensor = tensorflow::Tensor(tensorflow::DataType::DT_FLOAT, tensorflow::TensorShape({ 1, image.cols, image.rows, 3 }));
+	float* p = imageTensor.flat<float>().data();
+	cv::Mat temp(image.rows, image.cols, CV_32FC3, p);
+	image.convertTo(temp, CV_32FC3);
+}
+
+std::vector<float>
+PluginInference::masksPredictions() const
+{
+	std::vector<int> dims;
+	int flatSize = 1;
+	int num_dimensions = outputs[0].shape().dims();
+	for(int ii_dim=0; ii_dim<num_dimensions; ii_dim++) {
+		dims.push_back(outputs[0].shape().dim_size(ii_dim));
+		flatSize *= outputs[0].shape().dim_size(ii_dim);
+	}
+
+	std::vector<float> out;
+	int type = outputs[0].dtype();
+
+	switch(type) {
+		case tensorflow::DataType::DT_FLOAT:
+		{
+			for (int offset = 0; offset < flatSize; offset++) {
+				out.push_back(outputs[0].flat<float>()(offset));
+			}
+			break;
+		}
+		case tensorflow::DataType::DT_INT32:
+		{
+			for (int offset = 0; offset < flatSize; offset++) {
+				out.push_back(static_cast<float> (outputs[0].flat<tensorflow::int32>()(offset)));
+			}
+			break;
+		}
+		case tensorflow::DataType::DT_INT64:
+		{
+			for (int offset = 0; offset < flatSize; offset++) {
+				out.push_back(static_cast<float> (outputs[0].flat<tensorflow::int64>()(offset)));
+			}
+			break;
+		}
+		default:
+		{
+			for (int offset = 0; offset < flatSize; offset++) {
+				out.push_back(0);
+			}
+			break;
+		}
+	}
+	return out;
+}
+
+void
+PluginInference::setExpectedImageDimensions()
+{
+	if (tfModel.dims[1] != 0)
+		imageWidth = tfModel.dims[1];
+	if (tfModel.dims[2] != 0)
+		imageHeight = tfModel.dims[2];
+	if (tfModel.dims[3] != 0)
+		imageNbChannels = tfModel.dims[3];
+}
+#endif
+
+int
+PluginInference::getImageWidth() const
+{
+	return imageWidth;
+}
+
+int
+PluginInference::getImageHeight() const
+{
+	return imageHeight;
+}
+
+int
+PluginInference::getImageNbChannels() const
+{
+	return imageNbChannels;
+}
+} // namespace jami
diff --git a/GreenScreen/pluginInference.h b/GreenScreen/pluginInference.h
new file mode 100644
index 0000000..37436af
--- /dev/null
+++ b/GreenScreen/pluginInference.h
@@ -0,0 +1,84 @@
+/**
+ *  Copyright (C) 2020 Savoir-faire Linux Inc.
+ *
+ *  Author: Aline Gondim Santos <aline.gondimsantos@savoirfairelinux.com>
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; either version 3 of the License, or
+ *  (at your option) any later version.
+ *
+ *  This program is distributed in the hope that it will be useful,
+ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *  GNU General Public License for more details.
+ *
+ *  You should have received a copy of the GNU General Public License
+ *  along with this program; if not, write to the Free Software
+ *  Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301 USA.
+ */
+
+#pragma once
+
+#include "TFInference.h"
+
+// OpenCV headers
+#include <opencv2/core.hpp>
+// STL
+#include <array>
+#include <vector>
+#include <tuple>
+#include <iostream>
+
+namespace jami {
+
+class PluginInference : public TensorflowInference {
+public:
+	/**
+	 * @brief PluginInference
+	 * Is a type of supervised learning where we detect objects in images
+	 * Draw a bounding boxes around them
+	 * @param model
+	 */
+	PluginInference(TFModel model);
+	~PluginInference();
+
+#ifdef TFLITE
+	/**
+	 * @brief getInput
+	 * Returns the input where to fill the data
+	 * Use this method if you know what you are doing, all the necessary checks
+	 * on dimensions must be done on your part
+	 * @return std::tuple<uint8_t *, std::vector<int>>
+	 * The first element in the tuple is the pointer to the storage location
+	 * The second element is a dimensions vector that will helps you make
+	 * The necessary checks to make your data size match the input one
+	 */
+	std::pair<uint8_t*, std::vector<int>> getInput();
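+	// Usage sketch (hypothetical caller): fill the returned buffer with
+	// dims[1] * dims[2] * dims[3] uint8_t values before calling runGraph()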
+
+#else
+	void ReadTensorFromMat(const cv::Mat& image);
+
+#endif //TFLITE
+
+	std::vector<float> masksPredictions() const;
+
+
+	/**
+	 * @brief setExpectedImageDimensions
+	 * Sets imageWidth and imageHeight from the sources
+	 */
+	void setExpectedImageDimensions();
+
+	// Getters
+	int getImageWidth() const;
+	int getImageHeight() const;
+	int getImageNbChannels() const;
+
+
+private:
+	int imageWidth = 0;
+	int imageHeight = 0;
+	int imageNbChannels = 0;
+};
+} // namespace jami
diff --git a/GreenScreen/pluginMediaHandler.cpp b/GreenScreen/pluginMediaHandler.cpp
new file mode 100644
index 0000000..41e334a
--- /dev/null
+++ b/GreenScreen/pluginMediaHandler.cpp
@@ -0,0 +1,110 @@
+/**
+ *  Copyright (C) 2020 Savoir-faire Linux Inc.
+ *
+ *  Author: Aline Gondim Santos <aline.gondimsantos@savoirfairelinux.com>
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; either version 3 of the License, or
+ *  (at your option) any later version.
+ *
+ *  This program is distributed in the hope that it will be useful,
+ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *  GNU General Public License for more details.
+ *
+ *  You should have received a copy of the GNU General Public License
+ *  along with this program; if not, write to the Free Software
+ *  Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301 USA.
+ */
+
+#include "pluginMediaHandler.h"
+// Logger
+#include "pluglog.h"
+const char sep = separator();
+const std::string TAG = "FORESEG";
+
+#define NAME "Foreground Segmentation"
+
+namespace jami {
+
+PluginMediaHandler::PluginMediaHandler(std::map<std::string, std::string>&& ppm, std::string&& datapath):
+	datapath_{std::move(datapath)}, ppm_{std::move(ppm)}
+{
+	setGlobalPluginParameters(ppm_);
+	setId(datapath_);
+	mVS = std::make_shared<VideoSubscriber>(datapath_);
+}
+
+void
+PluginMediaHandler::notifyAVFrameSubject(const StreamData& data, jami::avSubjectPtr subject)
+{
+	Plog::log(Plog::LogPriority::INFO, TAG, "IN AVFRAMESUBJECT");
+	std::ostringstream oss;
+	std::string direction = data.direction ? "Receive" : "Preview";
+	oss << "NEW SUBJECT: [" << data.id << "," << direction << "]" << std::endl;
+
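+	// The "streamslist" preference selects which video stream to process:
+	// "in" (received frames) or "out" (sent frames, the default)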
+	bool preferredStreamDirection = false;
+	if (ppm_.find("streamslist") != ppm_.end()) {
+		Plog::log(Plog::LogPriority::INFO, TAG, "SET PARAMETERS");
+		preferredStreamDirection = (ppm_.at("streamslist") == "in");
+	}
+	oss << "preferredStreamDirection " << preferredStreamDirection << std::endl;
+	if (data.type == StreamType::video && !data.direction && data.direction == preferredStreamDirection) {
+		subject->attach(mVS.get()); // my image
+		oss << "got my sent image attached" << std::endl;
+	}
+	else if (data.type == StreamType::video && data.direction && data.direction == preferredStreamDirection)
+		subject->attach(mVS.get()); // the image I receive from the others on the call
+
+	Plog::log(Plog::LogPriority::INFO, TAG, oss.str());
+}
+
+std::map<std::string, std::string>
+PluginMediaHandler::getCallMediaHandlerDetails()
+{
+	std::map<std::string, std::string> mediaHandlerDetails = {};
+	mediaHandlerDetails["name"] = NAME;
+	mediaHandlerDetails["iconPath"] = datapath_ + sep + "icon.png";
+	mediaHandlerDetails["pluginId"] = id();
+
+	return mediaHandlerDetails;
+}
+
+void
+PluginMediaHandler::setPreferenceAttribute(const std::string& key, const std::string& value)
+{
+	auto it = ppm_.find(key);
+	if (it != ppm_.end()) {
+		if (ppm_[key] != value) {
+			ppm_[key] = value;
+			if (key == "backgroundlist") {
+				mVS->setBackground(dataPath(), value);
+			}
+		}
+	}
+}
+
+bool
+PluginMediaHandler::preferenceMapHasKey(const std::string& key)
+{
+	return ppm_.find(key) != ppm_.end();
+}
+
+void
+PluginMediaHandler::detach()
+{
+	mVS->detach();
+}
+
+PluginMediaHandler::~PluginMediaHandler()
+{
+	std::ostringstream oss;
+	oss << " ~FORESEG Plugin" << std::endl;
+	Plog::log(Plog::LogPriority::INFO, TAG, oss.str());
+	detach();
+}
+}
diff --git a/GreenScreen/pluginMediaHandler.h b/GreenScreen/pluginMediaHandler.h
new file mode 100644
index 0000000..1a92a6e
--- /dev/null
+++ b/GreenScreen/pluginMediaHandler.h
@@ -0,0 +1,56 @@
+/**
+ *  Copyright (C) 2020 Savoir-faire Linux Inc.
+ *
+ *  Author: Aline Gondim Santos <aline.gondimsantos@savoirfairelinux.com>
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; either version 3 of the License, or
+ *  (at your option) any later version.
+ *
+ *  This program is distributed in the hope that it will be useful,
+ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *  GNU General Public License for more details.
+ *
+ *  You should have received a copy of the GNU General Public License
+ *  along with this program; if not, write to the Free Software
+ *  Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301 USA.
+ */
+
+#pragma once
+
+//Project
+#include "videoSubscriber.h"
+
+// Plugin
+#include "plugin/jamiplugin.h"
+#include "plugin/mediahandler.h"
+
+using avSubjectPtr = std::weak_ptr<jami::Observable<AVFrame*>>;
+
+namespace jami {
+
+class PluginMediaHandler : public jami::CallMediaHandler {
+public:
+	PluginMediaHandler(std::map<std::string, std::string>&& ppm, std::string&& dataPath);
+	~PluginMediaHandler() override;
+
+	virtual void notifyAVFrameSubject(const StreamData& data, avSubjectPtr subject) override;
+	virtual std::map<std::string, std::string> getCallMediaHandlerDetails() override;
+
+	virtual void detach() override;
+	virtual void setPreferenceAttribute(const std::string& key, const std::string& value) override;
+	
+	std::shared_ptr<VideoSubscriber> mVS;
+	
+	std::string dataPath() const { return datapath_; }
+
+private:
+	bool preferenceMapHasKey(const std::string& key);
+
+private:
+	const std::string datapath_;
+	std::map<std::string, std::string> ppm_;
+};
+}
diff --git a/GreenScreen/pluginParameters.cpp b/GreenScreen/pluginParameters.cpp
new file mode 100644
index 0000000..c118ebd
--- /dev/null
+++ b/GreenScreen/pluginParameters.cpp
@@ -0,0 +1,56 @@
+/**
+ *  Copyright (C) 2020 Savoir-faire Linux Inc.
+ *
+ *  Author: Aline Gondim Santos <aline.gondimsantos@savoirfairelinux.com>
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; either version 3 of the License, or
+ *  (at your option) any later version.
+ *
+ *  This program is distributed in the hope that it will be useful,
+ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *  GNU General Public License for more details.
+ *
+ *  You should have received a copy of the GNU General Public License
+ *  along with this program; if not, write to the Free Software
+ *  Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301 USA.
+ */
+
+#include "pluginParameters.h"// Logger
+#include "pluglog.h"
+
+PluginParameters pluginParameters;
+
+void
+setGlobalPluginParameters(std::map<std::string, std::string> pp)
+{
+    if (!pp.empty()) {
+        if(pp.find("streamslist") != pp.end()) {
+            pluginParameters.stream = pp.at("streamslist");
+            Plog::log(Plog::LogPriority::INFO, "GLOBAL STREAM ", pluginParameters.stream);
+        }
+        if(pp.find("modellist") != pp.end()) {
+            pluginParameters.model = pp.at("modellist");
+            Plog::log(Plog::LogPriority::INFO, "GLOBAL MODEL ", pluginParameters.model);
+        }
+        if(pp.find("backgroundlist") != pp.end()) {
+            pluginParameters.image = pp.at("backgroundlist");
+            Plog::log(Plog::LogPriority::INFO, "GLOBAL IMAGE ", pluginParameters.image);
+        }
+    }
+}
+
+void getGlobalPluginParameters(PluginParameters* mPluginParameters)
+{
+    mPluginParameters->image = pluginParameters.image;
+    mPluginParameters->model = pluginParameters.model;
+    mPluginParameters->stream = pluginParameters.stream;
+}
+
+
+PluginParameters* getGlobalPluginParameters()
+{
+    return &pluginParameters;
+}
\ No newline at end of file
diff --git a/GreenScreen/pluginParameters.h b/GreenScreen/pluginParameters.h
new file mode 100644
index 0000000..c774aeb
--- /dev/null
+++ b/GreenScreen/pluginParameters.h
@@ -0,0 +1,48 @@
+/**
+ *  Copyright (C) 2020 Savoir-faire Linux Inc.
+ *
+ *  Author: Aline Gondim Santos <aline.gondimsantos@savoirfairelinux.com>
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; either version 3 of the License, or
+ *  (at your option) any later version.
+ *
+ *  This program is distributed in the hope that it will be useful,
+ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *  GNU General Public License for more details.
+ *
+ *  You should have received a copy of the GNU General Public License
+ *  along with this program; if not, write to the Free Software
+ *  Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301 USA.
+ */
+
+#ifndef _PLUGINPARAMETERS_H_
+#define _PLUGINPARAMETERS_H_
+
+#include <string>
+#include <map>
+
+struct PluginParameters {
+    std::string stream = "out";
+#ifdef TFLITE
+    bool useGPU = false;
+    std::string model = "mModel.tflite";
+#else
+    #ifndef CPU
+        bool useGPU = true;
+    #else
+        bool useGPU = false;
+    #endif
+        std::string model = "mModel.pb";
+#endif //TFLITE
+    std::string image = "background2.png";
+};
+
+void setGlobalPluginParameters(const std::map<std::string, std::string>& pp);
+
+void getGlobalPluginParameters(PluginParameters* mPluginParameters);
+PluginParameters* getGlobalPluginParameters();
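+
+// Typical usage (sketch): the host forwards the preference map once at
+// plugin load, e.g.:
+//   std::map<std::string, std::string> prefs{{"backgroundlist", "background1.png"}};
+//   setGlobalPluginParameters(prefs);
+//   PluginParameters* params = getGlobalPluginParameters();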
+
+#endif // _PLUGINPARAMETERS_H_
\ No newline at end of file
diff --git a/GreenScreen/pluginProcessor.cpp b/GreenScreen/pluginProcessor.cpp
new file mode 100644
index 0000000..ea846e3
--- /dev/null
+++ b/GreenScreen/pluginProcessor.cpp
@@ -0,0 +1,303 @@
+/**
+ *  Copyright (C) 2020 Savoir-faire Linux Inc.
+ *
+ *  Author: Aline Gondim Santos <aline.gondimsantos@savoirfairelinux.com>
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; either version 3 of the License, or
+ *  (at your option) any later version.
+ *
+ *  This program is distributed in the hope that it will be useful,
+ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *  GNU General Public License for more details.
+ *
+ *  You should have received a copy of the GNU General Public License
+ *  along with this program; if not, write to the Free Software
+ *  Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301 USA.
+ */
+
+#include "pluginProcessor.h"
+// System includes
+#include <algorithm>
+#include <cstring>
+// OpenCV headers
+#include <opencv2/imgproc.hpp>
+#include <opencv2/imgcodecs.hpp>
+#include <opencv2/core.hpp>
+// Logger
+#include <pluglog.h>
+
+extern "C" {
+#include <libavutil/display.h>
+}
+
+const char sep = separator();
+
+const std::string TAG = "FORESEG";
+
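+// Plugin-wide preferences are shared through a single global instance;
+// fetch the pointer once when the library is loaded.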
+PluginParameters* mPluginParameters = getGlobalPluginParameters();
+
+namespace jami
+{
+
+PluginProcessor::PluginProcessor(const std::string& dataPath):
+pluginInference{TFModel{dataPath + sep + "models/" + mPluginParameters->model}}
+{
+	initModel();
+	setBackgroundImage(dataPath, mPluginParameters->image);
+}
+
+void
+PluginProcessor::setBackgroundImage(const std::string& dataPath, const std::string& value)
+{
+	backgroundPath = dataPath + sep + "backgrounds" + sep + value;
+	cv::Size size = cv::Size{0, 0};
+
+	if (!backgroundImage.empty())
+		size = backgroundImage.size();
+
+	backgroundImage = cv::imread(backgroundPath);
+	if (backgroundImage.empty()) {
+		// Bail out early: converting or resizing an empty Mat would throw.
+		Plog::log(Plog::LogPriority::ERR, TAG, "Background image not loaded");
+		return;
+	}
+	Plog::log(Plog::LogPriority::INFO, TAG, "Background image loaded");
+
+	cv::cvtColor(backgroundImage, backgroundImage, cv::COLOR_BGR2RGB);
+	backgroundImage.convertTo(backgroundImage, CV_32FC3);
+	if (size.height) {
+		cv::resize(backgroundImage, backgroundImage, size);
+		backgroundRotation = 0;
+	}
+}
+
+void
+PluginProcessor::initModel()
+{
+	try {
+		pluginInference.init();
+	}
+	catch (std::exception& e) {
+		Plog::log(Plog::LogPriority::ERR, TAG, e.what());
+	}
+	std::ostringstream oss;
+	oss << "Model is allocated " << pluginInference.isAllocated();
+	Plog::log(Plog::LogPriority::INFO, TAG, oss.str());
+}
+
+
+#ifdef TFLITE
+void
+PluginProcessor::feedInput(const cv::Mat& frame)
+{
+	// Wrap the model's input tensor buffer in a cv::Mat so that
+	// convertTo() writes the frame directly into the tensor.
+	auto pair = pluginInference.getInput();
+	uint8_t* inputPointer = pair.first;
+
+	cv::Mat temp(frame.rows, frame.cols, CV_8UC3, inputPointer);
+	frame.convertTo(temp, CV_8UC3);
+}
+#else
+void
+PluginProcessor::feedInput(const cv::Mat& frame)
+{
+	pluginInference.ReadTensorFromMat(frame);
+}
+#endif //TFLITE
+
+int
+PluginProcessor::getBackgroundRotation()
+{
+	return backgroundRotation;
+}
+
+void
+PluginProcessor::setBackgroundRotation(int angle)
+{
+	if (backgroundRotation != angle) {
+		switch (backgroundRotation - angle) {
+			case 90:
+				cv::rotate(backgroundImage, backgroundImage, cv::ROTATE_90_CLOCKWISE);
+				break;
+			case 180:
+			case -180:
+				cv::rotate(backgroundImage, backgroundImage, cv::ROTATE_180);
+				break;
+			case -90:
+				cv::rotate(backgroundImage, backgroundImage, cv::ROTATE_90_COUNTERCLOCKWISE);
+				break;
+		}
+		backgroundRotation = angle;
+	}
+}
+
+void
+PluginProcessor::computePredictions() 
+{
+	// Run the graph
+	pluginInference.runGraph();
+	auto predictions = pluginInference.masksPredictions();
+
+	// Save the predictions
+	computedMask = predictions;
+}
+
+void
+PluginProcessor::printMask()
+{
+	for (size_t i = 0; i < computedMask.size(); i++) {
+		// Log the predictions
+		std::ostringstream oss;
+		oss << "\nclass: " << computedMask[i] << std::endl;
+		Plog::log(Plog::LogPriority::INFO, TAG, oss.str());
+	}
+}
+
+
+void
+copyByLine(uchar* frameData, uchar* applyMaskData, const int lineSize, cv::Size size)
+{
+	if (3 * size.width == lineSize) {
+		// No line padding: the whole buffer can be copied at once.
+		std::memcpy(frameData, applyMaskData, size.height * size.width * 3);
+	}
+	else {
+		// The destination lines are padded: copy row by row.
+		int rows = size.height;
+		int offset = 0;
+		int maskoffset = 0;
+		for (int i = 0; i < rows; i++) {
+			std::memcpy(frameData + offset, applyMaskData + maskoffset, lineSize);
+			offset += lineSize;
+			maskoffset += 3 * size.width;
+		}
+	}
+}
+
+void
+PluginProcessor::drawMaskOnFrame(cv::Mat& frame,
+								 cv::Mat& frameReduced, 
+								 std::vector<float>computedMask, 
+								 int lineSize, int angle)
+{
+	if (computedMask.empty()) {
+		return;
+	}
+	if (previousMasks[0].empty()) {
+		previousMasks[0] = cv::Mat(frameReduced.rows, frameReduced.cols, CV_32FC1, double(0.));
+		previousMasks[1] = cv::Mat(frameReduced.rows, frameReduced.cols, CV_32FC1, double(0.));
+	}	
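+	// The model is assumed to output a square mask of
+	// sqrt(size) x sqrt(size) float values.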
+	int maskSize = static_cast<int> (std::sqrt(computedMask.size()));
+	cv::Mat maskImg(maskSize, maskSize, CV_32FC1, computedMask.data());
+
+	rotateFrame(-angle, maskImg);
+#ifdef TFLITE
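+	// Pixels of class 15 ("person" in the PASCAL VOC label map commonly
+	// used by DeepLab models) become foreground; other pixels are
+	// temporally smoothed with the two previous masks (weights 0.6/0.3/0.1).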
+	for (int i = 0; i < maskImg.cols; i++) {
+		for (int j = 0; j < maskImg.rows; j++) {
+			if (maskImg.at<float>(j, i) == 15)
+				maskImg.at<float>(j, i) = 255.;
+			else
+				maskImg.at<float>(j, i) = (float)((int)((0.6 * maskImg.at<float>(j, i) + 0.3 * previousMasks[0].at<float>(j, i) + 0.1 * previousMasks[1].at<float>(j, i))) % 256);
+		}
+	}
+#else // TFLITE
+	cv::resize(maskImg, maskImg, cv::Size(frameReduced.cols, frameReduced.rows));
+
+	double m, M;
+	cv::minMaxLoc(maskImg, &m, &M);
+
+	if (M < 2) { //avoid detection if there is any one in frame
+		maskImg = 0. * maskImg;
+	}
+	else {
+		for (int i = 0; i < maskImg.cols; i++) {
+			for (int j = 0; j < maskImg.rows; j++) {
+				maskImg.at<float>(j, i) = (maskImg.at<float>(j, i) - m) / (M - m);
+
+				if (maskImg.at<float>(j, i) < 0.4)
+					maskImg.at<float>(j, i) = 0.;
+				else if (maskImg.at<float>(j, i) < 0.7) {
+					float value = maskImg.at<float>(j, i) * 0.6 + previousMasks[0].at<float>(j, i) * 0.3 + previousMasks[1].at<float>(j, i) * 0.1;
+					maskImg.at<float>(j, i) = 0.;
+					if (value > 0.7)
+						maskImg.at<float>(j, i) = 1.;
+				}
+				else
+					maskImg.at<float>(j, i) = 1.;
+			}
+		}
+	}	
+#endif
+
+	previousMasks[1] = previousMasks[0].clone();
+	previousMasks[0] = maskImg.clone();
+
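+	// Blur kernel sized to ~5% of the mask; GaussianBlur requires odd
+	// kernel dimensions.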
+	kSize = cv::Size(maskImg.cols * 0.05, maskImg.rows * 0.05);
+	if (kSize.height % 2 == 0)
+		kSize.height -= 1;
+	if (kSize.width % 2 == 0)
+		kSize.width -= 1;
+
+#ifndef TFLITE
+	cv::dilate(maskImg, maskImg, cv::getStructuringElement(cv::MORPH_CROSS, kSize));
+	maskImg = maskImg * 255.;
+#endif
+	cv::GaussianBlur(maskImg, maskImg, kSize, 0); // mask values range from 0 to 255
+	maskImg = maskImg / 255.; // normalize to [0, 1]
+
+	cv::Mat applyMask = frameReduced.clone();
+	cv::Mat roiMaskImg = maskImg.clone();
+	cv::Mat roiMaskImgComplementary = 1. - roiMaskImg; //mask from 1. to 0
+
+	std::vector<cv::Mat> channels;
+	std::vector<cv::Mat> channelsComplementary;
+
+	channels.emplace_back(roiMaskImg);
+	channels.emplace_back(roiMaskImg);
+	channels.emplace_back(roiMaskImg);
+	channelsComplementary.emplace_back(roiMaskImgComplementary);
+	channelsComplementary.emplace_back(roiMaskImgComplementary);
+	channelsComplementary.emplace_back(roiMaskImgComplementary);
+
+	cv::merge(channels, roiMaskImg);
+	cv::merge(channelsComplementary, roiMaskImgComplementary);
+
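+	// Blend: the mask is ~1 on the person and ~0 elsewhere, so the person
+	// keeps the frame pixels and the rest is taken from the background image.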
+	int origType = frameReduced.type();
+	int roiMaskType = roiMaskImg.type();
+
+	applyMask.convertTo(applyMask, roiMaskType);
+	applyMask = applyMask.mul(roiMaskImg);
+	applyMask += backgroundImage.mul(roiMaskImgComplementary);
+	applyMask.convertTo(applyMask, origType);
+
+	cv::resize(applyMask, applyMask, cv::Size(frame.cols, frame.rows));
+
+	copyByLine(frame.data, applyMask.data, lineSize, cv::Size(frame.cols, frame.rows));
+}
+
+void
+PluginProcessor::rotateFrame(int angle, cv::Mat& mat)
+{
+	switch (angle) {
+		case -90:
+			cv::rotate(mat, mat, cv::ROTATE_90_COUNTERCLOCKWISE);
+			break;
+		case 180:
+		case -180:
+			cv::rotate(mat, mat, cv::ROTATE_180);
+			break;
+		case 90:
+			cv::rotate(mat, mat, cv::ROTATE_90_CLOCKWISE);
+			break;
+	}
+}
+} // namespace jami
diff --git a/GreenScreen/pluginProcessor.h b/GreenScreen/pluginProcessor.h
new file mode 100644
index 0000000..ea6ba29
--- /dev/null
+++ b/GreenScreen/pluginProcessor.h
@@ -0,0 +1,88 @@
+/**
+ *  Copyright (C) 2020 Savoir-faire Linux Inc.
+ *
+ *  Author: Aline Gondim Santos <aline.gondimsantos@savoirfairelinux.com>
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; either version 3 of the License, or
+ *  (at your option) any later version.
+ *
+ *  This program is distributed in the hope that it will be useful,
+ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *  GNU General Public License for more details.
+ *
+ *  You should have received a copy of the GNU General Public License
+ *  along with this program; if not, write to the Free Software
+ *  Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301 USA.
+ */
+
+#pragma once
+// STL
+#include <condition_variable>
+#include <cstdint>
+#include <memory>
+#include <mutex>
+#include <thread>
+#include <vector>
+#include <map>
+// Filters
+#include "pluginInference.h"
+// AvFrame
+extern "C" {
+#include <libavutil/frame.h>
+}
+// Plugin
+#include <plugin/jamiplugin.h>
+#include <plugin/mediahandler.h>
+// Frame scaler for frame transformations
+#include <framescaler.h>
+
+namespace jami {
+
+class PluginProcessor {
+public:
+	PluginProcessor(const std::string& dataPath);
+
+	void initModel();
+	/**
+	 * @brief feedInput
+	 * Copies a frame into the model input buffer for prediction
+	 * @param frame
+	 */
+	void feedInput(const cv::Mat& frame);
+
+	/**
+	 * @brief computePredictions
+	 * Uses the model to compute the predictions and stores them
+	 * in computedMask
+	 */
+	void computePredictions();
+
+	void printMask();
+	void drawMaskOnFrame(cv::Mat& frame, cv::Mat& frameReduced, std::vector<float> computedMask, int lineSize, int angle);
+	int getBackgroundRotation();
+	void setBackgroundRotation(int angle);
+	void setBackgroundImage(const std::string& dataPath, const std::string& value);
+	void rotateFrame(int angle, cv::Mat& mat);
+
+	// Output predictions
+	std::vector<float> computedMask;
+
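+	// Last two masks, kept for temporal smoothing in drawMaskOnFrame()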
+	cv::Mat previousMasks[2];
+	cv::Mat backgroundImage;
+
+	cv::Size kSize;
+	float scaleX = 0;
+	float scaleY = 0;
+
+	PluginInference pluginInference;
+	std::string backgroundPath;
+
+private:
+	// Frame
+	cv::Mat frame;
+	int backgroundRotation = 0;
+};
+} // namespace jami
diff --git a/GreenScreen/preferences-tfcc.json b/GreenScreen/preferences-tfcc.json
new file mode 100644
index 0000000..7890955
--- /dev/null
+++ b/GreenScreen/preferences-tfcc.json
@@ -0,0 +1,35 @@
+[
+    {
+        "category" : "StreamsListPreference",
+        "type": "List",
+        "key": "streamslist",
+        "title": "Streams to transform",
+        "summary": "Select video to transform",
+        "defaultValue": "out",
+        "entries": ["sent", "received"],
+        "entryValues": ["out", "in"],
+        "editable": "true"
+    },
+    {
+        "category" : "ModelPreference",
+        "type": "List",
+        "key": "modellist",
+        "title": "Model to load",
+        "summary": "Select the model to use",
+        "defaultValue": "mModel.pb",
+        "entries": ["mModel"],
+        "entryValues": ["mModel.pb"],
+        "editable": "false"
+    },
+    {
+        "category" : "ImageBackground",
+        "type": "List",
+        "key": "backgroundlist",
+        "title": "Background image",
+        "summary": "Select the image background to use",
+        "defaultValue": "background2.png",
+        "entries": ["Painture", "Beach"],
+        "entryValues": ["background1.png", "background2.png"],
+        "editable": "true"
+    }
+]
diff --git a/GreenScreen/preferences-tflite.json b/GreenScreen/preferences-tflite.json
new file mode 100644
index 0000000..54beba7
--- /dev/null
+++ b/GreenScreen/preferences-tflite.json
@@ -0,0 +1,35 @@
+[
+    {
+        "category" : "StreamsListPreference",
+        "type": "List",
+        "key": "streamslist",
+        "title": "Streams to transform",
+        "summary": "Select video to transform",
+        "defaultValue": "out",
+        "entries": ["sent", "received"],
+        "entryValues": ["out", "in"],
+        "editable": "true"
+    },
+    {
+        "category" : "ModelPreference",
+        "type": "List",
+        "key": "modellist",
+        "title": "Model to load",
+        "summary": "Select the model to use",
+        "defaultValue": "mModel.tflite",
+        "entries": ["mModel"],
+        "entryValues": ["mModel.tflite"],
+        "editable": "false"
+    },
+    {
+        "category" : "ImageBackground",
+        "type": "List",
+        "key": "backgroundlist",
+        "title": "Background image",
+        "summary": "Select the image background to use",
+        "defaultValue": "background2.png",
+        "entries": ["Painture", "Beach"],
+        "entryValues": ["background1.png", "background2.png"],
+        "editable": "true"
+    }
+]
diff --git a/GreenScreen/videoSubscriber.cpp b/GreenScreen/videoSubscriber.cpp
new file mode 100644
index 0000000..7fd4cb6
--- /dev/null
+++ b/GreenScreen/videoSubscriber.cpp
@@ -0,0 +1,203 @@
+/**
+ *  Copyright (C) 2020 Savoir-faire Linux Inc.
+ *
+ *  Author: Aline Gondim Santos <aline.gondimsantos@savoirfairelinux.com>
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; either version 3 of the License, or
+ *  (at your option) any later version.
+ *
+ *  This program is distributed in the hope that it will be useful,
+ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *  GNU General Public License for more details.
+ *
+ *  You should have received a copy of the GNU General Public License
+ *  along with this program; if not, write to the Free Software
+ *  Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301 USA.
+ */
+
+#include "videoSubscriber.h"
+// Use for display rotation matrix
+extern "C" {
+#include <libavutil/display.h>
+#include <accel.h>
+}
+
+// Opencv processing
+#include <opencv2/imgproc.hpp>
+#include <opencv2/imgcodecs.hpp>
+
+// LOGGING
+#include <pluglog.h>
+
+const std::string TAG = "FORESEG";
+const char sep = separator();
+
+namespace jami {
+
+VideoSubscriber::VideoSubscriber(const std::string& dataPath): path_{dataPath},
+pluginProcessor{dataPath}
+{
+	/**
+	 * Waits for new frames and then processes them.
+	 * Writes the predictions into computedMask.
+	 **/
+	processFrameThread = std::thread([this] {
+		while (running)	{
+			std::unique_lock<std::mutex> l(inputLock);
+			inputCv.wait(l, [this] { return not running or newFrame; });
+			if (not running) {
+				break;
+			}
+
+			pluginProcessor.feedInput(fcopy.resizedFrameRGB);
+			newFrame = false;
+			/** Unlock the mutex; this lets the other thread
+			 *  copy new data while we process the old one.
+			 **/
+			l.unlock();
+			pluginProcessor.computePredictions();
+		}
+	});
+}
+
+VideoSubscriber::~VideoSubscriber()
+{
+	std::ostringstream oss;
+	oss << "~MediaProcessor" << std::endl;
+	stop();
+	processFrameThread.join();
+	Plog::log(Plog::LogPriority::INFO, TAG, oss.str());
+}
+
+void
+VideoSubscriber::update(jami::Observable<AVFrame*> *, AVFrame* const &iFrame)
+{
+	if (pluginProcessor.pluginInference.isAllocated()) {
+		if (!iFrame)
+			return;
+		AVFrame * pluginFrame = const_cast<AVFrame *>(iFrame);
+
+		//======================================================================================
+		// GET FRAME ROTATION
+		AVFrameSideData* side_data =
+			av_frame_get_side_data(iFrame, AV_FRAME_DATA_DISPLAYMATRIX);
+
+		int angle{ 0 };
+		if (side_data) {
+			auto matrix_rotation = reinterpret_cast<int32_t*>(side_data->data);
+			angle = static_cast<int>(av_display_rotation_get(matrix_rotation));
+		}
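+		// Bring hardware-decoded frames into main memory before any
+		// CPU-side processing.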
+		pluginFrame = transferToMainMemory(pluginFrame, AV_PIX_FMT_NV12);
+
+		//======================================================================================
+		// GET RAW FRAME
+		// Use a non-const Frame
+		// Convert input frame to RGB
+		int inputHeight = pluginFrame->height;
+		int inputWidth = pluginFrame->width;
+
+		fcopy.originalSize = cv::Size{inputWidth, inputHeight};
+		FrameUniquePtr bgrFrame = scaler.convertFormat(pluginFrame, AV_PIX_FMT_RGB24);
+		cv::Mat frame =
+			cv::Mat{bgrFrame->height, bgrFrame->width, CV_8UC3, bgrFrame->data[0],
+					static_cast<size_t>(bgrFrame->linesize[0])};
+		// Clone the frame first: the original wraps the AVFrame buffer,
+		// whose linesize padding makes it unsafe to modify directly.
+		cv::Mat clone = frame.clone();
+		//======================================================================================
+
+		pluginProcessor.setBackgroundRotation(angle);
+
+		if (firstRun) {
+			pluginProcessor.pluginInference.setExpectedImageDimensions();
+			fcopy.resizedSize = cv::Size{pluginProcessor.pluginInference.getImageWidth(), pluginProcessor.pluginInference.getImageHeight()};
+
+			cv::resize(clone, fcopy.resizedFrameRGB, fcopy.resizedSize);
+			pluginProcessor.rotateFrame(angle, fcopy.resizedFrameRGB);
+
+			cv::resize(pluginProcessor.backgroundImage, pluginProcessor.backgroundImage, fcopy.resizedSize);
+
+			firstRun = false;
+		}
+
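+		// Hand the frame to the processing thread only if it has consumed
+		// the previous one; otherwise reuse the last computed mask.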
+		if (!newFrame) {
+			std::lock_guard<std::mutex> l(inputLock);
+			cv::resize(clone, fcopy.resizedFrameRGB, fcopy.resizedSize);
+			pluginProcessor.rotateFrame(angle, fcopy.resizedFrameRGB);
+			newFrame = true;
+			inputCv.notify_all();
+		}
+
+		fcopy.predictionsFrameRGB = frame;
+		fcopy.predictionsResizedFrameRGB = fcopy.resizedFrameRGB.clone();
+		pluginProcessor.rotateFrame(-angle, fcopy.predictionsResizedFrameRGB);
+		pluginProcessor.drawMaskOnFrame(fcopy.predictionsFrameRGB, fcopy.predictionsResizedFrameRGB,
+												pluginProcessor.computedMask, bgrFrame->linesize[0], angle);
+
+		//======================================================================================
+		// REPLACE AVFRAME DATA WITH FRAME DATA
+		if (bgrFrame && bgrFrame->data[0]) {
+			uint8_t* frameData = bgrFrame->data[0];
+			if(angle == 90 || angle == -90)	{
+				std::memmove(frameData, fcopy.predictionsFrameRGB.data, static_cast<size_t>(pluginFrame->width*pluginFrame->height*3) * sizeof(uint8_t));
+			}
+		}
+		// Copy Frame meta data
+		if (bgrFrame && pluginFrame) {
+			av_frame_copy_props(bgrFrame.get(), pluginFrame);
+			scaler.moveFrom(pluginFrame, bgrFrame.get());
+		}
+
+		// Remove the pointer
+		pluginFrame = nullptr;
+	}
+}
+
+void
+VideoSubscriber::attached(jami::Observable<AVFrame*> *observable)
+{
+	std::ostringstream oss;
+	oss << "::Attached ! " << std::endl;
+	Plog::log(Plog::LogPriority::INFO, TAG, oss.str());
+	observable_ = observable;
+}
+
+void
+VideoSubscriber::detached(jami::Observable<AVFrame*> *)
+{
+	firstRun = true;
+	observable_ = nullptr;
+	std::ostringstream oss;
+	oss << "::Detached()" << std::endl;
+	Plog::log(Plog::LogPriority::INFO, TAG, oss.str());
+}
+
+void
+VideoSubscriber::detach()
+{
+	if (observable_) {
+		firstRun = true;
+		std::ostringstream oss;
+		oss << "::Calling detach()" << std::endl;
+		Plog::log(Plog::LogPriority::INFO, TAG, oss.str());
+		observable_->detach(this);
+	}
+}
+
+void
+VideoSubscriber::stop()
+{
+	running = false;
+	inputCv.notify_all();
+}
+
+void
+VideoSubscriber::setBackground(const std::string& dataPath, const std::string& value)
+{
+	pluginProcessor.setBackgroundImage(dataPath, value);
+}
+}
diff --git a/GreenScreen/videoSubscriber.h b/GreenScreen/videoSubscriber.h
new file mode 100644
index 0000000..4e238d4
--- /dev/null
+++ b/GreenScreen/videoSubscriber.h
@@ -0,0 +1,96 @@
+/**
+ *  Copyright (C) 2004-2020 Savoir-faire Linux Inc.
+ *
+ *  Author: Aline Gondim Santos <aline.gondimsantos@savoirfairelinux.com>
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; either version 3 of the License, or
+ *  (at your option) any later version.
+ *
+ *  This program is distributed in the hope that it will be useful,
+ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *  GNU General Public License for more details.
+ *
+ *  You should have received a copy of the GNU General Public License
+ *  along with this program; if not, write to the Free Software
+ *  Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301 USA.
+ */
+
+#pragma once
+
+// AvFrame
+extern "C" {
+#include <libavutil/frame.h>
+}
+#include <observer.h>
+
+// STL
+#include <map>
+#include <thread>
+#include <condition_variable>
+
+// Frame Scaler
+#include <framescaler.h>
+
+// OpenCV headers
+#include <opencv2/core.hpp>
+
+#include "pluginProcessor.h"
+
+namespace jami {
+    
+class FrameCopy {
+public:
+    // Resized copy of the original frame, in RGB format
+    cv::Mat resizedFrameRGB;
+    cv::Size resizedSize;
+    // Full-size frame the predictions are drawn onto, in RGB format
+    cv::Mat predictionsFrameRGB;
+    cv::Size originalSize;
+    // Resized frame the predictions are drawn onto, in RGB format
+    cv::Mat predictionsResizedFrameRGB;
+};
+
+class VideoSubscriber : public jami::Observer<AVFrame*> {
+public:
+    VideoSubscriber(const std::string& dataPath);
+    ~VideoSubscriber();
+
+    virtual void update(jami::Observable<AVFrame*> *, AVFrame* const &) override;
+    virtual void attached(jami::Observable<AVFrame*> *) override;
+    virtual void detached(jami::Observable<AVFrame*> *) override;
+
+    void detach();
+    void stop();
+    void setBackground(const std::string& dataPath, const std::string& value);
+
+
+private:
+    // Observer pattern
+    Observable<AVFrame*> *observable_ = nullptr;
+
+    //Data
+    std::string path_;
+
+    // Frame
+    FrameCopy fcopy;
+    cv::Mat frame;
+
+    FrameScaler scaler;
+
+    // Threading
+    std::thread processFrameThread;
+    std::mutex inputLock;
+    std::condition_variable inputCv;
+
+    // Status variables of the processing
+    bool firstRun{true};
+    bool running{true};
+    bool newFrame{false};
+
+    //std::shared_ptr<PluginProcessor> pluginProcessor;
+    PluginProcessor pluginProcessor;
+};
+}