jenkins: linux-gnu, armeabi-v7a, windows

Change-Id: I572b43a41344bfe438e18a2a5892b6b7be416e36
diff --git a/GreenScreen/CMakeLists.txt b/GreenScreen/CMakeLists.txt
index 574d9b1..61f9d40 100644
--- a/GreenScreen/CMakeLists.txt
+++ b/GreenScreen/CMakeLists.txt
@@ -6,35 +6,34 @@
 
 project(${ProjectName} VERSION ${Version})
 
-set (DAEMON ./../../daemon)
+set (DAEMON ${PROJECT_SOURCE_DIR}/../../daemon)
 set (PLUGIN_NAME GreenScreen)
 set (JPL_FILE_NAME ${PLUGIN_NAME}.jpl)
 set (DAEMON_SRC ${DAEMON}/src)
 set (CONTRIB_PATH ${DAEMON}/contrib)
-set (DESTINATION_PATH ./../build/)
-set (PLUGINS_LIB ../lib)
+set (PLUGINS_LIB ${PROJECT_SOURCE_DIR}/../lib)
 set (JPL_DIRECTORY ${PROJECT_BINARY_DIR}/jpl)
+set (LIBS_DIR ${PROJECT_SOURCE_DIR}/../contrib/Libs)
 
 if(WIN32)
-	message(OS:\  WINDOWS\ ${CMAKE_SYSTEM_PROCESSOR})
-	if (NOT ${CMAKE_CL_64})
-		message( FATAL_ERROR "\nUse CMake only for x64 Windows" )
-	endif()
-	set (CONTRIB_PLATFORM_CURT x64)
-	set (CONTRIB_PLATFORM ${CONTRIB_PLATFORM_CURT}-windows)
-	set (LIBRARY_FILE_NAME ${PLUGIN_NAME}.dll)
-	set (LIBS_DIR $ENV{HOME}/Documents/GITHUB/Libs)
-	set (OPENCV $ENV{HOME}/Documents/GITHUB/opencv/build-bash/)
-	set (FFMPEG ${CONTRIB_PATH}/build/ffmpeg/Build/win32/x64)
+    message(OS:\  WINDOWS\ ${CMAKE_SYSTEM_PROCESSOR})
+    if (NOT ${CMAKE_CL_64})
+        message( FATAL_ERROR "\nUse CMake only for x64 Windows" )
+    endif()
+    set (CONTRIB_PLATFORM_CURT x64)
+    set (CONTRIB_PLATFORM ${CONTRIB_PLATFORM_CURT}-windows)
+    set (LIBRARY_FILE_NAME ${PLUGIN_NAME}.dll)
+    set (LIBS_BIN_DIR $ENV{PLUGIN_ENV})
+    set (FFMPEG ${CONTRIB_PATH}/build/ffmpeg/Build/win32/x64)
 endif()
 
 if(UNIX)
-	message( FATAL_ERROR "\nUse CMake only for Windows! For linux or Android (linux host), use our bash scripts.\nPlese refer to documentation for more infos." )
-	message(OS:\  LINUX\ ${CMAKE_SYSTEM_PROCESSOR})
-	set (CONTRIB_PLATFORM_CURT ${CMAKE_SYSTEM_PROCESSOR})
-	set (CONTRIB_PLATFORM ${CONTRIB_PLATFORM_CURT}-linux-gnu)
-	set (LIBRARY_FILE_NAME lib${PLUGIN_NAME}.so)
-	set (LIBS_DIR /home/${USER}/Libs)
+    message( FATAL_ERROR "\nUse CMake only for Windows! For Linux or Android (Linux host), use our bash scripts.\nPlease refer to the documentation for more information." )
+    message(OS:\  LINUX\ ${CMAKE_SYSTEM_PROCESSOR})
+    set (CONTRIB_PLATFORM_CURT ${CMAKE_SYSTEM_PROCESSOR})
+    set (CONTRIB_PLATFORM ${CONTRIB_PLATFORM_CURT}-linux-gnu)
+    set (LIBRARY_FILE_NAME lib${PLUGIN_NAME}.so)
+    set (LIBS_BIN_DIR /home/${USER}/Libs)
 endif()
 
 
@@ -51,19 +50,19 @@
 set(GPU -gpu61)
 
 if (CPU)
-	set(GPU )
-	add_definitions(-DCPU)
-	message(CPU\ BUILDING!)
+    set(GPU )
+    add_definitions(-DCPU)
+    message(CPU\ BUILDING!)
 endif()
 
 if (TFLITE)
-	add_definitions(-DTFLITE)
-	set(TENSORFLOW _tensorflowLite)
-	set(model mobilenet_v2_deeplab_v3_256_myquant.tflite)
-	set(modelType .tflite)
-	set(preferencesFile preferences-tflite.json)
-	set(TFLIB libtensorflowlite)
-	message(TFLITE\ BUILDING!)
+    add_definitions(-DTFLITE)
+    set(TENSORFLOW _tensorflowLite)
+    set(model mobilenet_v2_deeplab_v3_256_myquant.tflite)
+    set(modelType .tflite)
+    set(preferencesFile preferences-tflite.json)
+    set(TFLIB libtensorflowlite)
+    message(TFLITE\ BUILDING!)
 endif()
 
 set(CMAKE_CXX_STANDARD 17)
@@ -72,106 +71,108 @@
 set(CMAKE_CXX_FLAGS_DEBUG "${CMAKE_CXX_FLAGS_DEBUG} /MTd")
 
 set(plugin_SRC main.cpp
-			   pluginInference.cpp
-			   pluginMediaHandler.cpp
-			   pluginParameters.cpp
-			   pluginProcessor.cpp
-			   TFInference.cpp
-			   videoSubscriber.cpp
-			   )
+               pluginInference.cpp
+               pluginMediaHandler.cpp
+               pluginParameters.cpp
+               pluginProcessor.cpp
+               TFInference.cpp
+               videoSubscriber.cpp
+               )
 
 set(plugin_HDR pluginInference.h
-			   pluginMediaHandler.h
-			   pluginParameters.h
-			   pluginProcessor.h
-			   TFInference.h
-			   TFModels.h
-			   videoSubscriber.h
-			   ../lib/accel.h
-			   ../lib/framescaler.h
-			   ../lib/pluglog.h
-			   )
+               pluginMediaHandler.h
+               pluginParameters.h
+               pluginProcessor.h
+               TFInference.h
+               TFModels.h
+               videoSubscriber.h
+               ../lib/accel.h
+               ../lib/framescaler.h
+               ../lib/pluglog.h
+               )
 
 
 
 # add the library
 add_library(${ProjectName} SHARED ${plugin_SRC}
-								  ${plugin_HDR}
-								  )
+                                  ${plugin_HDR}
+                                  )
 
-target_include_directories(${ProjectName} PUBLIC ${PROJECT_BINARY_DIR}
-												 ${PROJECT_SOURCE_DIR}
-												 ${DAEMON_SRC}
-												 ${CONTRIB_PATH}
-												 ${CONTRIB_PATH}/${CONTRIB_PLATFORM}/include
-												 ${CONTRIB_PATH}/${CONTRIB_PLATFORM}/include/opencv4
-												 ${OPENCV}/install/include
-												 ${FFMPEG}/include
-												 ${PLUGINS_LIB}
-												 ${LIBS_DIR}
-												 ${LIBS_DIR}/${TENSORFLOW}/include
-												 ${LIBS_DIR}/${TENSORFLOW}/include/third_party/eigen3
-												 ${LIBS_DIR}/${TENSORFLOW}/include/flatbuffers
-												 )
 if (WIN32)
-target_link_directories(${ProjectName} PUBLIC	${CONTRIB_PATH}
-										${LIBS_DIR}/${TENSORFLOW}/lib/${CONTRIB_PLATFORM}
-										${OPENCV}/install/x64/vc16/staticlib
-										${FFMPEG}/bin
-										${LIBS_DIR}
-										${CONTRIB_PATH}/${CONTRIB_PLATFORM}/lib
-										${CONTRIB_PATH}/${CONTRIB_PLATFORM}/lib/opencv4/3rdparty
-										${LIBS_DIR}/${TENSORFLOW}/lib/${CONTRIB_PLATFORM}
-										)
+target_include_directories(${ProjectName} PUBLIC ${PROJECT_BINARY_DIR}
+                                                 ${PROJECT_SOURCE_DIR}
+                                                 ${PLUGINS_LIB}
+                                                 ${DAEMON_SRC}
+                                                 ${CONTRIB_PATH}
+                                                 ${FFMPEG}/include
+                                                 ${CONTRIB_PATH}/build/opencv/build/install/include
+                                                 ${LIBS_DIR}/${TENSORFLOW}/include
+                                                 ${LIBS_DIR}/${TENSORFLOW}/include/third_party/eigen3
+                                                 ${LIBS_DIR}/${TENSORFLOW}/include/flatbuffers
+                                                 )
+target_link_directories(${ProjectName} PUBLIC ${CONTRIB_PATH}
+                                        ${LIBS_BIN_DIR}/${TENSORFLOW}/lib/${CONTRIB_PLATFORM}
+                                        ${CONTRIB_PATH}/build/opencv/build/lib/Release
+                                        ${CONTRIB_PATH}/build/opencv/build/3rdparty/lib/Release
+                                        ${FFMPEG}/bin
+                                        )
 
 target_link_libraries(${ProjectName} PUBLIC swscale avutil libpng opencv_imgcodecs411 opencv_imgproc411 opencv_core411 ${TFLIB} zlib)
 endif()
 
 if (UNIX)
-link_directories(${ProjectName} PUBLIC	${CONTRIB_PATH}
-										${LIBS_DIR}/${TENSORFLOW}/lib/${CONTRIB_PLATFORM}
-										${OPENCV}/install/x64/vc16/staticlib
-										${FFMPEG}/bin
-										${LIBS_DIR}
-										${CONTRIB_PATH}/${CONTRIB_PLATFORM}/lib
-										${CONTRIB_PATH}/${CONTRIB_PLATFORM}/lib/opencv4/3rdparty
-										${LIBS_DIR}/${TENSORFLOW}/lib/${CONTRIB_PLATFORM}
-										)
+target_include_directories(${ProjectName} PUBLIC ${PROJECT_BINARY_DIR}
+                                                 ${PROJECT_SOURCE_DIR}
+                                                 ${PLUGINS_LIB}
+                                                 ${DAEMON_SRC}
+                                                 ${CONTRIB_PATH}/${CONTRIB_PLATFORM}/include
+                                                 ${CONTRIB_PATH}/${CONTRIB_PLATFORM}/include/opencv4
+                                                 ${LIBS_DIR}/${TENSORFLOW}/include
+                                                 ${LIBS_DIR}/${TENSORFLOW}/include/third_party/eigen3
+                                                 ${LIBS_DIR}/${TENSORFLOW}/include/flatbuffers
+                                                 )
+link_directories(${ProjectName} PUBLIC    ${CONTRIB_PATH}
+                                        ${LIBS_BIN_DIR}/${TENSORFLOW}/lib/${CONTRIB_PLATFORM}
+                                        ${CONTRIB_PATH}/${CONTRIB_PLATFORM}/lib
+                                        ${CONTRIB_PATH}/${CONTRIB_PLATFORM}/lib/opencv4/3rdparty
+                                        )
 target_link_libraries(${ProjectName} PUBLIC swscale avutil libpng opencv_imgcodecs opencv_imgproc opencv_core ${TFLIB})
 endif()
 
 add_custom_command(
-	TARGET ${ProjectName}
-	PRE_BUILD
-	COMMAND ${CMAKE_COMMAND} -E remove_directory -r ${JPL_DIRECTORY}
-	COMMAND ${CMAKE_COMMAND} -E remove_directory -r ${JPL_DIRECTORY}/../../../build/${ProjectName}
-	COMMAND ${CMAKE_COMMAND} -E make_directory ${JPL_DIRECTORY}/../../../build/${ProjectName}
-	COMMAND ${CMAKE_COMMAND} -E copy_directory ${CMAKE_SOURCE_DIR}/data ${JPL_DIRECTORY}/data
-	COMMAND ${CMAKE_COMMAND} -E copy_directory ${LIBS_DIR}/${TENSORFLOW}/lib/ ${JPL_DIRECTORY}/lib
-	COMMAND ${CMAKE_COMMAND} -E copy ${CMAKE_SOURCE_DIR}/modelsSRC/${model} ${JPL_DIRECTORY}/data/models
-	COMMAND ${CMAKE_COMMAND} -E rename ${JPL_DIRECTORY}/data/models/${model} ${JPL_DIRECTORY}/data/models/mModel${modelType}
-	COMMAND ${CMAKE_COMMAND} -E copy ${CMAKE_SOURCE_DIR}/manifest.json ${JPL_DIRECTORY}
-	COMMAND ${CMAKE_COMMAND} -E copy ${CMAKE_SOURCE_DIR}/${preferencesFile} ${JPL_DIRECTORY}/data
-	COMMAND ${CMAKE_COMMAND} -E rename ${JPL_DIRECTORY}/data/${preferencesFile} ${JPL_DIRECTORY}/data/preferences.json
-	COMMENT "Assembling Plugin files"
+    TARGET ${ProjectName}
+    PRE_BUILD
+    COMMAND ${CMAKE_COMMAND} -E remove_directory ${JPL_DIRECTORY}
+    COMMAND ${CMAKE_COMMAND} -E remove_directory ${JPL_DIRECTORY}/../../../build/${ProjectName}
+    COMMAND ${CMAKE_COMMAND} -E copy_directory ${CMAKE_SOURCE_DIR}/data ${JPL_DIRECTORY}/data
+    COMMAND ${CMAKE_COMMAND} -E copy_directory ${LIBS_BIN_DIR}/${TENSORFLOW}/lib/ ${JPL_DIRECTORY}/lib
+    COMMAND ${CMAKE_COMMAND} -E make_directory ${JPL_DIRECTORY}/data/models
+    COMMAND ${CMAKE_COMMAND} -E copy ${CMAKE_SOURCE_DIR}/modelsSRC/${model} ${JPL_DIRECTORY}/data/models
+    COMMAND ${CMAKE_COMMAND} -E rename ${JPL_DIRECTORY}/data/models/${model} ${JPL_DIRECTORY}/data/models/mModel${modelType}
+    COMMAND ${CMAKE_COMMAND} -E copy ${CMAKE_SOURCE_DIR}/manifest.json ${JPL_DIRECTORY}
+    COMMAND ${CMAKE_COMMAND} -E copy ${CMAKE_SOURCE_DIR}/${preferencesFile} ${JPL_DIRECTORY}/data
+    COMMAND ${CMAKE_COMMAND} -E rename ${JPL_DIRECTORY}/data/${preferencesFile} ${JPL_DIRECTORY}/data/preferences.json
+    COMMENT "Assembling Plugin files"
 )
 
 if (WIN32)
-	add_custom_command(
-		TARGET ${ProjectName}
-		POST_BUILD
-		COMMAND ${CMAKE_COMMAND} -E copy ${PROJECT_BINARY_DIR}/Release/${ProjectName}.lib ${JPL_DIRECTORY}/lib/${CONTRIB_PLATFORM}
-		COMMAND ${CMAKE_COMMAND} -E copy ${PROJECT_BINARY_DIR}/Release/${LIBRARY_FILE_NAME} ${JPL_DIRECTORY}/lib/${CONTRIB_PLATFORM}
-		COMMAND python ${PROJECT_SOURCE_DIR}/../assemblePlugin.py --plugins=GreenScreen
-		COMMENT "Generating JPL archive"
-	)
+    add_custom_command(
+        TARGET ${ProjectName}
+        POST_BUILD
+        COMMAND ${CMAKE_COMMAND} -E make_directory ${JPL_DIRECTORY}/../../../build/x64-windows/${TENSORFLOW}
+        COMMAND ${CMAKE_COMMAND} -E copy ${PROJECT_BINARY_DIR}/Release/${ProjectName}.lib ${JPL_DIRECTORY}/lib/${CONTRIB_PLATFORM}
+        COMMAND ${CMAKE_COMMAND} -E copy ${PROJECT_BINARY_DIR}/Release/${LIBRARY_FILE_NAME} ${JPL_DIRECTORY}/lib/${CONTRIB_PLATFORM}
+        COMMAND python ${PROJECT_SOURCE_DIR}/../assemble-plugin.py --plugins=GreenScreen --extraPath=${TENSORFLOW}
+        COMMENT "Generating JPL archive"
+    )
 else()
-	add_custom_command(
-		TARGET ${ProjectName}
-		POST_BUILD
-		COMMAND ${CMAKE_COMMAND} -E copy ${PROJECT_BINARY_DIR}/Release/${LIBRARY_FILE_NAME} ${JPL_DIRECTORY}/lib/${CONTRIB_PLATFORM}
-		COMMAND python ${PROJECT_SOURCE_DIR}/../assemblePlugin.py --plugins=GreenScreen
-		COMMENT "Generating JPL archive"
-	)
+    add_custom_command(
+        TARGET ${ProjectName}
+        POST_BUILD
+        COMMAND ${CMAKE_COMMAND} -E make_directory ${JPL_DIRECTORY}/../../../build/x86_64-linux-gnu/${TENSORFLOW}
+        COMMAND ${CMAKE_COMMAND} -E copy ${PROJECT_BINARY_DIR}/Release/${LIBRARY_FILE_NAME} ${JPL_DIRECTORY}/lib/${CONTRIB_PLATFORM}
+        COMMAND python ${PROJECT_SOURCE_DIR}/../assemble-plugin.py --plugins=GreenScreen --extraPath=${TENSORFLOW}
+        COMMENT "Generating JPL archive"
+    )
 
 endif()
\ No newline at end of file
diff --git a/GreenScreen/TFInference.cpp b/GreenScreen/TFInference.cpp
index 678bc94..9fa036d 100644
--- a/GreenScreen/TFInference.cpp
+++ b/GreenScreen/TFInference.cpp
@@ -15,20 +15,19 @@
  *
  *  You should have received a copy of the GNU General Public License
  *  along with this program; if not, write to the Free Software
- *  Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301 USA.
+ *  Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301
+ * USA.
  */
 
 #include "TFInference.h"
 // Std libraries
 #include <fstream>
-#include <numeric>
 #include <iostream>
+#include <numeric>
 #include <stdlib.h>
 
-
-#ifdef TFLITE 
+#ifdef TFLITE
 // Tensorflow headers
-#include <tensorflow/lite/interpreter.h>
 #include <tensorflow/lite/builtin_op_data.h>
 #include <tensorflow/lite/interpreter.h>
 #include <tensorflow/lite/kernels/register.h>
@@ -49,7 +48,9 @@
 const std::string TAG = "FORESEG";
 
 namespace jami {
-TensorflowInference::TensorflowInference(TFModel tfModel) : tfModel(tfModel) {}
+TensorflowInference::TensorflowInference(TFModel tfModel)
+    : tfModel(tfModel)
+{}
 
 TensorflowInference::~TensorflowInference() {}
 
@@ -69,8 +70,7 @@
     if (!flatbufferModel) {
         std::runtime_error("Failed to load the model file");
     }
-    Plog::log(Plog::LogPriority::INFO, "TENSOR", "MODEL LOADED" );
-
+    Plog::log(Plog::LogPriority::INFO, "TENSOR", "MODEL LOADED");
 }
 void
 TensorflowInference::buildInterpreter()
@@ -80,22 +80,20 @@
     tflite::ops::builtin::BuiltinOpResolver resolver;
     tflite::InterpreterBuilder builder(*flatbufferModel, resolver);
     builder(&interpreter);
-    if(interpreter) {
+    if (interpreter) {
         setInterpreterSettings();
-        Plog::log(Plog::LogPriority::INFO, "TENSOR", "INTERPRETER BUILT" );
+        Plog::log(Plog::LogPriority::INFO, "TENSOR", "INTERPRETER BUILT");
 
         if (tfModel.useNNAPI) {
-                TfLiteDelegate* optionalNnApiDelegate = tflite::NnApiDelegate();
+            TfLiteDelegate* optionalNnApiDelegate = tflite::NnApiDelegate();
 
-                if (interpreter->ModifyGraphWithDelegate(optionalNnApiDelegate) != kTfLiteOk) {
-                    Plog::log(Plog::LogPriority::INFO, "TENSOR", "INTERPRETER ERROR!!!" );
-                }
-                else {
-                    Plog::log(Plog::LogPriority::INFO, "TENSOR", "INTERPRETER SET" );
-                    allocateTensors();
-                }
-        }
-        else {
+            if (interpreter->ModifyGraphWithDelegate(optionalNnApiDelegate) != kTfLiteOk) {
+                Plog::log(Plog::LogPriority::INFO, "TENSOR", "INTERPRETER ERROR!!!");
+            } else {
+                Plog::log(Plog::LogPriority::INFO, "TENSOR", "INTERPRETER SET");
+                allocateTensors();
+            }
+        } else {
             allocateTensors();
         }
     }
@@ -107,7 +105,7 @@
     if (interpreter->AllocateTensors() != kTfLiteOk) {
         std::runtime_error("Failed to allocate tensors!");
     } else {
-        Plog::log(Plog::LogPriority::INFO, "TENSOR", "TENSORS ALLOCATED" );
+        Plog::log(Plog::LogPriority::INFO, "TENSOR", "TENSORS ALLOCATED");
         allocated_ = true;
     }
 }
@@ -118,20 +116,19 @@
     // PrintInterpreterState(interpreter.get());
     std::ostringstream oss;
     oss << "=============== inputs/outputs dimensions ==============="
-            << "\n";
+        << "\n";
     const std::vector<int> inputs = interpreter->inputs();
     const std::vector<int> outputs = interpreter->outputs();
     oss << "number of inputs: " << inputs.size() << std::endl;
     oss << "number of outputs: " << outputs.size() << std::endl;
 
-    Plog::log(Plog::LogPriority::INFO, "TENSOR", oss.str() );
+    Plog::log(Plog::LogPriority::INFO, "TENSOR", oss.str());
     int input = interpreter->inputs()[0];
     int output = interpreter->outputs()[0];
     oss << "input 0 index: " << input << std::endl;
     oss << "output 0 index: " << output << std::endl;
-    oss << "=============== input dimensions ==============="
-            << std::endl;
-    Plog::log(Plog::LogPriority::INFO, "TENSOR", oss.str() );
+    oss << "=============== input dimensions ===============" << std::endl;
+    Plog::log(Plog::LogPriority::INFO, "TENSOR", oss.str());
     // get input dimension from the input tensor metadata
     // assuming one input only
 
@@ -142,8 +139,8 @@
     }
     oss.str("");
     oss << "=============== output dimensions ==============="
-            << "\n";
-    Plog::log(Plog::LogPriority::INFO, "TENSOR", oss.str() );
+        << "\n";
+    Plog::log(Plog::LogPriority::INFO, "TENSOR", oss.str());
     // get input dimension from the input tensor metadata
     // assuming one input only
     for (size_t i = 0; i < outputs.size(); i++) {
@@ -173,19 +170,18 @@
     for (size_t i = 0; i < nbDimensions; i++) {
         if (i == dimensions.size() - 1) {
             tensorDescription << dimensions[i];
-        }
-        else {
+        } else {
             tensorDescription << dimensions[i] << " x ";
         }
     }
     tensorDescription << std::endl;
-    Plog::log(Plog::LogPriority::INFO, "TENSOR", tensorDescription.str() );
+    Plog::log(Plog::LogPriority::INFO, "TENSOR", tensorDescription.str());
 }
 
 std::vector<int>
 TensorflowInference::getTensorDimensions(int index) const
 {
-    TfLiteIntArray *dims = interpreter->tensor(index)->dims;
+    TfLiteIntArray* dims = interpreter->tensor(index)->dims;
     size_t size = static_cast<size_t>(interpreter->tensor(index)->dims->size);
     std::vector<int> result;
     result.reserve(size);
@@ -203,7 +199,9 @@
 {
     for (size_t i = 0; i < tfModel.numberOfRuns; i++) {
         if (interpreter->Invoke() != kTfLiteOk) {
-            Plog::log(Plog::LogPriority::INFO, "RUN GRAPH", "A problem occured when running the graph");
+            Plog::log(Plog::LogPriority::INFO,
+                      "RUN GRAPH",
+                      "A problem occured when running the graph");
         }
     }
 }
@@ -224,43 +222,49 @@
 TensorflowInference::LoadGraph()
 {
     tensorflow::GraphDef graph_def;
-    tensorflow::Status load_graph_status = tensorflow::ReadBinaryProto(tensorflow::Env::Default(), tfModel.modelPath, &graph_def);
+    tensorflow::Status load_graph_status = tensorflow::ReadBinaryProto(tensorflow::Env::Default(),
+                                                                       tfModel.modelPath,
+                                                                       &graph_def);
     if (!load_graph_status.ok()) {
         allocated_ = false;
         Plog::log(Plog::LogPriority::INFO, "LOAD GRAPH", "A problem occured when loading the graph");
-        return ;
+        return;
     }
     Plog::log(Plog::LogPriority::INFO, "LOAD GRAPH", "graph loaded");
 
-    // Plog::log(Plog::LogPriority::INFO, "GRAPH SIZE: ", std::to_string(graph_def.node_size()));
-    // for (auto& node : *graph_def.mutable_node())
+    // Plog::log(Plog::LogPriority::INFO, "GRAPH SIZE: ",
+    // std::to_string(graph_def.node_size())); for (auto& node :
+    // *graph_def.mutable_node())
     // {
-    //     Plog::log(Plog::LogPriority::INFO, "GRAPH NODE: ", node.name().c_str());
-    //     // Plog::log(Plog::LogPriority::INFO, "\tNODE SIZE: ", node.().c_str());
+    //     Plog::log(Plog::LogPriority::INFO, "GRAPH NODE: ",
+    //     node.name().c_str());
+    //     // Plog::log(Plog::LogPriority::INFO, "\tNODE SIZE: ",
+    //     node.().c_str());
     // }
 
     PluginParameters* parameters = getGlobalPluginParameters();
 
     tensorflow::SessionOptions options;
-    if(parameters->useGPU) {
+    if (parameters->useGPU) {
         options.config.mutable_gpu_options()->set_allow_growth(true);
         options.config.mutable_gpu_options()->set_per_process_gpu_memory_fraction(0.3);
-    }
-    else {
+    } else {
 #ifdef WIN32
-        options.config.mutable_device_count()->insert({ "CPU", 1 });
-        options.config.mutable_device_count()->insert({ "GPU", 0 });
+        options.config.mutable_device_count()->insert({"CPU", 1});
+        options.config.mutable_device_count()->insert({"GPU", 0});
 #else
-	setenv("CUDA_VISIBLE_DEVICES", "", 1);
+        setenv("CUDA_VISIBLE_DEVICES", "", 1);
 #endif
     }
-    
+
     (&session)->reset(tensorflow::NewSession(options));
     tensorflow::Status session_create_status = session->Create(graph_def);
     if (!session_create_status.ok()) {
-        Plog::log(Plog::LogPriority::INFO, "INIT SESSION", "A problem occured when initializating session");
+        Plog::log(Plog::LogPriority::INFO,
+                  "INIT SESSION",
+                  "A problem occured when initializating session");
         allocated_ = true;
-        return ;
+        return;
     }
     Plog::log(Plog::LogPriority::INFO, "INIT SESSION", "session initialized");
 
@@ -272,9 +276,14 @@
 {
     for (size_t i = 0; i < tfModel.numberOfRuns; i++) {
         // Actually run the image through the model.
-        tensorflow::Status run_status = session->Run({{tfModel.inputLayer, imageTensor}}, {tfModel.outputLayer}, {}, &outputs);
+        tensorflow::Status run_status = session->Run({{tfModel.inputLayer, imageTensor}},
+                                                     {tfModel.outputLayer},
+                                                     {},
+                                                     &outputs);
         if (!run_status.ok()) {
-            Plog::log(Plog::LogPriority::INFO, "RUN GRAPH", "A problem occured when running the graph");
+            Plog::log(Plog::LogPriority::INFO,
+                      "RUN GRAPH",
+                      "A problem occured when running the graph");
         }
     }
 }
@@ -287,4 +296,4 @@
 }
 #endif
 
-}
+} // namespace jami
diff --git a/GreenScreen/TFInference.h b/GreenScreen/TFInference.h
index 823dab9..778a253 100644
--- a/GreenScreen/TFInference.h
+++ b/GreenScreen/TFInference.h
@@ -60,15 +60,14 @@
 struct SessionOptions;
 class TensorShape;
 class Env;
-enum DataType:int;
-} // namespace namespace tensorflow
+enum DataType : int;
+} // namespace tensorflow
 
 #endif
 
-
-namespace jami
+namespace jami {
+class TensorflowInference
 {
-class TensorflowInference {
 public:
     /**
      * @brief TensorflowInference
@@ -102,7 +101,7 @@
     void LoadGraph();
     tensorflow::Tensor imageTensor;
 
-#endif //TFLITE
+#endif // TFLITE
 
     /**
      * @brief runGraph
@@ -150,4 +149,4 @@
 
     bool allocated_ = false;
 };
-}
+} // namespace jami
diff --git a/GreenScreen/TFModels.h b/GreenScreen/TFModels.h
index be0b4ae..43787c9 100644
--- a/GreenScreen/TFModels.h
+++ b/GreenScreen/TFModels.h
@@ -25,11 +25,14 @@
 #include <vector>
 #include "pluginParameters.h"
 
-struct TFModelConfiguration {
-    TFModelConfiguration (std::string& model): modelPath{model} {}
+struct TFModelConfiguration
+{
+    TFModelConfiguration(std::string& model)
+        : modelPath {model}
+    {}
     std::string modelPath;
     std::vector<unsigned int> normalizationValues;
-    std::vector<int> dims = {1, 385, 385, 3}; //model Input dimensions
+    std::vector<int> dims = {1, 385, 385, 3}; // model Input dimensions
     unsigned int numberOfRuns = 1;
     // TensorflowLite specific settings
 
@@ -48,14 +51,24 @@
     std::string inputLayer = "sub_2";
     std::string outputLayer = "float_segments";
 #endif // TFLITE
-
 };
 
-struct TFModel : TFModelConfiguration {
-    TFModel(std::string&& model, std::string&& labels): TFModelConfiguration(model), labelsPath{labels}{}
-    TFModel(std::string& model, std::string& labels): TFModelConfiguration(model), labelsPath{labels}{}
-    TFModel(std::string&& model): TFModelConfiguration(model) {}
-    TFModel(std::string& model): TFModelConfiguration(model) {}
+struct TFModel : TFModelConfiguration
+{
+    TFModel(std::string&& model, std::string&& labels)
+        : TFModelConfiguration(model)
+        , labelsPath {labels}
+    {}
+    TFModel(std::string& model, std::string& labels)
+        : TFModelConfiguration(model)
+        , labelsPath {labels}
+    {}
+    TFModel(std::string&& model)
+        : TFModelConfiguration(model)
+    {}
+    TFModel(std::string& model)
+        : TFModelConfiguration(model)
+    {}
 
     std::string labelsPath = " ";
     unsigned int labelsPadding = 16;
diff --git a/GreenScreen/build.sh b/GreenScreen/build.sh
old mode 100644
new mode 100755
index d9b6fbf..d346c19
--- a/GreenScreen/build.sh
+++ b/GreenScreen/build.sh
@@ -1,78 +1,355 @@
 #! /bin/bash
 # Build the plugin for the project
-if [ -z $DAEMON ]; then
+export OSTYPE
+ARCH=$(arch)
+
+# Flags:
+
+  # -p: number of processors to use.
+  # -c: runtime plugin CPU/GPU setting.
+  # -t: target platform.
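+  # Example invocations (illustrative only; they rely on the defaults set below):
+  #     TF_LIBS_DIR=./../../../Libs ./build.sh -t linux-gnu -c GPU
+  #     ./build.sh -t android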
+
+
+if [ -z "${DAEMON}" ]; then
     DAEMON="./../../daemon"
-    echo "DAEMON not provided, building for ${DAEMON}"
+    echo "DAEMON not provided, building with ${DAEMON}"
 fi
 
 PLUGIN_NAME="GreenScreen"
-JPL_FILE_NAME=${PLUGIN_NAME}".jpl"
-SO_FILE_NAME="lib"${PLUGIN_NAME}".so"
+JPL_FILE_NAME="${PLUGIN_NAME}.jpl"
+SO_FILE_NAME="lib${PLUGIN_NAME}.so"
 DAEMON_SRC="${DAEMON}/src"
 CONTRIB_PATH="${DAEMON}/contrib"
-DESTINATION_PATH="./../build/"
 PLUGINS_LIB="../lib"
-LIBS_DIR="/home/${USER}/Libs"
+LIBS_DIR="./../contrib/Libs"
+
+if [ -z "${TF_LIBS_DIR}" ]; then
+    TF_LIBS_DIR="./../../../Libs"
+fi
+echo "Building with ${TF_LIBS_DIR}"
+
+PLATFORM="linux-gnu"
+PROCESSOR='GPU'
+
+while getopts t:c:p OPT; do
+  case "$OPT" in
+    t)
+      PLATFORM="${OPTARG}"
+      if [ -z "${TF}" ]; then
+          if [ "$PLATFORM" = 'linux-gnu' ]; then
+              TF="_tensorflow_cc"
+          elif [ "$PLATFORM" = 'android' ]; then
+              TF="_tensorflowLite"
+          fi
+      fi
+    ;;
+    c)
+      PROCESSOR="${OPTARG}"
+    ;;
+    p)
+    ;;
+    \?)
+      exit 1
+    ;;
+  esac
+done
 
 
-CONTRIB_PLATFORM_CURT=x86_64
-CONTRIB_PLATFORM=${CONTRIB_PLATFORM_CURT}-linux-gnu
-
-mkdir -p lib/${CONTRIB_PLATFORM}
-mkdir -p ${DESTINATION_PATH}/jpl
-
-# Compile
-clang++ -std=c++17 -shared -fPIC \
--Wl,-Bsymbolic,-rpath,"\${ORIGIN}" \
--Wall -Wextra \
--Wno-unused-variable \
--Wno-unused-function \
--Wno-unused-parameter \
--DTFLITE \
--I"." \
--I${DAEMON_SRC} \
--I"${CONTRIB_PATH}/${CONTRIB_PLATFORM}/include" \
--I"${CONTRIB_PATH}/${CONTRIB_PLATFORM}/include/opencv4" \
--I${LIBS_DIR}/_tensorflow_distribution/include/flatbuffers \
--I${LIBS_DIR}/_tensorflow_distribution/include \
--I${PLUGINS_LIB} \
-main.cpp \
-videoSubscriber.cpp \
-pluginProcessor.cpp \
-pluginMediaHandler.cpp \
-TFInference.cpp \
-pluginInference.cpp \
-pluginParameters.cpp \
--L${CONTRIB_PATH}/${CONTRIB_PLATFORM}/lib/ \
--L${CONTRIB_PATH}/${CONTRIB_PLATFORM}/lib/opencv4/3rdparty/ \
--L${LIBS_DIR}/_tensorflow_distribution/lib/${CONTRIB_PLATFORM}/ \
--lswscale \
--lavutil \
--lopencv_imgcodecs \
--lopencv_imgproc \
--lopencv_core \
--ltensorflowlite \
--lpng \
--o lib/${CONTRIB_PLATFORM}/${SO_FILE_NAME}
-# (above) Always put opencv_core after all other opencv libs
-# (above) Always put avutil after all other ffmpeg libs
-# (above) Always put png after all other libs
-
+if [ -z "${TF}" ]; then
+    TF="_tensorflow_cc"
+fi
+echo "Building with ${TF}"
 mkdir ./data/models
-cp ${LIBS_DIR}/_tensorflow_distribution/lib/${CONTRIB_PLATFORM}/libtensorflowlite.so lib/$CONTRIB_PLATFORM
-cp /usr/lib/${CONTRIB_PLATFORM}/libswscale.so.4 lib/$CONTRIB_PLATFORM
-cp /usr/lib/${CONTRIB_PLATFORM}/libavutil.so.55 lib/$CONTRIB_PLATFORM
-cp /usr/lib/${CONTRIB_PLATFORM}/libpng16.so.16 lib/$CONTRIB_PLATFORM
 
-cp ./modelsSRC/mobilenet_v2_deeplab_v3_256_myquant.tflite ./data/models/mModel.tflite
-cp ./preferences-tflite.json ./data/preferences.json
+
+if [[ "${TF}" = "_tensorflow_cc" ]] && [[ "${PLATFORM}" = "linux-gnu" ]]
+then
+    if [ -z "$CUDALIBS" ]; then
+        rm -r ./data/models
+        echo "CUDALIBS must point to CUDA 10.1!"
+        exit 1
+    fi
+    if [ -z "$CUDNN" ]; then
+        rm -r ./data/models
+        echo "CUDNN must point to libcudnn.so 7!"
+        exit 1
+    fi
+
+    echo "Building for ${PROCESSOR}"
+
+    CONTRIB_PLATFORM_CURT=${ARCH}
+    CONTRIB_PLATFORM=${CONTRIB_PLATFORM_CURT}-${PLATFORM}
+    DESTINATION_PATH="./../build/${CONTRIB_PLATFORM}/${TF}"
+    mkdir -p "lib/${CONTRIB_PLATFORM}"
+    mkdir -p "${DESTINATION_PATH}"
+
+
+    # Compile
+    clang++ -std=c++17 -shared -fPIC \
+    -Wl,-Bsymbolic,-rpath,"\${ORIGIN}" \
+    -Wall -Wextra \
+    -Wno-unused-variable \
+    -Wno-unused-function \
+    -Wno-unused-parameter \
+    -D"${PROCESSOR}" \
+    -I"." \
+    -I"${DAEMON_SRC}" \
+    -I"${CONTRIB_PATH}/${CONTRIB_PLATFORM}/include" \
+    -I"${CONTRIB_PATH}/${CONTRIB_PLATFORM}/include/opencv4" \
+    -I"${LIBS_DIR}/${TF}/include" \
+    -I"${LIBS_DIR}/${TF}/include/third_party/eigen3" \
+    -I"${PLUGINS_LIB}" \
+    main.cpp \
+    videoSubscriber.cpp \
+    pluginProcessor.cpp \
+    pluginMediaHandler.cpp \
+    TFInference.cpp \
+    pluginInference.cpp \
+    pluginParameters.cpp \
+    -L"${CONTRIB_PATH}/${CONTRIB_PLATFORM}/lib/" \
+    -L"${CONTRIB_PATH}/${CONTRIB_PLATFORM}/lib/opencv4/3rdparty/" \
+    -L"${TF_LIBS_DIR}/${TF}/lib/${CONTRIB_PLATFORM}-gpu61/" \
+    -lswscale \
+    -lavutil \
+    -lopencv_imgcodecs \
+    -lopencv_imgproc \
+    -lopencv_core \
+    -ltensorflow_cc \
+    -lpng \
+    -o "lib/${CONTRIB_PLATFORM}/${SO_FILE_NAME}"
+
+    cp "${TF_LIBS_DIR}/${TF}/lib/${CONTRIB_PLATFORM}-gpu61/libtensorflow_cc.so" "lib/$CONTRIB_PLATFORM/libtensorflow_cc.so.2"
+    cp "/usr/lib/${CONTRIB_PLATFORM}/libswscale.so.4" "lib/$CONTRIB_PLATFORM"
+    cp "/usr/lib/${CONTRIB_PLATFORM}/libavutil.so.55" "lib/$CONTRIB_PLATFORM"
+    cp "/usr/lib/${CONTRIB_PLATFORM}/libpng16.so.16" "lib/$CONTRIB_PLATFORM"
+    cp "${CUDALIBS}/libcudart.so" "lib/$CONTRIB_PLATFORM/libcudart.so.10.0"
+    cp "${CUDNN}/libcublas.so.10" "lib/$CONTRIB_PLATFORM/libcublas.so.10.0"
+    cp "${CUDALIBS}/libcufft.so.10" "lib/$CONTRIB_PLATFORM/libcufft.so.10.0"
+    cp "${CUDALIBS}/libcurand.so.10" "lib/$CONTRIB_PLATFORM/libcurand.so.10.0"
+    cp "${CUDALIBS}/libcusolver.so.10" "lib/$CONTRIB_PLATFORM/libcusolver.so.10.0"
+    cp "${CUDALIBS}/libcusparse.so.10" "lib/$CONTRIB_PLATFORM/libcusparse.so.10.0"
+    cp "${CUDNN}/libcudnn.so.7" "lib/$CONTRIB_PLATFORM"
+
+    cp ./modelsSRC/mModel-resnet50float.pb ./data/models/mModel.pb
+    cp ./preferences-tfcc.json ./data/preferences.json
+elif [ "${TF}" = "_tensorflowLite" ]
+then
+    if [ "${PLATFORM}" = "linux-gnu" ]
+    then
+        CONTRIB_PLATFORM_CURT=${ARCH}
+        CONTRIB_PLATFORM=${CONTRIB_PLATFORM_CURT}-${PLATFORM}
+        DESTINATION_PATH="./../build/${CONTRIB_PLATFORM}/${TF}"
+        mkdir -p "lib/${CONTRIB_PLATFORM}"
+        mkdir -p "${DESTINATION_PATH}"
+
+        # Compile
+        clang++ -std=c++17 -shared -fPIC \
+        -Wl,-Bsymbolic,-rpath,"\${ORIGIN}" \
+        -Wall -Wextra \
+        -Wno-unused-variable \
+        -Wno-unused-function \
+        -Wno-unused-parameter \
+        -DTFLITE \
+        -I"." \
+        -I"${DAEMON_SRC}" \
+        -I"${CONTRIB_PATH}/${CONTRIB_PLATFORM}/include" \
+        -I"${CONTRIB_PATH}/${CONTRIB_PLATFORM}/include/opencv4" \
+        -I"${LIBS_DIR}/${TF}/include/flatbuffers" \
+        -I"${LIBS_DIR}/${TF}/include" \
+        -I"${PLUGINS_LIB}" \
+        main.cpp \
+        videoSubscriber.cpp \
+        pluginProcessor.cpp \
+        pluginMediaHandler.cpp \
+        TFInference.cpp \
+        pluginInference.cpp \
+        pluginParameters.cpp \
+        -L"${CONTRIB_PATH}/${CONTRIB_PLATFORM}/lib/" \
+        -L"${CONTRIB_PATH}/${CONTRIB_PLATFORM}/lib/opencv4/3rdparty/" \
+        -L"${TF_LIBS_DIR}/${TF}/lib/${CONTRIB_PLATFORM}/" \
+        -lswscale \
+        -lavutil \
+        -lopencv_imgcodecs \
+        -lopencv_imgproc \
+        -lopencv_core \
+        -ltensorflowlite \
+        -lpng \
+        -o "lib/${CONTRIB_PLATFORM}/${SO_FILE_NAME}"
+
+        cp "${TF_LIBS_DIR}/${TF}/lib/${CONTRIB_PLATFORM}/libtensorflowlite.so" "lib/$CONTRIB_PLATFORM"
+        cp "/usr/lib/${CONTRIB_PLATFORM}/libswscale.so.4" "lib/$CONTRIB_PLATFORM"
+        cp "/usr/lib/${CONTRIB_PLATFORM}/libavutil.so.55" "lib/$CONTRIB_PLATFORM"
+        cp "/usr/lib/${CONTRIB_PLATFORM}/libpng16.so.16" "lib/$CONTRIB_PLATFORM"
+
+    elif [ "${PLATFORM}" = "android" ]
+    then
+        DESTINATION_PATH="./../build/android"
+        mkdir -p "${DESTINATION_PATH}"
+
+        if [ -z "$ANDROID_NDK" ]; then
+            ANDROID_NDK="/home/${USER}/Android/Sdk/ndk/21.1.6352462"
+            echo "ANDROID_NDK not provided, building with ${ANDROID_NDK}"
+        fi
+
+        #=========================================================
+        #    Check if the ANDROID_ABI was provided
+        #    if not, set default
+        #=========================================================
+        if [ -z "$ANDROID_ABI" ]; then
+            ANDROID_ABI="armeabi-v7a arm64-v8a"
+            echo "ANDROID_ABI not provided, building for ${ANDROID_ABI}"
+        fi
+
+        buildlib() {
+            echo "$CURRENT_ABI"
+            mkdir -p "lib/$CURRENT_ABI"
+
+            #=========================================================
+            #    ANDROID TOOLS
+            #=========================================================
+            export HOST_TAG=linux-x86_64
+            export TOOLCHAIN=$ANDROID_NDK/toolchains/llvm/prebuilt/$HOST_TAG
+
+            if [ "$CURRENT_ABI" = armeabi-v7a ]
+            then
+            export AR=$TOOLCHAIN/bin/arm-linux-android-ar
+            export AS=$TOOLCHAIN/bin/arm-linux-android-as
+            export CC=$TOOLCHAIN/bin/armv7a-linux-androideabi21-clang
+            export CXX=$TOOLCHAIN/bin/armv7a-linux-androideabi21-clang++
+            export LD=$TOOLCHAIN/bin/arm-linux-android-ld
+            export RANLIB=$TOOLCHAIN/bin/arm-linux-android-ranlib
+            export STRIP=$TOOLCHAIN/bin/arm-linux-androideabi-strip
+            export ANDROID_SYSROOT=${DAEMON}/../client-android/android-toolchain-21-arm/sysroot
+
+            elif [ "$CURRENT_ABI" = arm64-v8a ]
+            then
+            export AR=$TOOLCHAIN/bin/aarch64-linux-android-ar
+            export AS=$TOOLCHAIN/bin/aarch64-linux-android-as
+            export CC=$TOOLCHAIN/bin/aarch64-linux-android21-clang
+            export CXX=$TOOLCHAIN/bin/aarch64-linux-android21-clang++
+            export LD=$TOOLCHAIN/bin/aarch64-linux-android-ld
+            export RANLIB=$TOOLCHAIN/bin/aarch64-linux-android-ranlib
+            export STRIP=$TOOLCHAIN/bin/aarch64-linux-android-strip
+            export ANDROID_SYSROOT=${DAEMON}/../client-android/android-toolchain-21-arm64/sysroot
+
+            elif [ "$CURRENT_ABI" = x86_64 ]
+            then
+            export AR=$TOOLCHAIN/bin/x86_64-linux-android-ar
+            export AS=$TOOLCHAIN/bin/x86_64-linux-android-as
+            export CC=$TOOLCHAIN/bin/x86_64-linux-android21-clang
+            export CXX=$TOOLCHAIN/bin/x86_64-linux-android21-clang++
+            export LD=$TOOLCHAIN/bin/x86_64-linux-android-ld
+            export RANLIB=$TOOLCHAIN/bin/x86_64-linux-android-ranlib
+            export STRIP=$TOOLCHAIN/bin/x86_64-linux-android-strip
+            export ANDROID_SYSROOT=${DAEMON}/../client-android/android-toolchain-21-x86_64/sysroot
+
+            else
+            echo "ABI NOT OK" >&2
+            rm -r lib/
+            rm -r ./data/models
+            exit 1
+            fi
+
+            #=========================================================
+            #    CONTRIBS
+            #=========================================================
+            if [ "$CURRENT_ABI" = armeabi-v7a ]
+            then
+            CONTRIB_PLATFORM=arm-linux-androideabi
+
+            elif [ "$CURRENT_ABI" = arm64-v8a ]
+            then
+            CONTRIB_PLATFORM=aarch64-linux-android
+
+            elif [ "$CURRENT_ABI" = x86_64 ]
+            then
+            CONTRIB_PLATFORM=x86_64-linux-android
+            fi
+
+            #NDK SOURCES FOR cpufeatures
+            NDK_SOURCES=${ANDROID_NDK}/sources/android
+
+            #=========================================================
+            #    LD_FLAGS
+            #=========================================================
+            if [ "$CURRENT_ABI" = armeabi-v7a ]
+            then
+            export EXTRA_LDFLAGS="${EXTRA_LDFLAGS} -L${ANDROID_SYSROOT}/usr/lib/arm-linux-androideabi -L${ANDROID_SYSROOT}/usr/lib/arm-linux-androideabi/21"
+            elif [ "$CURRENT_ABI" = arm64-v8a ]
+            then
+            export EXTRA_LDFLAGS="${EXTRA_LDFLAGS} -L${ANDROID_SYSROOT}/usr/lib/aarch64-linux-android -L${ANDROID_SYSROOT}/usr/lib/aarch64-linux-android/21"
+            elif [ "$CURRENT_ABI" = x86_64 ]
+            then
+            export EXTRA_LDFLAGS="${EXTRA_LDFLAGS} -L${ANDROID_SYSROOT}/usr/lib/x86_64-linux-android -L${ANDROID_SYSROOT}/usr/lib/x86_64-linux-android/21"
+            fi
+
+            #=========================================================
+            #    Compile CPU FEATURES, NEEDED FOR OPENCV
+            #=========================================================
+            $CC -c "$NDK_SOURCES/cpufeatures/cpu-features.c" -o cpu-features.o -o cpu-features.o --sysroot=$ANDROID_SYSROOT
+
+            #=========================================================
+            #    Compile the plugin
+            #=========================================================
+
+            # Create so destination folder
+            $CXX --std=c++14 -O3 -g -fPIC \
+            -Wl,-Bsymbolic,-rpath,"\${ORIGIN}" \
+            -shared \
+            -Wall -Wextra \
+            -Wno-unused-variable \
+            -Wno-unused-function \
+            -Wno-unused-parameter \
+            -DTFLITE \
+            -I"." \
+            -I"${DAEMON_SRC}" \
+            -I"${CONTRIB_PATH}/${CONTRIB_PLATFORM}/include" \
+            -I"${CONTRIB_PATH}/${CONTRIB_PLATFORM}/include/opencv4" \
+            -I"${LIBS_DIR}/${TF}/include/flatbuffers" \
+            -I"${LIBS_DIR}/${TF}/include" \
+            -I"${PLUGINS_LIB}" \
+            main.cpp \
+            videoSubscriber.cpp \
+            pluginProcessor.cpp \
+            pluginMediaHandler.cpp \
+            TFInference.cpp \
+            pluginInference.cpp \
+            pluginParameters.cpp \
+            cpu-features.o \
+            -L"${CONTRIB_PATH}/${CONTRIB_PLATFORM}/lib/" \
+            -L"${TF_LIBS_DIR}/${TF}/lib/${CURRENT_ABI}/" \
+            -lswscale \
+            -lavutil \
+            -lopencv_imgcodecs \
+            -lopencv_imgproc \
+            -lopencv_core \
+            -llibpng \
+            -ltensorflowlite \
+            -llog -lz \
+            --sysroot=$ANDROID_SYSROOT \
+            -o "lib/$CURRENT_ABI/${SO_FILE_NAME}"
+
+            cp "${TF_LIBS_DIR}/${TF}/lib/${CURRENT_ABI}/libtensorflowlite.so" "lib/$CURRENT_ABI"
+            rm cpu-features.o
+        }
+
+        # Build the so
+        for i in ${ANDROID_ABI}; do
+            CURRENT_ABI=$i
+            buildlib
+        done
+    fi
+
+    cp ./modelsSRC/mobilenet_v2_deeplab_v3_256_myquant.tflite ./data/models/mModel.tflite
+    cp ./preferences-tflite.json ./data/preferences.json
+fi
 
 zip -r ${JPL_FILE_NAME} data manifest.json lib
-mv ${JPL_FILE_NAME} ${DESTINATION_PATH}/jpl/
+mv ${JPL_FILE_NAME} ${DESTINATION_PATH}/
 
 # Cleanup
 # Remove lib after compilation
 rm -rf lib
-rm -r ./data/models/
-rm ./data/models/mModel.tflite
+rm -r ./data/models
 rm ./data/preferences.json
+
diff --git a/GreenScreen/buildandroid.sh b/GreenScreen/buildandroid.sh
deleted file mode 100644
index c0e2343..0000000
--- a/GreenScreen/buildandroid.sh
+++ /dev/null
@@ -1,188 +0,0 @@
-#! /bin/bash
-# Build the plugin for the project
-if [ -z $DAEMON ]; then
-    DAEMON="./../../daemon"
-    echo "DAEMON not provided, building for ${DAEMON}"
-fi
-if [ -z $ANDROID_NDK ]; then
-	ANDROID_NDK=/home/${USER}/Android/Sdk/ndk/21.1.6352462
-    echo "ANDROID_NDK not provided, building with ${ANDROID_NDK}"
-fi
-
-PLUGIN_NAME="GreenScreen"
-JPL_FILE_NAME=${PLUGIN_NAME}".jpl"
-SO_FILE_NAME="lib"${PLUGIN_NAME}".so"
-LIBS_DIR="/home/${USER}/Libs"
-DAEMON_SRC="${DAEMON}/src"
-CONTRIB_PATH="${DAEMON}/contrib"
-DESTINATION_PATH="./../build/"
-PLUGINS_LIB="../lib"
-
-#=========================================================
-#	Check if the ANDROID_ABI was provided
-#	if not, set default
-#=========================================================
-if [ -z $ANDROID_ABI ]; then
-    ANDROID_ABI="armeabi-v7a arm64-v8a"
-    echo "ANDROID_ABI not provided, building for ${ANDROID_ABI}"
-fi
-
-buildlib() {
-	echo $CURRENT_ABI
-	#=========================================================
-	#	ANDROID TOOLS
-	#=========================================================
-	export HOST_TAG=linux-x86_64
-	export TOOLCHAIN=$ANDROID_NDK/toolchains/llvm/prebuilt/$HOST_TAG
-
-	if [ $CURRENT_ABI = armeabi-v7a ]
-	then
-	export AR=$TOOLCHAIN/bin/arm-linux-android-ar
-	export AS=$TOOLCHAIN/bin/arm-linux-android-as
-	export CC=$TOOLCHAIN/bin/armv7a-linux-androideabi21-clang
-	export CXX=$TOOLCHAIN/bin/armv7a-linux-androideabi21-clang++
-	export LD=$TOOLCHAIN/bin/arm-linux-android-ld
-	export RANLIB=$TOOLCHAIN/bin/arm-linux-android-ranlib
-	export STRIP=$TOOLCHAIN/bin/arm-linux-androideabi-strip
-	export ANDROID_SYSROOT=./../../client-android/android-toolchain-21-arm/sysroot
-
-	elif [ $CURRENT_ABI = arm64-v8a ]
-	then
-	export AR=$TOOLCHAIN/bin/aarch64-linux-android-ar
-	export AS=$TOOLCHAIN/bin/aarch64-linux-android-as
-	export CC=$TOOLCHAIN/bin/aarch64-linux-android21-clang
-	export CXX=$TOOLCHAIN/bin/aarch64-linux-android21-clang++
-	export LD=$TOOLCHAIN/bin/aarch64-linux-android-ld
-	export RANLIB=$TOOLCHAIN/bin/aarch64-linux-android-ranlib
-	export STRIP=$TOOLCHAIN/bin/aarch64-linux-android-strip
-	export ANDROID_SYSROOT=./../../client-android/android-toolchain-21-arm64/sysroot
-
-	elif [ $CURRENT_ABI = x86_64 ]
-	then
-	export AR=$TOOLCHAIN/bin/x86_64-linux-android-ar
-	export AS=$TOOLCHAIN/bin/x86_64-linux-android-as
-	export CC=$TOOLCHAIN/bin/x86_64-linux-android21-clang
-	export CXX=$TOOLCHAIN/bin/x86_64-linux-android21-clang++
-	export LD=$TOOLCHAIN/bin/x86_64-linux-android-ld
-	export RANLIB=$TOOLCHAIN/bin/x86_64-linux-android-ranlib
-	export STRIP=$TOOLCHAIN/bin/x86_64-linux-android-strip
-	export ANDROID_SYSROOT=./../../client-android/android-toolchain-21-x86_64/sysroot
-
-	else
-	echo "ABI NOT OK" >&2
-	exit 1
-	fi
-
-	#=========================================================
-	#	CONTRIBS
-	#=========================================================
-	if [ $CURRENT_ABI = armeabi-v7a ]
-	then
-	CONTRIB_PLATFORM=arm-linux-androideabi
-
-	elif [ $CURRENT_ABI = arm64-v8a ]
-	then
-	CONTRIB_PLATFORM=aarch64-linux-android
-
-	elif [ $CURRENT_ABI = x86_64 ]
-	then
-	CONTRIB_PLATFORM=x86_64-linux-android
-	fi
-
-	# ASSETS
-	ANDROID_PROJECT_ASSETS=./../../client-android/ring-android/app/src/main/assets
-	# LIBS FOLDER
-	ANDROID_PROJECT_LIBS=./../../client-android/ring-android/app/src/main/libs/$CURRENT_ABI
-	#NDK SOURCES FOR cpufeatures
-	NDK_SOURCES=${ANDROID_NDK}/sources/android
-
-	#=========================================================
-	#	LD_FLAGS
-	#=========================================================
-	if [ $CURRENT_ABI = armeabi-v7a ]
-	then
-	export EXTRA_LDFLAGS="${EXTRA_LDFLAGS} -L${ANDROID_SYSROOT}/usr/lib/arm-linux-androideabi -L${ANDROID_SYSROOT}/usr/lib/arm-linux-androideabi/21"
-	elif [ $CURRENT_ABI = arm64-v8a ]
-	then
-	export EXTRA_LDFLAGS="${EXTRA_LDFLAGS} -L${ANDROID_SYSROOT}/usr/lib/aarch64-linux-android -L${ANDROID_SYSROOT}/usr/lib/aarch64-linux-android/21"
-	elif [ $CURRENT_ABI = x86_64 ]
-	then
-	export EXTRA_LDFLAGS="${EXTRA_LDFLAGS} -L${ANDROID_SYSROOT}/usr/lib/x86_64-linux-android -L${ANDROID_SYSROOT}/usr/lib/x86_64-linux-android/21"
-	fi
-
-	#=========================================================
-	#	Compile CPU FEATURES, NEEDED FOR OPENCV
-	#=========================================================
-	$CC -c $NDK_SOURCES/cpufeatures/cpu-features.c -o cpu-features.o -o cpu-features.o --sysroot=$ANDROID_SYSROOT
-
-	#=========================================================
-	#	Compile the plugin
-	#=========================================================
-
-	# Create so destination folder
-	mkdir -p lib/$CURRENT_ABI
-
-	# Create so destination folder
-    $CXX --std=c++14 -O3 -g -fPIC \
-	-Wl,-Bsymbolic,-rpath,"\${ORIGIN}" \
-	-shared \
-	-Wall -Wextra \
-	-Wno-unused-variable \
-	-Wno-unused-function \
-	-Wno-unused-parameter \
-	-DTFLITE \
-	-I"." \
-	-I${DAEMON_SRC} \
-	-I"${CONTRIB_PATH}/${CONTRIB_PLATFORM}/include" \
-    -I"${CONTRIB_PATH}/${CONTRIB_PLATFORM}/include/opencv4" \
-    -I${LIBS_DIR}/_tensorflow_distribution/include/flatbuffers \
-	-I${LIBS_DIR}/_tensorflow_distribution/include \
-	-I${PLUGINS_LIB} \
-	main.cpp \
-	videoSubscriber.cpp \
-	pluginProcessor.cpp \
-    pluginMediaHandler.cpp \
-	TFInference.cpp \
-	pluginInference.cpp \
-	pluginParameters.cpp \
-	cpu-features.o \
-	-L${CONTRIB_PATH}/${CONTRIB_PLATFORM}/lib/ \
-	-L${LIBS_DIR}/_tensorflow_distribution/lib/${CURRENT_ABI}/ \
-	-lswscale \
-	-lavutil \
-	-lopencv_imgcodecs \
-	-lopencv_imgproc \
-	-lopencv_core \
-    -llibpng \
-    -ltensorflowlite \
-	-llog -lz \
-	--sysroot=$ANDROID_SYSROOT \
-	-o lib/$CURRENT_ABI/${SO_FILE_NAME}
-	# (above) Always put opencv_core after all other opencv libs when linking statically
-	# (above) Put libavutil after other ffmpeg libraries
-
-	cp ${LIBS_DIR}/_tensorflow_distribution/lib/${CURRENT_ABI}/libtensorflowlite.so lib/$CURRENT_ABI
-}
-
-
-mkdir ./data/models
-cp ./modelsSRC/mobilenet_v2_deeplab_v3_256_myquant.tflite ./data/models/mModel.tflite
-cp ./preferences-tflite.json ./data/preferences.json
-
-# Build the so
-for i in ${ANDROID_ABI}; do
-	CURRENT_ABI=$i
-	buildlib
-done
-
-#Export the plugin data folder
-mkdir -p ${DESTINATION_PATH}/jpl/${PLUGIN_NAME}/
-zip -r ${JPL_FILE_NAME} data manifest.json lib
-mv ${JPL_FILE_NAME} ${DESTINATION_PATH}/jpl/${PLUGIN_NAME}/
-
-# Cleanup
-# Remove cpu-features object after compilation
-rm cpu-features.o
-rm -rf lib
-rm -r ./data/models
-rm ./data/preferences.json
diff --git a/GreenScreen/buildtfcc.sh b/GreenScreen/buildtfcc.sh
deleted file mode 100644
index 2eef434..0000000
--- a/GreenScreen/buildtfcc.sh
+++ /dev/null
@@ -1,94 +0,0 @@
-#! /bin/bash
-# Build the plugin for the project
-if [ -z $DAEMON ]; then
-    DAEMON="./../../daemon"
-    echo "DAEMON not provided, building for ${DAEMON}"
-fi
-
-if [ -z $CUDALIBS ]; then
-    CUDALIBS=~/anaconda3/envs/tf114/lib/
-    echo "CUDALIBS not provided, building for ${CUDALIBS}"
-fi
-
-if [ -z $PROCESSOR ]; then
-    PROCESSOR=GPU
-    echo "PROCESSOR not defined, building for GPU"
-fi
-
-PLUGIN_NAME="GreenScreen"
-JPL_FILE_NAME=${PLUGIN_NAME}".jpl"
-SO_FILE_NAME="lib"${PLUGIN_NAME}".so"
-DAEMON_SRC="${DAEMON}/src"
-CONTRIB_PATH="${DAEMON}/contrib"
-DESTINATION_PATH="./../build/"
-PLUGINS_LIB="../lib"
-LIBS_DIR="/home/${USER}/Libs"
-
-
-CONTRIB_PLATFORM_CURT=x86_64
-CONTRIB_PLATFORM=${CONTRIB_PLATFORM_CURT}-linux-gnu
-
-mkdir -p lib/${CONTRIB_PLATFORM}
-mkdir -p ${DESTINATION_PATH}/jpl
-
-# Compile
-clang++ -std=c++17 -shared -fPIC \
--Wl,-Bsymbolic,-rpath,"\${ORIGIN}" \
--Wall -Wextra \
--Wno-unused-variable \
--Wno-unused-function \
--Wno-unused-parameter \
--D${PROCESSOR} \
--I"." \
--I${DAEMON_SRC} \
--I"${CONTRIB_PATH}/${CONTRIB_PLATFORM}/include" \
--I"${CONTRIB_PATH}/${CONTRIB_PLATFORM}/include/opencv4" \
--I${LIBS_DIR}/_tensorflow_cc/include \
--I${LIBS_DIR}/_tensorflow_cc/include/third_party/eigen3 \
--I${PLUGINS_LIB} \
-main.cpp \
-videoSubscriber.cpp \
-pluginProcessor.cpp \
-pluginMediaHandler.cpp \
-TFInference.cpp \
-pluginInference.cpp \
-pluginParameters.cpp \
--L${CONTRIB_PATH}/${CONTRIB_PLATFORM}/lib/ \
--L${CONTRIB_PATH}/${CONTRIB_PLATFORM}/lib/opencv4/3rdparty/ \
--L${LIBS_DIR}/_tensorflow_cc/lib/${CONTRIB_PLATFORM}-gpu61/ \
--lswscale \
--lavutil \
--lopencv_imgcodecs \
--lopencv_imgproc \
--lopencv_core \
--ltensorflow_cc \
--lpng \
--o lib/${CONTRIB_PLATFORM}/${SO_FILE_NAME}
-# (above) Always put opencv_core after all other opencv libs
-# (above) Always put avutil after all other ffmpeg libs
-# (above) Always put png after all other libs
-
-cp ${LIBS_DIR}/_tensorflow_cc/lib/${CONTRIB_PLATFORM}-gpu61/libtensorflow_cc.so lib/$CONTRIB_PLATFORM/libtensorflow_cc.so.2
-cp /usr/lib/${CONTRIB_PLATFORM}/libswscale.so.4 lib/$CONTRIB_PLATFORM
-cp /usr/lib/${CONTRIB_PLATFORM}/libavutil.so.55 lib/$CONTRIB_PLATFORM
-cp /usr/lib/${CONTRIB_PLATFORM}/libpng16.so.16 lib/$CONTRIB_PLATFORM
-cp ${CUDALIBS}libcudart.so.10.0 lib/$CONTRIB_PLATFORM
-cp ${CUDALIBS}libcublas.so.10.0 lib/$CONTRIB_PLATFORM
-cp ${CUDALIBS}libcufft.so.10.0 lib/$CONTRIB_PLATFORM
-cp ${CUDALIBS}libcurand.so.10.0 lib/$CONTRIB_PLATFORM
-cp ${CUDALIBS}libcusolver.so.10.0 lib/$CONTRIB_PLATFORM
-cp ${CUDALIBS}libcusparse.so.10.0 lib/$CONTRIB_PLATFORM
-cp ${CUDALIBS}libcudnn.so.7 lib/$CONTRIB_PLATFORM
-
-mkdir ./data/models
-cp ./modelsSRC/mModel-resnet50float.pb ./data/models/mModel.pb
-cp ./preferences-tfcc.json ./data/preferences.json
-
-zip -r ${JPL_FILE_NAME} data manifest.json lib
-mv ${JPL_FILE_NAME} ${DESTINATION_PATH}/jpl/
-
-# Cleanup
-# Remove lib after compilation
-rm -rf lib
-rm ./data/models/mModel.pb
-rm ./data/preferences.json
diff --git a/GreenScreen/main.cpp b/GreenScreen/main.cpp
index 658eba0..b14e289 100644
--- a/GreenScreen/main.cpp
+++ b/GreenScreen/main.cpp
@@ -26,7 +26,7 @@
 #include "pluginMediaHandler.h"
 
 #ifdef WIN32
-#define EXPORT_PLUGIN __declspec (dllexport)
+#define EXPORT_PLUGIN __declspec(dllexport)
 #else
 #define EXPORT_PLUGIN
 #endif
@@ -34,9 +34,10 @@
 #define GreenScreen_VERSION_MAJOR 1
 #define GreenScreen_VERSION_MINOR 0
 
-extern "C" 
-{
-void pluginExit(void) { }
+extern "C" {
+void
+pluginExit(void)
+{}
 
 EXPORT_PLUGIN JAMI_PluginExitFunc
 JAMI_dynPluginInit(const JAMI_PluginAPI* api)
@@ -44,18 +45,18 @@
     std::cout << "**************************" << std::endl << std::endl;
     std::cout << "**  GREENSCREEN PLUGIN  **" << std::endl;
     std::cout << "**************************" << std::endl << std::endl;
-    std::cout << " Version " << GreenScreen_VERSION_MAJOR << "." << GreenScreen_VERSION_MINOR << std::endl;
+    std::cout << " Version " << GreenScreen_VERSION_MAJOR << "." << GreenScreen_VERSION_MINOR
+              << std::endl;
 
     // If invokeService doesn't return an error
-    if(api) 
-    {
+    if (api) {
         std::map<std::string, std::string> ppm;
         api->invokeService(api, "getPluginPreferences", &ppm);
         std::string dataPath;
         api->invokeService(api, "getPluginDataPath", &dataPath);
         auto fmp = std::make_unique<jami::PluginMediaHandler>(std::move(ppm), std::move(dataPath));
 
-        if(!api->manageComponent(api,"CallMediaHandlerManager", fmp.release())) {
+        if (!api->manageComponent(api, "CallMediaHandlerManager", fmp.release())) {
             return pluginExit;
         }
     }
diff --git a/GreenScreen/manifest.json b/GreenScreen/manifest.json
index d534b0d..49d8a79 100644
--- a/GreenScreen/manifest.json
+++ b/GreenScreen/manifest.json
@@ -1,5 +1,5 @@
 {
-	"name": "GreenScreen",
-	"description" : "GreenScreen Plugin with Tensorflow 2.1.1",
-	"version" : "1.0"
+    "name": "GreenScreen",
+    "description" : "GreenScreen Plugin with Tensorflow 2.1.1",
+    "version" : "1.0"
 }
diff --git a/GreenScreen/package.json b/GreenScreen/package.json
new file mode 100644
index 0000000..b5f0c31
--- /dev/null
+++ b/GreenScreen/package.json
@@ -0,0 +1,20 @@
+{
+    "name": "GreenScreen",
+    "version": "1.0",
+    "extractLibs": true,
+    "deps": [
+        "ffmpeg",
+        "opencv"],
+    "defines": [
+        "TFLITE=False",
+        "CPU=False"],
+    "custom_scripts": {
+        "pre_build": [
+            "mkdir msvc"
+        ],
+        "build": [
+            "cmake --build ./msvc --config Release"
+            ],
+        "post_build": []
+    }
+}
diff --git a/GreenScreen/pluginInference.cpp b/GreenScreen/pluginInference.cpp
index cb9d32a..9f7de15 100644
--- a/GreenScreen/pluginInference.cpp
+++ b/GreenScreen/pluginInference.cpp
@@ -15,51 +15,53 @@
  *
  *  You should have received a copy of the GNU General Public License
  *  along with this program; if not, write to the Free Software
- *  Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301 USA.
+ *  Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301
+ * USA.
  */
 
 #include "pluginInference.h"
 // Std libraries
+#include "pluglog.h"
 #include <cstring>
 #include <numeric>
-#include "pluglog.h"
 
 const char sep = separator();
 const std::string TAG = "FORESEG";
 
 namespace jami {
 
-PluginInference::PluginInference(TFModel model) : TensorflowInference(model)
+PluginInference::PluginInference(TFModel model)
+    : TensorflowInference(model)
 {
 #ifndef TFLITE
-	//Initialize TENSORFLOW_CC lib
-	static const char* kFakeName = "fake program name";
-	int argc = 1;
-	char* fake_name_copy = strdup(kFakeName);
-	char** argv = &fake_name_copy;
-	tensorflow::port::InitMain(kFakeName, &argc, &argv);
-	if (argc > 1) {
-		Plog::log(Plog::LogPriority::INFO, "TENSORFLOW INIT", "Unknown argument " );
-	}
-	free(fake_name_copy);
-#endif	//TFLITE
+    // Initialize TENSORFLOW_CC lib
+    static const char* kFakeName = "fake program name";
+    int argc = 1;
+    char* fake_name_copy = strdup(kFakeName);
+    char** argv = &fake_name_copy;
+    tensorflow::port::InitMain(kFakeName, &argc, &argv);
+    if (argc > 1) {
+        Plog::log(Plog::LogPriority::INFO, "TENSORFLOW INIT", "Unknown argument ");
+    }
+    free(fake_name_copy);
+#endif // TFLITE
 }
 
-PluginInference::~PluginInference(){}
+PluginInference::~PluginInference() {}
 
 #ifdef TFLITE
 std::pair<uint8_t*, std::vector<int>>
 PluginInference::getInput()
 {
-	// We assume that we have only one input
-	// Get the input index
-	int input = interpreter->inputs()[0];
+    // We assume that we have only one input
+    // Get the input index
+    int input = interpreter->inputs()[0];
 
-	uint8_t *inputDataPointer = interpreter->typed_tensor<uint8_t>(input);
-	// Get the input dimensions vector
-	std::vector<int> dims = getTensorDimensions(input);
+    uint8_t* inputDataPointer = interpreter->typed_tensor<uint8_t>(input);
+    // Get the input dimensions vector
+    std::vector<int> dims = getTensorDimensions(input);
 
-	return std::make_pair(inputDataPointer, dims);
+    return std::make_pair(inputDataPointer, dims);
 }
 
 // // Types returned by tensorflow
@@ -81,142 +83,135 @@
 std::vector<float>
 PluginInference::masksPredictions() const
 {
-	int outputIndex = interpreter->outputs()[0];
-	std::vector<int> dims = getTensorDimensions(outputIndex);
-	int totalDimensions = 1;
-	for (size_t i = 0; i < dims.size(); i++)
-	{
-		totalDimensions *= dims[i];
-	}
-	std::vector<float> out;
+    int outputIndex = interpreter->outputs()[0];
+    std::vector<int> dims = getTensorDimensions(outputIndex);
+    int totalDimensions = 1;
+    for (size_t i = 0; i < dims.size(); i++) {
+        totalDimensions *= dims[i];
+    }
+    std::vector<float> out;
 
-	int type = interpreter->tensor(outputIndex)->type;
-	switch(type) {
-		case 1:
-		{
-			float* outputDataPointer = interpreter->typed_tensor<float>(outputIndex);
-			std::vector<float> output(outputDataPointer, outputDataPointer + totalDimensions);
-			out=std::vector<float>(output.begin(), output.end());
-			break;
-		}
-		case 2:
-		{
-			int* outputDataPointer = interpreter->typed_tensor<int>(outputIndex);
-			std::vector<int> output(outputDataPointer, outputDataPointer + totalDimensions);
-			out=std::vector<float>(output.begin(), output.end());
-			break;
-		}
-		case 4:
-		{
-			int64_t* outputDataPointer = interpreter->typed_tensor<int64_t>(outputIndex);
-			std::vector<int64_t> output(outputDataPointer, outputDataPointer + totalDimensions);
-			out=std::vector<float>(output.begin(), output.end());
-			break;
-		}
-	}
+    int type = interpreter->tensor(outputIndex)->type;
+    switch (type) {
+    case 1: {
+        float* outputDataPointer = interpreter->typed_tensor<float>(outputIndex);
+        std::vector<float> output(outputDataPointer, outputDataPointer + totalDimensions);
+        out = std::vector<float>(output.begin(), output.end());
+        break;
+    }
+    case 2: {
+        int* outputDataPointer = interpreter->typed_tensor<int>(outputIndex);
+        std::vector<int> output(outputDataPointer, outputDataPointer + totalDimensions);
+        out = std::vector<float>(output.begin(), output.end());
+        break;
+    }
+    case 4: {
+        int64_t* outputDataPointer = interpreter->typed_tensor<int64_t>(outputIndex);
+        std::vector<int64_t> output(outputDataPointer, outputDataPointer + totalDimensions);
+        out = std::vector<float>(output.begin(), output.end());
+        break;
+    }
+    }
 
-	return out;
+    return out;
 }
 
 void
 PluginInference::setExpectedImageDimensions()
 {
-	// We assume that we have only one input
-	// Get the input index
-	int input = interpreter->inputs()[0];
-	// Get the input dimensions vector
-	std::vector<int> dims = getTensorDimensions(input);
-	
-	imageWidth = dims.at(1);
-	imageHeight = dims.at(2);
-	imageNbChannels = dims.at(3);
+    // We assume that we have only one input
+    // Get the input index
+    int input = interpreter->inputs()[0];
+    // Get the input dimensions vector
+    std::vector<int> dims = getTensorDimensions(input);
+
+    imageWidth = dims.at(1);
+    imageHeight = dims.at(2);
+    imageNbChannels = dims.at(3);
 }
-#else //TFLITE
+#else // TFLITE
 // Given an image file name, read in the data, try to decode it as an image,
 // resize it to the requested size, and then scale the values as desired.
 void
 PluginInference::ReadTensorFromMat(const cv::Mat& image)
 {
-	imageTensor = tensorflow::Tensor(tensorflow::DataType::DT_FLOAT, tensorflow::TensorShape({ 1, image.cols, image.rows, 3 }));
-	float* p = imageTensor.flat<float>().data();
-	cv::Mat temp(image.rows, image.cols, CV_32FC3, p);
-	image.convertTo(temp, CV_32FC3);
+    imageTensor = tensorflow::Tensor(tensorflow::DataType::DT_FLOAT,
+                                     tensorflow::TensorShape({1, image.cols, image.rows, 3}));
+    float* p = imageTensor.flat<float>().data();
+    cv::Mat temp(image.rows, image.cols, CV_32FC3, p);
+    image.convertTo(temp, CV_32FC3);
 }
 
 std::vector<float>
 PluginInference::masksPredictions() const
 {
-	std::vector<int> dims;
-	int flatSize = 1;
-	int num_dimensions = outputs[0].shape().dims();
-	for(int ii_dim=0; ii_dim<num_dimensions; ii_dim++) {
-		dims.push_back(outputs[0].shape().dim_size(ii_dim));
-		flatSize *= outputs[0].shape().dim_size(ii_dim);
-	}
+    std::vector<int> dims;
+    int flatSize = 1;
+    int num_dimensions = outputs[0].shape().dims();
+    for (int ii_dim = 0; ii_dim < num_dimensions; ii_dim++) {
+        dims.push_back(outputs[0].shape().dim_size(ii_dim));
+        flatSize *= outputs[0].shape().dim_size(ii_dim);
+    }
 
-	std::vector<float> out;
-	int type = outputs[0].dtype();
+    std::vector<float> out;
+    int type = outputs[0].dtype();
 
-	switch(type) {
-		case tensorflow::DataType::DT_FLOAT:
-		{
-			for (int offset = 0; offset < flatSize; offset++) {
-				out.push_back(outputs[0].flat<float>()(offset));
-			}
-			break;
-		}
-		case tensorflow::DataType::DT_INT32:
-		{
-			for (int offset = 0; offset < flatSize; offset++) {
-				out.push_back(static_cast<float> (outputs[0].flat<tensorflow::int32>()(offset)));
-			}
-			break;
-		}
-		case tensorflow::DataType::DT_INT64:
-		{
-			for (int offset = 0; offset < flatSize; offset++) {
-				out.push_back(static_cast<float> (outputs[0].flat<tensorflow::int64>()(offset)));
-			}
-			break;
-		}
-		default:
-		{
-			for (int offset = 0; offset < flatSize; offset++) {
-				out.push_back(0);
-			}
-			break;
-		}
-	}
-	return out;
+    switch (type) {
+    case tensorflow::DataType::DT_FLOAT: {
+        for (int offset = 0; offset < flatSize; offset++) {
+            out.push_back(outputs[0].flat<float>()(offset));
+        }
+        break;
+    }
+    case tensorflow::DataType::DT_INT32: {
+        for (int offset = 0; offset < flatSize; offset++) {
+            out.push_back(static_cast<float>(outputs[0].flat<tensorflow::int32>()(offset)));
+        }
+        break;
+    }
+    case tensorflow::DataType::DT_INT64: {
+        for (int offset = 0; offset < flatSize; offset++) {
+            out.push_back(static_cast<float>(outputs[0].flat<tensorflow::int64>()(offset)));
+        }
+        break;
+    }
+    default: {
+        for (int offset = 0; offset < flatSize; offset++) {
+            out.push_back(0);
+        }
+        break;
+    }
+    }
+    return out;
 }
 
 void
 PluginInference::setExpectedImageDimensions()
 {
-	if (tfModel.dims[1] != 0)
-		imageWidth = tfModel.dims[1];
-	if (tfModel.dims[2] != 0)
-		imageHeight = tfModel.dims[2];
-	if (tfModel.dims[3] != 0)
-		imageNbChannels = tfModel.dims[3];
+    if (tfModel.dims[1] != 0)
+        imageWidth = tfModel.dims[1];
+    if (tfModel.dims[2] != 0)
+        imageHeight = tfModel.dims[2];
+    if (tfModel.dims[3] != 0)
+        imageNbChannels = tfModel.dims[3];
 }
 #endif
 
 int
 PluginInference::getImageWidth() const
 {
-	return imageWidth;
+    return imageWidth;
 }
 
 int
 PluginInference::getImageHeight() const
 {
-	return imageHeight;
+    return imageHeight;
 }
 
 int
 PluginInference::getImageNbChannels() const
 {
-	return imageNbChannels;
+    return imageNbChannels;
 }
 } // namespace jami
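
Note on the TFLITE build of PluginInference::masksPredictions(): the switch compares
interpreter->tensor(outputIndex)->type against the raw values 1, 2 and 4, which correspond to
kTfLiteFloat32, kTfLiteInt32 and kTfLiteInt64 in TFLite's TfLiteType enum. A minimal sketch of the
same conversion with the named constants is below; the helper name and the include path are
illustrative assumptions, not code from the plugin.

    // Sketch only: same dtype handling as masksPredictions(), using named TfLiteType values.
    #include <tensorflow/lite/interpreter.h> // assumed to be on the include path

    #include <cstdint>
    #include <vector>

    static std::vector<float>
    outputTensorToFloats(tflite::Interpreter& interpreter, int outputIndex, int totalDimensions)
    {
        std::vector<float> out;
        switch (interpreter.tensor(outputIndex)->type) {
        case kTfLiteFloat32: {
            float* p = interpreter.typed_tensor<float>(outputIndex);
            out.assign(p, p + totalDimensions);
            break;
        }
        case kTfLiteInt32: {
            int* p = interpreter.typed_tensor<int>(outputIndex);
            out.assign(p, p + totalDimensions);
            break;
        }
        case kTfLiteInt64: {
            int64_t* p = interpreter.typed_tensor<int64_t>(outputIndex);
            out.assign(p, p + totalDimensions);
            break;
        }
        default:
            break; // other tensor types are not expected by this plugin
        }
        return out;
    }
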
diff --git a/GreenScreen/pluginInference.h b/GreenScreen/pluginInference.h
index 37436af..8d405f8 100644
--- a/GreenScreen/pluginInference.h
+++ b/GreenScreen/pluginInference.h
@@ -15,7 +15,8 @@
  *
  *  You should have received a copy of the GNU General Public License
  *  along with this program; if not, write to the Free Software
- *  Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301 USA.
+ *  Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301
+ * USA.
  */
 
 #pragma once
@@ -26,59 +27,58 @@
 #include <opencv2/core.hpp>
 // STL
 #include <array>
-#include <vector>
-#include <tuple>
 #include <iostream>
+#include <tuple>
+#include <vector>
 
 namespace jami {
 
-class PluginInference : public TensorflowInference {
+class PluginInference : public TensorflowInference
+{
 public:
-	/**
-	 * @brief PluginInference
-	 * Is a type of supervised learning where we detect objects in images
-	 * Draw a bounding boxes around them
-	 * @param model
-	 */
-	PluginInference(TFModel model);
-	~PluginInference();
+    /**
+     * @brief PluginInference
+     * Is a type of supervised learning where we detect objects in images
+     * and draw bounding boxes around them
+     * @param model
+     */
+    PluginInference(TFModel model);
+    ~PluginInference();
 
 #ifdef TFLITE
-	/**
-	 * @brief getInput
-	 * Returns the input where to fill the data
-	 * Use this method if you know what you are doing, all the necessary checks
-	 * on dimensions must be done on your part
-	 * @return std::tuple<uint8_t *, std::vector<int>>
-	 * The first element in the tuple is the pointer to the storage location
-	 * The second element is a dimensions vector that will helps you make
-	 * The necessary checks to make your data size match the input one
-	 */
-	std::pair<uint8_t*, std::vector<int>> getInput();
+    /**
+     * @brief getInput
+     * Returns the input buffer where the data is to be written
+     * Use this method only if you know what you are doing; all the necessary
+     * checks on dimensions must be done on your part
+     * @return std::pair<uint8_t*, std::vector<int>>
+     * The first element in the pair is the pointer to the storage location
+     * The second element is a dimensions vector that will help you make
+     * the necessary checks so that your data size matches the input one
+     */
+    std::pair<uint8_t*, std::vector<int>> getInput();
 
 #else
-	void ReadTensorFromMat(const cv::Mat& image);
+    void ReadTensorFromMat(const cv::Mat& image);
 
-#endif //TFLITE
+#endif // TFLITE
 
-	std::vector<float> masksPredictions() const;
+    std::vector<float> masksPredictions() const;
 
+    /**
+     * @brief setExpectedImageDimensions
+     * Sets imageWidth, imageHeight and imageNbChannels from the model input
+     */
+    void setExpectedImageDimensions();
 
-	/**
-	 * @brief setExpectedImageDimensions
-	 * Sets imageWidth and imageHeight from the sources
-	 */
-	void setExpectedImageDimensions();
-
-	// Getters
-	int getImageWidth() const;
-	int getImageHeight() const;
-	int getImageNbChannels() const;
-
+    // Getters
+    int getImageWidth() const;
+    int getImageHeight() const;
+    int getImageNbChannels() const;
 
 private:
-	int imageWidth = 0;
-	int imageHeight = 0;
-	int imageNbChannels = 0;
+    int imageWidth = 0;
+    int imageHeight = 0;
+    int imageNbChannels = 0;
 };
 } // namespace jami
diff --git a/GreenScreen/pluginMediaHandler.cpp b/GreenScreen/pluginMediaHandler.cpp
index c384e63..a9e4fe0 100644
--- a/GreenScreen/pluginMediaHandler.cpp
+++ b/GreenScreen/pluginMediaHandler.cpp
@@ -15,7 +15,8 @@
  *
  *  You should have received a copy of the GNU General Public License
  *  along with this program; if not, write to the Free Software
- *  Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301 USA.
+ *  Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301
+ * USA.
  */
 
 #include "pluginMediaHandler.h"
@@ -28,83 +29,86 @@
 
 namespace jami {
 
-PluginMediaHandler::PluginMediaHandler(std::map<std::string, std::string>&& ppm, std::string&& datapath):
-	datapath_{datapath}, ppm_{ppm}
+PluginMediaHandler::PluginMediaHandler(std::map<std::string, std::string>&& ppm,
+                                       std::string&& datapath)
+    : datapath_ {datapath}
+    , ppm_ {ppm}
 {
-	setGlobalPluginParameters(ppm_);
-	setId(datapath_);
-	mVS = std::make_shared<VideoSubscriber>(datapath_);
+    setGlobalPluginParameters(ppm_);
+    setId(datapath_);
+    mVS = std::make_shared<VideoSubscriber>(datapath_);
 }
 
 void
 PluginMediaHandler::notifyAVFrameSubject(const StreamData& data, jami::avSubjectPtr subject)
 {
-	Plog::log(Plog::LogPriority::INFO, TAG, "IN AVFRAMESUBJECT");
-	std::ostringstream oss;
-	std::string direction = data.direction ? "Receive" : "Preview";
-	oss << "NEW SUBJECT: [" << data.id << "," << direction << "]" << std::endl;
+    Plog::log(Plog::LogPriority::INFO, TAG, "IN AVFRAMESUBJECT");
+    std::ostringstream oss;
+    std::string direction = data.direction ? "Receive" : "Preview";
+    oss << "NEW SUBJECT: [" << data.id << "," << direction << "]" << std::endl;
 
-	bool preferredStreamDirection = false;
-	if (!ppm_.empty() && ppm_.find("streamslist") != ppm_.end()) {
-		Plog::log(Plog::LogPriority::INFO, TAG, "SET PARAMETERS");
-		preferredStreamDirection = ppm_.at("streamslist")=="in"?true:false;
-	}
-	oss << "preferredStreamDirection " << preferredStreamDirection << std::endl;
-	if (data.type == StreamType::video && !data.direction && data.direction == preferredStreamDirection) {
-		subject->attach(mVS.get()); // my image
-		oss << "got my sent image attached" << std::endl;
-	}
-	else if (data.type == StreamType::video && data.direction && data.direction == preferredStreamDirection)
-		subject->attach(mVS.get()); // the image I receive from the others on the call
+    bool preferredStreamDirection = false;
+    if (!ppm_.empty() && ppm_.find("streamslist") != ppm_.end()) {
+        Plog::log(Plog::LogPriority::INFO, TAG, "SET PARAMETERS");
+        preferredStreamDirection = ppm_.at("streamslist") == "in" ? true : false;
+    }
+    oss << "preferredStreamDirection " << preferredStreamDirection << std::endl;
+    if (data.type == StreamType::video && !data.direction
+        && data.direction == preferredStreamDirection) {
+        subject->attach(mVS.get()); // my image
+        oss << "got my sent image attached" << std::endl;
+    } else if (data.type == StreamType::video && data.direction
+               && data.direction == preferredStreamDirection)
+        subject->attach(mVS.get()); // the image I receive from the others on the call
 
-	Plog::log(Plog::LogPriority::INFO, TAG, oss.str());
+    Plog::log(Plog::LogPriority::INFO, TAG, oss.str());
 }
 
 std::map<std::string, std::string>
 PluginMediaHandler::getCallMediaHandlerDetails()
 {
-	std::map<std::string, std::string> mediaHandlerDetails = {};
-	mediaHandlerDetails["name"] = NAME;
-	mediaHandlerDetails["iconPath"] = datapath_ + sep + "icon.png";
-	mediaHandlerDetails["pluginId"] = id();
+    std::map<std::string, std::string> mediaHandlerDetails = {};
+    mediaHandlerDetails["name"] = NAME;
+    mediaHandlerDetails["iconPath"] = datapath_ + sep + "icon.png";
+    mediaHandlerDetails["pluginId"] = id();
 
-	return mediaHandlerDetails;
+    return mediaHandlerDetails;
 }
 
 void
 PluginMediaHandler::setPreferenceAttribute(const std::string& key, const std::string& value)
 {
-	auto it = ppm_.find(key);
-	if (it != ppm_.end()) {
-		if (ppm_[key] != value) {
-			ppm_[key] = value;
-			if (key == "background") {
-				mVS->setBackground(value);
-			}
-		}
-	}
+    auto it = ppm_.find(key);
+    if (it != ppm_.end()) {
+        if (ppm_[key] != value) {
+            ppm_[key] = value;
+            if (key == "background") {
+                mVS->setBackground(value);
+            }
+        }
+    }
 }
 
 bool
 PluginMediaHandler::preferenceMapHasKey(const std::string& key)
 {
-	if (key == "background") {
-		return true;
-	}
-	return false;
+    if (key == "background") {
+        return true;
+    }
+    return false;
 }
 
 void
 PluginMediaHandler::detach()
 {
-	mVS->detach();
+    mVS->detach();
 }
 
 PluginMediaHandler::~PluginMediaHandler()
 {
-	std::ostringstream oss;
-	oss << " ~FORESEG Plugin" << std::endl;
-	Plog::log(Plog::LogPriority::INFO, TAG, oss.str());
-	detach();
+    std::ostringstream oss;
+    oss << " ~FORESEG Plugin" << std::endl;
+    Plog::log(Plog::LogPriority::INFO, TAG, oss.str());
+    detach();
 }
-}
+} // namespace jami
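
Note on PluginMediaHandler::notifyAVFrameSubject(): the preview branch and the receive branch both
attach the same mVS subscriber, so the only behavioural difference between them is the extra
"got my sent image attached" log line. If that is intentional, the two branches could be folded into
one condition. The fragment below is a sketch of such a drop-in replacement for the existing
if/else, reusing the local names already defined in that method (data, preferredStreamDirection,
subject, mVS, oss); it is illustrative only.

    // Sketch: attach once when the stream is video and matches the preferred direction.
    if (data.type == StreamType::video && data.direction == preferredStreamDirection) {
        subject->attach(mVS.get()); // preview if direction is false, received stream if true
        oss << "video stream attached" << std::endl;
    }
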
diff --git a/GreenScreen/pluginMediaHandler.h b/GreenScreen/pluginMediaHandler.h
index 3bf71de..4748609 100644
--- a/GreenScreen/pluginMediaHandler.h
+++ b/GreenScreen/pluginMediaHandler.h
@@ -15,12 +15,13 @@
  *
  *  You should have received a copy of the GNU General Public License
  *  along with this program; if not, write to the Free Software
- *  Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301 USA.
+ *  Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301
+ * USA.
  */
 
 #pragma once
 
-//Project
+// Project
 #include "videoSubscriber.h"
 
 // Plugin
@@ -31,24 +32,25 @@
 
 namespace jami {
 
-class PluginMediaHandler : public jami::CallMediaHandler {
+class PluginMediaHandler : public jami::CallMediaHandler
+{
 public:
-	PluginMediaHandler(std::map<std::string, std::string>&& ppm, std::string&& dataPath);
-	~PluginMediaHandler() override;
+    PluginMediaHandler(std::map<std::string, std::string>&& ppm, std::string&& dataPath);
+    ~PluginMediaHandler() override;
 
-	virtual void notifyAVFrameSubject(const StreamData& data, avSubjectPtr subject) override;
-	virtual std::map<std::string, std::string> getCallMediaHandlerDetails() override;
+    virtual void notifyAVFrameSubject(const StreamData& data, avSubjectPtr subject) override;
+    virtual std::map<std::string, std::string> getCallMediaHandlerDetails() override;
 
-	virtual void detach() override;
-	virtual void setPreferenceAttribute(const std::string& key, const std::string& value) override;
-	virtual bool preferenceMapHasKey(const std::string& key) override;
+    virtual void detach() override;
+    virtual void setPreferenceAttribute(const std::string& key, const std::string& value) override;
+    virtual bool preferenceMapHasKey(const std::string& key) override;
 
-	std::shared_ptr<VideoSubscriber> mVS;
+    std::shared_ptr<VideoSubscriber> mVS;
 
-	std::string dataPath() const { return datapath_; }
+    std::string dataPath() const { return datapath_; }
 
 private:
-	const std::string datapath_;
-	std::map<std::string, std::string> ppm_;
+    const std::string datapath_;
+    std::map<std::string, std::string> ppm_;
 };
-}
+} // namespace jami
diff --git a/GreenScreen/pluginParameters.cpp b/GreenScreen/pluginParameters.cpp
index 0729cbb..2d40b2f 100644
--- a/GreenScreen/pluginParameters.cpp
+++ b/GreenScreen/pluginParameters.cpp
@@ -18,7 +18,7 @@
  *  Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301 USA.
  */
 
-#include "pluginParameters.h"// Logger
+#include "pluginParameters.h" // Logger
 #include "pluglog.h"
 
 PluginParameters pluginParameters;
@@ -27,30 +27,31 @@
 setGlobalPluginParameters(std::map<std::string, std::string> pp)
 {
     if (!pp.empty()) {
-        if(pp.find("streamslist") != pp.end()) {
+        if (pp.find("streamslist") != pp.end()) {
             pluginParameters.stream = pp.at("streamslist");
             Plog::log(Plog::LogPriority::INFO, "GLOBAL STREAM ", pluginParameters.stream);
         }
-        if(pp.find("modellist") != pp.end()) {
+        if (pp.find("modellist") != pp.end()) {
             pluginParameters.model = pp.at("modellist");
             Plog::log(Plog::LogPriority::INFO, "GLOBAL MODEL ", pluginParameters.model);
         }
-        if(pp.find("background") != pp.end()) {
+        if (pp.find("background") != pp.end()) {
             pluginParameters.image = pp.at("background");
             Plog::log(Plog::LogPriority::INFO, "GLOBAL IMAGE ", pluginParameters.image);
         }
     }
 }
 
-void getGlobalPluginParameters(PluginParameters* mPluginParameters)
+void
+getGlobalPluginParameters(PluginParameters* mPluginParameters)
 {
     mPluginParameters->image = pluginParameters.image;
     mPluginParameters->model = pluginParameters.model;
     mPluginParameters->stream = pluginParameters.stream;
 }
 
-
-PluginParameters* getGlobalPluginParameters()
+PluginParameters*
+getGlobalPluginParameters()
 {
     return &pluginParameters;
 }
\ No newline at end of file
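
setGlobalPluginParameters() repeats the same find()/at() lookup for every preference key. A small
helper in the same spirit keeps the struct defaults when a key is absent; the helper name is
hypothetical and the Plog calls would still be done separately, so this is a sketch rather than a
proposed change.

    #include <map>
    #include <string>

    // Hypothetical helper mirroring the find()/at() pattern used above.
    static std::string
    getOr(const std::map<std::string, std::string>& pp, const std::string& key, std::string fallback)
    {
        auto it = pp.find(key);
        return it != pp.end() ? it->second : fallback;
    }

    // Usage sketch:
    //   pluginParameters.stream = getOr(pp, "streamslist", pluginParameters.stream);
    //   pluginParameters.model  = getOr(pp, "modellist",   pluginParameters.model);
    //   pluginParameters.image  = getOr(pp, "background",  pluginParameters.image);
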
diff --git a/GreenScreen/pluginParameters.h b/GreenScreen/pluginParameters.h
index c774aeb..6371d44 100644
--- a/GreenScreen/pluginParameters.h
+++ b/GreenScreen/pluginParameters.h
@@ -24,19 +24,20 @@
 #include <string>
 #include <map>
 
-struct PluginParameters {
+struct PluginParameters
+{
     std::string stream = "out";
 #ifdef TFLITE
     bool useGPU = false;
     std::string model = "mModel.tflite";
 #else
-    #ifndef CPU
-        bool useGPU = true;
-    #else
-        bool useGPU = false;
-    #endif
-        std::string model = "mModel.pb";
-#endif //TFLITE
+#ifndef CPU
+    bool useGPU = true;
+#else
+    bool useGPU = false;
+#endif
+    std::string model = "mModel.pb";
+#endif // TFLITE
     std::string image = "background2.png";
 };
 
diff --git a/GreenScreen/pluginProcessor.cpp b/GreenScreen/pluginProcessor.cpp
index d289b87..4ec733e 100644
--- a/GreenScreen/pluginProcessor.cpp
+++ b/GreenScreen/pluginProcessor.cpp
@@ -15,7 +15,8 @@
  *
  *  You should have received a copy of the GNU General Public License
  *  along with this program; if not, write to the Free Software
- *  Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301 USA.
+ *  Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301
+ * USA.
  */
 
 #include "pluginProcessor.h"
@@ -23,9 +24,9 @@
 #include <algorithm>
 #include <cstring>
 // OpenCV headers
-#include <opencv2/imgproc.hpp>
-#include <opencv2/imgcodecs.hpp>
 #include <opencv2/core.hpp>
+#include <opencv2/imgcodecs.hpp>
+#include <opencv2/imgproc.hpp>
 // Logger
 #include <pluglog.h>
 
@@ -39,267 +40,262 @@
 
 PluginParameters* mPluginParameters = getGlobalPluginParameters();
 
-namespace jami
-{
+namespace jami {
 
-PluginProcessor::PluginProcessor(const std::string& dataPath):
-pluginInference{TFModel{dataPath + sep + "models" + sep + mPluginParameters->model}}
+PluginProcessor::PluginProcessor(const std::string& dataPath)
+    : pluginInference {TFModel {dataPath + sep + "models" + sep + mPluginParameters->model}}
 {
-	initModel();
-	setBackgroundImage(mPluginParameters->image);
+    initModel();
+    setBackgroundImage(mPluginParameters->image);
 }
 
 void
 PluginProcessor::setBackgroundImage(const std::string& backgroundPath)
 {
-	cv::Size size = cv::Size{0,0};
+    cv::Size size = cv::Size {0, 0};
 
-	if (!backgroundImage.empty())
-		size = backgroundImage.size();
+    if (!backgroundImage.empty())
+        size = backgroundImage.size();
 
-	cv::Mat newBackgroundImage = cv::imread(backgroundPath);
-	if (newBackgroundImage.cols == 0) {
-		Plog::log(Plog::LogPriority::ERR, TAG, "Background image not Loaded");
-	}
-	else {
-		Plog::log(Plog::LogPriority::INFO, TAG, "Background image Loaded");
-		cv::cvtColor(newBackgroundImage, newBackgroundImage, cv::COLOR_BGR2RGB);
-		newBackgroundImage.convertTo(newBackgroundImage, CV_32FC3);
-		if (size.height) {
-			cv::resize(newBackgroundImage, newBackgroundImage, size);
-			backgroundRotation = 0;
-		}
-		backgroundImage = newBackgroundImage.clone();
-		newBackgroundImage.release();
-		hasBackground_ = true;
-	}
+    cv::Mat newBackgroundImage = cv::imread(backgroundPath);
+    if (newBackgroundImage.cols == 0) {
+        Plog::log(Plog::LogPriority::ERR, TAG, "Background image not Loaded");
+    } else {
+        Plog::log(Plog::LogPriority::INFO, TAG, "Background image Loaded");
+        cv::cvtColor(newBackgroundImage, newBackgroundImage, cv::COLOR_BGR2RGB);
+        newBackgroundImage.convertTo(newBackgroundImage, CV_32FC3);
+        if (size.height) {
+            cv::resize(newBackgroundImage, newBackgroundImage, size);
+            backgroundRotation = 0;
+        }
+        backgroundImage = newBackgroundImage.clone();
+        newBackgroundImage.release();
+        hasBackground_ = true;
+    }
 }
 
 void
 PluginProcessor::initModel()
 {
-	try {
-		pluginInference.init();
-	}
-	catch (std::exception& e) {
-		Plog::log(Plog::LogPriority::ERR, TAG, e.what());
-	}
-	std::ostringstream oss;
-	oss << "Model is allocated " << pluginInference.isAllocated();
-	Plog::log(Plog::LogPriority::INFO, TAG, oss.str());
+    try {
+        pluginInference.init();
+    } catch (std::exception& e) {
+        Plog::log(Plog::LogPriority::ERR, TAG, e.what());
+    }
+    std::ostringstream oss;
+    oss << "Model is allocated " << pluginInference.isAllocated();
+    Plog::log(Plog::LogPriority::INFO, TAG, oss.str());
 }
 
-
 #ifdef TFLITE
 void
 PluginProcessor::feedInput(const cv::Mat& frame)
 {
-	auto pair = pluginInference.getInput();
-	uint8_t* inputPointer = pair.first;
+    auto pair = pluginInference.getInput();
+    uint8_t* inputPointer = pair.first;
 
-	cv::Mat temp(frame.rows, frame.cols, CV_8UC3, inputPointer);
-	frame.convertTo(temp, CV_8UC3);
+    cv::Mat temp(frame.rows, frame.cols, CV_8UC3, inputPointer);
+    frame.convertTo(temp, CV_8UC3);
 
-	inputPointer = nullptr;
+    inputPointer = nullptr;
 }
 #else
 void
 PluginProcessor::feedInput(const cv::Mat& frame)
 {
-	pluginInference.ReadTensorFromMat(frame);
+    pluginInference.ReadTensorFromMat(frame);
 }
-#endif //TFLITE
+#endif // TFLITE
 
 int
 PluginProcessor::getBackgroundRotation()
 {
-	return backgroundRotation;
+    return backgroundRotation;
 }
 
 void
 PluginProcessor::setBackgroundRotation(int angle)
 {
-	if (backgroundRotation != angle && (backgroundRotation - angle) != 0) {
-		switch (backgroundRotation - angle) {
-			case 90:
-				cv::rotate(backgroundImage, backgroundImage, cv::ROTATE_90_CLOCKWISE);
-				break;
-			case 180:
-				cv::rotate(backgroundImage, backgroundImage, cv::ROTATE_180);
-				break;
-			case -180:
-				cv::rotate(backgroundImage, backgroundImage, cv::ROTATE_180);
-				break;
-			case -90:
-				cv::rotate(backgroundImage, backgroundImage, cv::ROTATE_90_COUNTERCLOCKWISE);
-				break;
-		}
-		backgroundRotation = angle;
-	}
+    if (backgroundRotation != angle && (backgroundRotation - angle) != 0) {
+        switch (backgroundRotation - angle) {
+        case 90:
+            cv::rotate(backgroundImage, backgroundImage, cv::ROTATE_90_CLOCKWISE);
+            break;
+        case 180:
+            cv::rotate(backgroundImage, backgroundImage, cv::ROTATE_180);
+            break;
+        case -180:
+            cv::rotate(backgroundImage, backgroundImage, cv::ROTATE_180);
+            break;
+        case -90:
+            cv::rotate(backgroundImage, backgroundImage, cv::ROTATE_90_COUNTERCLOCKWISE);
+            break;
+        }
+        backgroundRotation = angle;
+    }
 }
 
 void
-PluginProcessor::computePredictions() 
+PluginProcessor::computePredictions()
 {
-	// Run the graph
-	pluginInference.runGraph();
-	auto predictions = pluginInference.masksPredictions();
+    // Run the graph
+    pluginInference.runGraph();
+    auto predictions = pluginInference.masksPredictions();
 
-	// Save the predictions
-	computedMask = predictions;
+    // Save the predictions
+    computedMask = predictions;
 }
 
 void
 PluginProcessor::printMask()
 {
-	for (size_t i = 0; i < computedMask.size(); i++)
-	{
-		// Log the predictions
-		std::ostringstream oss;
-		oss << "\nclass: "<< computedMask[i] << std::endl;
-		Plog::log(Plog::LogPriority::INFO, TAG, oss.str());
-	}
+    for (size_t i = 0; i < computedMask.size(); i++) {
+        // Log the predictions
+        std::ostringstream oss;
+        oss << "\nclass: " << computedMask[i] << std::endl;
+        Plog::log(Plog::LogPriority::INFO, TAG, oss.str());
+    }
 }
 
-
 void
 copyByLine(uchar* frameData, uchar* applyMaskData, const int lineSize, cv::Size size)
 {
-	if (3 * size.width == lineSize) {
-		std::memcpy(frameData, applyMaskData, size.height * size.width * 3);;
-	}
-	else {
-		int rows = size.height;
-		int offset = 0;
-		int maskoffset = 0;
-		for (int i = 0; i < rows; i++) {
-			std::memcpy(frameData + offset, applyMaskData + maskoffset, lineSize);
-			offset += lineSize;
-			maskoffset += 3 * size.width;
-		}
-	}
+    if (3 * size.width == lineSize) {
+        std::memcpy(frameData, applyMaskData, size.height * size.width * 3);
+    } else {
+        int rows = size.height;
+        int offset = 0;
+        int maskoffset = 0;
+        for (int i = 0; i < rows; i++) {
+            std::memcpy(frameData + offset, applyMaskData + maskoffset, lineSize);
+            offset += lineSize;
+            maskoffset += 3 * size.width;
+        }
+    }
 }
 
 void
-PluginProcessor::drawMaskOnFrame(cv::Mat& frame,
-								 cv::Mat& frameReduced, 
-								 std::vector<float>computedMask, 
-								 int lineSize, int angle)
+PluginProcessor::drawMaskOnFrame(
+    cv::Mat& frame, cv::Mat& frameReduced, std::vector<float> computedMask, int lineSize, int angle)
 {
-	if (computedMask.empty()) {
-		return;
-	}
-	if (previousMasks[0].empty()) {
-		previousMasks[0] = cv::Mat(frameReduced.rows, frameReduced.cols, CV_32FC1, double(0.));
-		previousMasks[1] = cv::Mat(frameReduced.rows, frameReduced.cols, CV_32FC1, double(0.));
-	}	
-	int maskSize = static_cast<int> (std::sqrt(computedMask.size()));
-	cv::Mat maskImg(maskSize, maskSize, CV_32FC1, computedMask.data());
+    if (computedMask.empty()) {
+        return;
+    }
+    if (previousMasks[0].empty()) {
+        previousMasks[0] = cv::Mat(frameReduced.rows, frameReduced.cols, CV_32FC1, double(0.));
+        previousMasks[1] = cv::Mat(frameReduced.rows, frameReduced.cols, CV_32FC1, double(0.));
+    }
+    int maskSize = static_cast<int>(std::sqrt(computedMask.size()));
+    cv::Mat maskImg(maskSize, maskSize, CV_32FC1, computedMask.data());
 
-	rotateFrame(-angle, maskImg);
+    rotateFrame(-angle, maskImg);
 #ifdef TFLITE
-	for (int i = 0; i < maskImg.cols; i++) {
-		for (int j = 0; j < maskImg.rows; j++) {
-			if (maskImg.at<float>(j, i) == 15)
-				maskImg.at<float>(j, i) = 255.;
-			else
-				maskImg.at<float>(j, i) = (float)((int)((0.6 * maskImg.at<float>(j, i) + 0.3 * previousMasks[0].at<float>(j, i) + 0.1 * previousMasks[1].at<float>(j, i))) % 256);
-		}
-	}
+    for (int i = 0; i < maskImg.cols; i++) {
+        for (int j = 0; j < maskImg.rows; j++) {
+            if (maskImg.at<float>(j, i) == 15)
+                maskImg.at<float>(j, i) = 255.;
+            else
+                maskImg.at<float>(j, i) = (float) ((int) ((0.6 * maskImg.at<float>(j, i)
+                                                           + 0.3 * previousMasks[0].at<float>(j, i)
+                                                           + 0.1 * previousMasks[1].at<float>(j, i)))
+                                                   % 256);
+        }
+    }
 #else // TFLITE
-	cv::resize(maskImg, maskImg, cv::Size(frameReduced.cols, frameReduced.rows));
+    cv::resize(maskImg, maskImg, cv::Size(frameReduced.cols, frameReduced.rows));
 
-	double m, M;
-	cv::minMaxLoc(maskImg, &m, &M);
+    double m, M;
+    cv::minMaxLoc(maskImg, &m, &M);
 
-	if (M < 2) { //avoid detection if there is any one in frame
-		maskImg = 0. * maskImg;
-	}
-	else {
-		for (int i = 0; i < maskImg.cols; i++) {
-			for (int j = 0; j < maskImg.rows; j++) {
-				maskImg.at<float>(j, i) = (maskImg.at<float>(j, i) - m) / (M - m);
+    if (M < 2) { // skip masking when no one is detected in the frame
+        maskImg = 0. * maskImg;
+    } else {
+        for (int i = 0; i < maskImg.cols; i++) {
+            for (int j = 0; j < maskImg.rows; j++) {
+                maskImg.at<float>(j, i) = (maskImg.at<float>(j, i) - m) / (M - m);
 
-				if (maskImg.at<float>(j, i) < 0.4)
-					maskImg.at<float>(j, i) = 0.;
-				else if (maskImg.at<float>(j, i) < 0.7) {
-					float value = maskImg.at<float>(j, i) * 0.6 + previousMasks[0].at<float>(j, i) * 0.3 + previousMasks[1].at<float>(j, i) * 0.1;
-					maskImg.at<float>(j, i) = 0.;
-					if (value > 0.7)
-						maskImg.at<float>(j, i) = 1.;
-				}
-				else
-					maskImg.at<float>(j, i) = 1.;
-			}
-		}
-	}	
+                if (maskImg.at<float>(j, i) < 0.4)
+                    maskImg.at<float>(j, i) = 0.;
+                else if (maskImg.at<float>(j, i) < 0.7) {
+                    float value = maskImg.at<float>(j, i) * 0.6
+                                  + previousMasks[0].at<float>(j, i) * 0.3
+                                  + previousMasks[1].at<float>(j, i) * 0.1;
+                    maskImg.at<float>(j, i) = 0.;
+                    if (value > 0.7)
+                        maskImg.at<float>(j, i) = 1.;
+                } else
+                    maskImg.at<float>(j, i) = 1.;
+            }
+        }
+    }
 #endif
 
-	previousMasks[1] = previousMasks[0].clone();
-	previousMasks[0] = maskImg.clone();
+    previousMasks[1] = previousMasks[0].clone();
+    previousMasks[0] = maskImg.clone();
 
-	kSize = cv::Size(maskImg.cols * 0.05, maskImg.rows * 0.05);
-	if (kSize.height % 2 == 0)
-		kSize.height -= 1;
-	if (kSize.width % 2 == 0)
-		kSize.width -= 1;
+    kSize = cv::Size(maskImg.cols * 0.05, maskImg.rows * 0.05);
+    if (kSize.height % 2 == 0)
+        kSize.height -= 1;
+    if (kSize.width % 2 == 0)
+        kSize.width -= 1;
 
 #ifndef TFLITE
-	cv::dilate(maskImg, maskImg, cv::getStructuringElement(cv::MORPH_CROSS, kSize));
-	maskImg = maskImg * 255.;
+    cv::dilate(maskImg, maskImg, cv::getStructuringElement(cv::MORPH_CROSS, kSize));
+    maskImg = maskImg * 255.;
 #endif
-	GaussianBlur (maskImg, maskImg, kSize, 0); //mask from 0 to 255.
-	maskImg = maskImg / 255.;
+    GaussianBlur(maskImg, maskImg, kSize, 0); // mask from 0 to 255.
+    maskImg = maskImg / 255.;
 
-	cv::Mat applyMask = frameReduced.clone();
-	cv::Mat roiMaskImg = maskImg.clone();
-	cv::Mat roiMaskImgComplementary = 1. - roiMaskImg; //mask from 1. to 0
+    cv::Mat applyMask = frameReduced.clone();
+    cv::Mat roiMaskImg = maskImg.clone();
+    cv::Mat roiMaskImgComplementary = 1. - roiMaskImg; // mask from 1. to 0
 
-	std::vector<cv::Mat> channels;
-	std::vector<cv::Mat> channelsComplementary;
+    std::vector<cv::Mat> channels;
+    std::vector<cv::Mat> channelsComplementary;
 
-	channels.emplace_back(roiMaskImg);
-	channels.emplace_back(roiMaskImg);
-	channels.emplace_back(roiMaskImg);
-	channelsComplementary.emplace_back(roiMaskImgComplementary);
-	channelsComplementary.emplace_back(roiMaskImgComplementary);
-	channelsComplementary.emplace_back(roiMaskImgComplementary);
+    channels.emplace_back(roiMaskImg);
+    channels.emplace_back(roiMaskImg);
+    channels.emplace_back(roiMaskImg);
+    channelsComplementary.emplace_back(roiMaskImgComplementary);
+    channelsComplementary.emplace_back(roiMaskImgComplementary);
+    channelsComplementary.emplace_back(roiMaskImgComplementary);
 
-	cv::merge(channels, roiMaskImg);
-	cv::merge(channelsComplementary, roiMaskImgComplementary);
+    cv::merge(channels, roiMaskImg);
+    cv::merge(channelsComplementary, roiMaskImgComplementary);
 
-	int origType = frameReduced.type();
-	int roiMaskType = roiMaskImg.type();
+    int origType = frameReduced.type();
+    int roiMaskType = roiMaskImg.type();
 
-	applyMask.convertTo(applyMask, roiMaskType);
-	applyMask = applyMask.mul(roiMaskImg);
-	applyMask += backgroundImage.mul(roiMaskImgComplementary);
-	applyMask.convertTo(applyMask, origType);
+    applyMask.convertTo(applyMask, roiMaskType);
+    applyMask = applyMask.mul(roiMaskImg);
+    applyMask += backgroundImage.mul(roiMaskImgComplementary);
+    applyMask.convertTo(applyMask, origType);
 
-	cv::resize(applyMask, applyMask, cv::Size(frame.cols, frame.rows));
+    cv::resize(applyMask, applyMask, cv::Size(frame.cols, frame.rows));
 
-	copyByLine(frame.data, applyMask.data, lineSize, cv::Size(frame.cols, frame.rows));
+    copyByLine(frame.data, applyMask.data, lineSize, cv::Size(frame.cols, frame.rows));
 }
 
 void
 PluginProcessor::rotateFrame(int angle, cv::Mat& mat)
 {
-	if (angle != 0) {
-		switch (angle) {
-			case -90:
-				cv::rotate(mat, mat, cv::ROTATE_90_COUNTERCLOCKWISE);
-				break;
-			case 180:
-				cv::rotate(mat, mat, cv::ROTATE_180);
-				break;
-			case -180:
-				cv::rotate(mat, mat, cv::ROTATE_180);
-				break;
-			case 90:
-				cv::rotate(mat, mat, cv::ROTATE_90_CLOCKWISE);
-				break;
-		}
-	}
+    if (angle != 0) {
+        switch (angle) {
+        case -90:
+            cv::rotate(mat, mat, cv::ROTATE_90_COUNTERCLOCKWISE);
+            break;
+        case 180:
+            cv::rotate(mat, mat, cv::ROTATE_180);
+            break;
+        case -180:
+            cv::rotate(mat, mat, cv::ROTATE_180);
+            break;
+        case 90:
+            cv::rotate(mat, mat, cv::ROTATE_90_CLOCKWISE);
+            break;
+        }
+    }
 }
 
 bool
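
Note on PluginProcessor::drawMaskOnFrame(): the per-pixel loops blend the current mask with
previousMasks[0] and previousMasks[1] using the weights 0.6 / 0.3 / 0.1. The weighted-average part
of that temporal smoothing can also be written with whole-matrix OpenCV arithmetic, which avoids
the nested loops; the sketch below assumes all three Mats are CV_32FC1 and the same size, and it
deliberately leaves out the class-15 test and the % 256 wrap of the TFLITE branch, which would
still need a separate masking pass.

    #include <opencv2/core.hpp>

    // Sketch only: element-wise 0.6 * current + 0.3 * prev0 + 0.1 * prev1 (CV_32FC1 assumed).
    static cv::Mat
    blendMasks(const cv::Mat& current, const cv::Mat& prev0, const cv::Mat& prev1)
    {
        cv::Mat blended;
        cv::addWeighted(current, 0.6, prev0, 0.3, 0.0, blended);
        cv::addWeighted(blended, 1.0, prev1, 0.1, 0.0, blended);
        return blended;
    }
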
diff --git a/GreenScreen/pluginProcessor.h b/GreenScreen/pluginProcessor.h
index 8224d80..528988f 100644
--- a/GreenScreen/pluginProcessor.h
+++ b/GreenScreen/pluginProcessor.h
@@ -15,18 +15,19 @@
  *
  *  You should have received a copy of the GNU General Public License
  *  along with this program; if not, write to the Free Software
- *  Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301 USA.
+ *  Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301
+ * USA.
  */
 
 #pragma once
 // STL
 #include <condition_variable>
 #include <cstdint>
+#include <map>
 #include <memory>
 #include <mutex>
 #include <thread>
 #include <vector>
-#include <map>
 // Filters
 #include "pluginInference.h"
 // AvFrame
@@ -41,50 +42,55 @@
 
 namespace jami {
 
-class PluginProcessor {
+class PluginProcessor
+{
 public:
-	PluginProcessor(const std::string& dataPath);
+    PluginProcessor(const std::string& dataPath);
 
-	void initModel();
-	/**
-	 * @brief feedInput
-	 * Takes a frame and feeds it to the model storage for predictions
-	 * @param frame
-	 */
-	void feedInput(const cv::Mat& frame);
+    void initModel();
+    /**
+     * @brief feedInput
+     * Takes a frame and feeds it to the model storage for predictions
+     * @param frame
+     */
+    void feedInput(const cv::Mat& frame);
 
-	/**
-	 * @brief computePredictions
-	 * Uses the model to compute the predictions and store them in
-	 * computedPredictions
-	 */
-	void computePredictions();
+    /**
+     * @brief computePredictions
+     * Uses the model to compute the predictions and store them in
+     * computedPredictions
+     */
+    void computePredictions();
 
-	void printMask();
-	void drawMaskOnFrame(cv::Mat& frame, cv::Mat& frameReduced, std::vector<float>computedMask, int lineSize, int angle);
-	int getBackgroundRotation();
-	void setBackgroundRotation(int angle);
-	void setBackgroundImage(const std::string& backgroundPath);
-	void rotateFrame(int angle, cv::Mat& mat);
+    void printMask();
+    void drawMaskOnFrame(cv::Mat& frame,
+                         cv::Mat& frameReduced,
+                         std::vector<float> computedMask,
+                         int lineSize,
+                         int angle);
+    int getBackgroundRotation();
+    void setBackgroundRotation(int angle);
+    void setBackgroundImage(const std::string& backgroundPath);
+    void rotateFrame(int angle, cv::Mat& mat);
     bool hasBackground() const;
 
-	// Output predictions
-	std::vector<float> computedMask;
+    // Output predictions
+    std::vector<float> computedMask;
 
-	cv::Mat previousMasks[2];
-	cv::Mat backgroundImage;
+    cv::Mat previousMasks[2];
+    cv::Mat backgroundImage;
 
-	cv::Size kSize;
-	float scaleX = 0;
-	float scaleY = 0;
+    cv::Size kSize;
+    float scaleX = 0;
+    float scaleY = 0;
 
-	PluginInference pluginInference;
-	std::string backgroundPath;
+    PluginInference pluginInference;
+    std::string backgroundPath;
 
 private:
-	// Frame
-	cv::Mat frame;
-	int backgroundRotation = 0;
+    // Frame
+    cv::Mat frame;
+    int backgroundRotation = 0;
     bool hasBackground_ = false;
 };
 } // namespace jami
diff --git a/GreenScreen/videoSubscriber.cpp b/GreenScreen/videoSubscriber.cpp
index 4a6fba3..f137584 100644
--- a/GreenScreen/videoSubscriber.cpp
+++ b/GreenScreen/videoSubscriber.cpp
@@ -15,19 +15,20 @@
  *
  *  You should have received a copy of the GNU General Public License
  *  along with this program; if not, write to the Free Software
- *  Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301 USA.
+ *  Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301
+ * USA.
  */
 
 #include "videoSubscriber.h"
 // Use for display rotation matrix
 extern "C" {
-#include <libavutil/display.h>
 #include <accel.h>
+#include <libavutil/display.h>
 }
 
 // Opencv processing
-#include <opencv2/imgproc.hpp>
 #include <opencv2/imgcodecs.hpp>
+#include <opencv2/imgproc.hpp>
 
 // LOGGING
 #include <pluglog.h>
@@ -37,167 +38,178 @@
 
 namespace jami {
 
-VideoSubscriber::VideoSubscriber(const std::string& dataPath): path_{dataPath},
-pluginProcessor{dataPath}
+VideoSubscriber::VideoSubscriber(const std::string& dataPath)
+    : path_ {dataPath}
+    , pluginProcessor {dataPath}
 {
-	/**
-	 * Waits for new frames and then process them
-	 * Writes the predictions in computedPredictions
-	 **/
-	processFrameThread = std::thread([this] {
-		while (running)	{
-			std::unique_lock<std::mutex> l(inputLock);
-			inputCv.wait(l, [this] { return not running or newFrame; });
-			if (not running) {
-				break;
-			}
+    /**
+     * Waits for new frames and then processes them
+     * Writes the predictions in computedPredictions
+     **/
+    processFrameThread = std::thread([this] {
+        while (running) {
+            std::unique_lock<std::mutex> l(inputLock);
+            inputCv.wait(l, [this] { return not running or newFrame; });
+            if (not running) {
+                break;
+            }
 
-			pluginProcessor.feedInput(fcopy.resizedFrameRGB);
-			newFrame = false;
-			/** Unclock the mutex, this way we let the other thread
-			 *  copy new data while we are processing the old one
-			 **/
-			l.unlock();
-			pluginProcessor.computePredictions();
-		}
-	});
+            pluginProcessor.feedInput(fcopy.resizedFrameRGB);
+            newFrame = false;
+            /** Unlock the mutex; this way we let the other thread
+             *  copy new data while we are processing the old one
+             **/
+            l.unlock();
+            pluginProcessor.computePredictions();
+        }
+    });
 }
 
 VideoSubscriber::~VideoSubscriber()
 {
-	std::ostringstream oss;
-	oss << "~MediaProcessor" << std::endl;
-	stop();
-	processFrameThread.join();
-	Plog::log(Plog::LogPriority::INFO, TAG, oss.str());
+    std::ostringstream oss;
+    oss << "~MediaProcessor" << std::endl;
+    stop();
+    processFrameThread.join();
+    Plog::log(Plog::LogPriority::INFO, TAG, oss.str());
 }
 
 void
-VideoSubscriber::update(jami::Observable<AVFrame*> *, AVFrame* const &iFrame)
+VideoSubscriber::update(jami::Observable<AVFrame*>*, AVFrame* const& iFrame)
 {
-	if (pluginProcessor.pluginInference.isAllocated() && pluginProcessor.hasBackground()) {
-		if (!iFrame)
-			return;
-		AVFrame * pluginFrame = const_cast<AVFrame *>(iFrame);
+    if (pluginProcessor.pluginInference.isAllocated() && pluginProcessor.hasBackground()) {
+        if (!iFrame)
+            return;
+        AVFrame* pluginFrame = const_cast<AVFrame*>(iFrame);
 
-		//======================================================================================
-		// GET FRAME ROTATION
-		AVFrameSideData* side_data =
-			av_frame_get_side_data(iFrame, AV_FRAME_DATA_DISPLAYMATRIX);
+        //======================================================================================
+        // GET FRAME ROTATION
+        AVFrameSideData* side_data = av_frame_get_side_data(iFrame, AV_FRAME_DATA_DISPLAYMATRIX);
 
-		int angle{ 0 };
-		if (side_data) {
-			auto matrix_rotation = reinterpret_cast<int32_t*>(side_data->data);
-			angle = static_cast<int>(av_display_rotation_get(matrix_rotation));
-		}
-		pluginFrame = transferToMainMemory(pluginFrame, AV_PIX_FMT_NV12);
+        int angle {0};
+        if (side_data) {
+            auto matrix_rotation = reinterpret_cast<int32_t*>(side_data->data);
+            angle = static_cast<int>(av_display_rotation_get(matrix_rotation));
+        }
+        pluginFrame = transferToMainMemory(pluginFrame, AV_PIX_FMT_NV12);
 
-		//======================================================================================
-		// GET RAW FRAME
-		// Use a non-const Frame
-		// Convert input frame to RGB
-		int inputHeight = pluginFrame->height;
-		int inputWidth = pluginFrame->width;
+        //======================================================================================
+        // GET RAW FRAME
+        // Use a non-const Frame
+        // Convert input frame to RGB
+        int inputHeight = pluginFrame->height;
+        int inputWidth = pluginFrame->width;
 
-		fcopy.originalSize = cv::Size{inputWidth, inputHeight};
-		FrameUniquePtr bgrFrame = scaler.convertFormat(pluginFrame, AV_PIX_FMT_RGB24);
-		cv::Mat frame =
-			cv::Mat{bgrFrame->height, bgrFrame->width, CV_8UC3, bgrFrame->data[0],
-					static_cast<size_t>(bgrFrame->linesize[0])};
-		// First clone the frame as the original one is unusable because of
-		// linespace
+        fcopy.originalSize = cv::Size {inputWidth, inputHeight};
+        FrameUniquePtr bgrFrame = scaler.convertFormat(pluginFrame, AV_PIX_FMT_RGB24);
+        cv::Mat frame = cv::Mat {bgrFrame->height,
+                                 bgrFrame->width,
+                                 CV_8UC3,
+                                 bgrFrame->data[0],
+                                 static_cast<size_t>(bgrFrame->linesize[0])};
+        // First clone the frame, as the original one is unusable because of
+        // its linesize (stride) padding
 
-		cv::Mat clone = frame.clone();
-		//======================================================================================
+        cv::Mat clone = frame.clone();
+        //======================================================================================
 
-		pluginProcessor.setBackgroundRotation(angle);
+        pluginProcessor.setBackgroundRotation(angle);
 
-		if (firstRun) {
-			pluginProcessor.pluginInference.setExpectedImageDimensions();
-			fcopy.resizedSize = cv::Size{pluginProcessor.pluginInference.getImageWidth(), pluginProcessor.pluginInference.getImageHeight()};
+        if (firstRun) {
+            pluginProcessor.pluginInference.setExpectedImageDimensions();
+            fcopy.resizedSize = cv::Size {pluginProcessor.pluginInference.getImageWidth(),
+                                          pluginProcessor.pluginInference.getImageHeight()};
 
-			cv::resize(clone, fcopy.resizedFrameRGB, fcopy.resizedSize);
-			pluginProcessor.rotateFrame(angle, fcopy.resizedFrameRGB);
+            cv::resize(clone, fcopy.resizedFrameRGB, fcopy.resizedSize);
+            pluginProcessor.rotateFrame(angle, fcopy.resizedFrameRGB);
 
-			cv::resize(pluginProcessor.backgroundImage, pluginProcessor.backgroundImage, fcopy.resizedSize);
+            cv::resize(pluginProcessor.backgroundImage,
+                       pluginProcessor.backgroundImage,
+                       fcopy.resizedSize);
 
-			firstRun = false;
-		}
+            firstRun = false;
+        }
 
-		if (!newFrame) {
-			std::lock_guard<std::mutex> l(inputLock);
-			cv::resize(clone, fcopy.resizedFrameRGB, fcopy.resizedSize);
-			pluginProcessor.rotateFrame(angle, fcopy.resizedFrameRGB);
-			newFrame = true;
-			inputCv.notify_all();
-		}
+        if (!newFrame) {
+            std::lock_guard<std::mutex> l(inputLock);
+            cv::resize(clone, fcopy.resizedFrameRGB, fcopy.resizedSize);
+            pluginProcessor.rotateFrame(angle, fcopy.resizedFrameRGB);
+            newFrame = true;
+            inputCv.notify_all();
+        }
 
-		fcopy.predictionsFrameRGB = frame;
-		fcopy.predictionsResizedFrameRGB = fcopy.resizedFrameRGB.clone();
-		pluginProcessor.rotateFrame(-angle, fcopy.predictionsResizedFrameRGB);
-		pluginProcessor.drawMaskOnFrame(fcopy.predictionsFrameRGB, fcopy.predictionsResizedFrameRGB,
-												pluginProcessor.computedMask, bgrFrame->linesize[0], angle);
+        fcopy.predictionsFrameRGB = frame;
+        fcopy.predictionsResizedFrameRGB = fcopy.resizedFrameRGB.clone();
+        pluginProcessor.rotateFrame(-angle, fcopy.predictionsResizedFrameRGB);
+        pluginProcessor.drawMaskOnFrame(fcopy.predictionsFrameRGB,
+                                        fcopy.predictionsResizedFrameRGB,
+                                        pluginProcessor.computedMask,
+                                        bgrFrame->linesize[0],
+                                        angle);
 
-		//======================================================================================
-		// REPLACE AVFRAME DATA WITH FRAME DATA
-		if (bgrFrame && bgrFrame->data[0]) {
-			uint8_t* frameData = bgrFrame->data[0];
-			if(angle == 90 || angle == -90)	{
-				std::memmove(frameData, fcopy.predictionsFrameRGB.data, static_cast<size_t>(pluginFrame->width*pluginFrame->height*3) * sizeof(uint8_t));
-			}
-		}
-		// Copy Frame meta data
-		if (bgrFrame && pluginFrame) {
-			av_frame_copy_props(bgrFrame.get(), pluginFrame);
-			scaler.moveFrom(pluginFrame, bgrFrame.get());
-		}
+        //======================================================================================
+        // REPLACE AVFRAME DATA WITH FRAME DATA
+        if (bgrFrame && bgrFrame->data[0]) {
+            uint8_t* frameData = bgrFrame->data[0];
+            if (angle == 90 || angle == -90) {
+                std::memmove(frameData,
+                             fcopy.predictionsFrameRGB.data,
+                             static_cast<size_t>(pluginFrame->width * pluginFrame->height * 3)
+                                 * sizeof(uint8_t));
+            }
+        }
+        // Copy Frame meta data
+        if (bgrFrame && pluginFrame) {
+            av_frame_copy_props(bgrFrame.get(), pluginFrame);
+            scaler.moveFrom(pluginFrame, bgrFrame.get());
+        }
 
-		// Remove the pointer
-		pluginFrame = nullptr;
-	}
+        // Remove the pointer
+        pluginFrame = nullptr;
+    }
 }
 
 void
-VideoSubscriber::attached(jami::Observable<AVFrame*> *observable)
+VideoSubscriber::attached(jami::Observable<AVFrame*>* observable)
 {
-	std::ostringstream oss;
-	oss << "::Attached ! " << std::endl;
-	Plog::log(Plog::LogPriority::INFO, TAG, oss.str());
-	observable_ = observable;
+    std::ostringstream oss;
+    oss << "::Attached ! " << std::endl;
+    Plog::log(Plog::LogPriority::INFO, TAG, oss.str());
+    observable_ = observable;
 }
 
 void
-VideoSubscriber::detached(jami::Observable<AVFrame*> *)
+VideoSubscriber::detached(jami::Observable<AVFrame*>*)
 {
-	firstRun = true;
-	observable_ = nullptr;
-	std::ostringstream oss;
-	oss << "::Detached()" << std::endl;
-	Plog::log(Plog::LogPriority::INFO, TAG, oss.str());
+    firstRun = true;
+    observable_ = nullptr;
+    std::ostringstream oss;
+    oss << "::Detached()" << std::endl;
+    Plog::log(Plog::LogPriority::INFO, TAG, oss.str());
 }
 
 void
 VideoSubscriber::detach()
 {
-	if (observable_) {
-		firstRun = true;
-		std::ostringstream oss;
-		oss << "::Calling detach()" << std::endl;
-		Plog::log(Plog::LogPriority::INFO, TAG, oss.str());
-		observable_->detach(this);
-	}
+    if (observable_) {
+        firstRun = true;
+        std::ostringstream oss;
+        oss << "::Calling detach()" << std::endl;
+        Plog::log(Plog::LogPriority::INFO, TAG, oss.str());
+        observable_->detach(this);
+    }
 }
 
 void
 VideoSubscriber::stop()
 {
-	running = false;
-	inputCv.notify_all();
+    running = false;
+    inputCv.notify_all();
 }
 
 void
 VideoSubscriber::setBackground(const std::string& backgroundPath)
 {
-	pluginProcessor.setBackgroundImage(backgroundPath);
+    pluginProcessor.setBackgroundImage(backgroundPath);
 }
-}
+} // namespace jami
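
The update()/processFrameThread pair in VideoSubscriber is a single-slot producer/consumer
hand-off: update() copies the latest resized frame under inputLock and raises newFrame (dropping
frames while the worker is busy), the worker waits on inputCv, takes the slot, and unlocks before
running inference so the capture path is never blocked by the model. A stripped-down,
self-contained sketch of the same pattern is below; all names are illustrative and no Jami types
are used. Unlike stop() above, the sketch flips its running flag while holding the lock, which
avoids a potential data race with the wait predicate.

    #include <condition_variable>
    #include <mutex>

    // Minimal sketch of the single-slot frame hand-off (hypothetical names).
    struct FrameSlot
    {
        std::mutex lock;
        std::condition_variable cv;
        bool running = true;
        bool newFrame = false;
        int payload = 0; // stands in for the copied frame

        void producer(int value) // capture path, like update()
        {
            std::lock_guard<std::mutex> l(lock);
            if (!newFrame) { // drop the frame if the worker is still busy
                payload = value;
                newFrame = true;
                cv.notify_all();
            }
        }

        void workerLoop() // processing thread, like processFrameThread
        {
            while (true) {
                std::unique_lock<std::mutex> l(lock);
                cv.wait(l, [this] { return !running || newFrame; });
                if (!running)
                    break;
                int frame = payload; // take the slot
                newFrame = false;
                l.unlock(); // release before the slow work, as in the plugin
                (void) frame; // ... run inference here ...
            }
        }

        void stop()
        {
            {
                std::lock_guard<std::mutex> l(lock);
                running = false;
            }
            cv.notify_all();
        }
    };
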
diff --git a/GreenScreen/videoSubscriber.h b/GreenScreen/videoSubscriber.h
index c7ffd17..70a2290 100644
--- a/GreenScreen/videoSubscriber.h
+++ b/GreenScreen/videoSubscriber.h
@@ -26,7 +26,7 @@
 }
 #include <observer.h>
 
-//STl
+// STL
 #include <map>
 #include <thread>
 #include <condition_variable>
@@ -40,8 +40,9 @@
 #include "pluginProcessor.h"
 
 namespace jami {
-    
-class FrameCopy {
+
+class FrameCopy
+{
 public:
     // This frame is a resized version of the original in RGB format
     cv::Mat resizedFrameRGB;
@@ -53,25 +54,25 @@
     cv::Mat predictionsResizedFrameRGB;
 };
 
-class VideoSubscriber : public jami::Observer<AVFrame*> {
+class VideoSubscriber : public jami::Observer<AVFrame*>
+{
 public:
     VideoSubscriber(const std::string& dataPath);
     ~VideoSubscriber();
 
-    virtual void update(jami::Observable<AVFrame*> *, AVFrame* const &) override;
-    virtual void attached(jami::Observable<AVFrame*> *) override;
-    virtual void detached(jami::Observable<AVFrame*> *) override;
+    virtual void update(jami::Observable<AVFrame*>*, AVFrame* const&) override;
+    virtual void attached(jami::Observable<AVFrame*>*) override;
+    virtual void detached(jami::Observable<AVFrame*>*) override;
 
     void detach();
     void stop();
     void setBackground(const std::string& backgroundPath);
 
-
 private:
     // Observer pattern
-    Observable<AVFrame*> *observable_ = nullptr;
+    Observable<AVFrame*>* observable_ = nullptr;
 
-    //Data
+    // Data
     std::string path_;
 
     // Frame
@@ -86,11 +87,11 @@
     std::condition_variable inputCv;
 
     // Status variables of the processing
-    bool firstRun{true};
-    bool running{true};
-    bool newFrame{false};
+    bool firstRun {true};
+    bool running {true};
+    bool newFrame {false};
 
-    //std::shared_ptr<PluginProcessor> pluginProcessor;
+    // std::shared_ptr<PluginProcessor> pluginProcessor;
     PluginProcessor pluginProcessor;
 };
-}
+} // namespace jami