Implement inference using the tensorflow_cc library.
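
Adds a compile-time TFLITE switch: builds that define -DTFLITE (as build.sh and
buildandroid.sh now do) keep using the TensorFlow Lite interpreter, while builds
without it load a frozen graph (frozen_inference_graph.pb) and run inference
through a tensorflow_cc Session. Also renames the plugin to
"foregroundsegmentation", merges the two VideoSubscriber instances into a single
one, and adds PluginProcessor::drawMaskOnReducedFrame() to blend the background
on the reduced (model-sized) frame.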

Change-Id: Id63a0a0b04d9f429dc1b32a0278184d06be7c610
diff --git a/ForegroundSegmentation/TFInference.cpp b/ForegroundSegmentation/TFInference.cpp
index 3464069..7590dd7 100644
--- a/ForegroundSegmentation/TFInference.cpp
+++ b/ForegroundSegmentation/TFInference.cpp
@@ -3,41 +3,54 @@
 #include <fstream>
 #include <numeric>
 #include <iostream>
-// Tensorflow headers
-#include <tensorflow/lite/builtin_op_data.h>
-#include <tensorflow/lite/interpreter.h>
-#include <tensorflow/lite/kernels/register.h>
-#include <tensorflow/lite/model.h>
-#include <tensorflow/lite/optional_debug_tools.h>
 
-#include "pluglog.h"
+
+#ifdef TFLITE 
+    // Tensorflow headers
+    #include <tensorflow/lite/interpreter.h>
+    #include <tensorflow/lite/builtin_op_data.h>
+    #include <tensorflow/lite/kernels/register.h>
+    #include <tensorflow/lite/model.h>
+    #include <tensorflow/lite/optional_debug_tools.h>
+#else
+    #include <tensorflow/core/framework/graph.pb.h>
+    #include <tensorflow/core/platform/env.h>
+#endif //TFLITE
+
+#include <pluglog.h>
+
+const char sep = separator();
+const std::string TAG = "FORESEG";
 
 namespace jami 
     {
     TensorflowInference::TensorflowInference(TFModel tfModel) : tfModel(tfModel) {}
 
     TensorflowInference::~TensorflowInference() 
-    {
-        // delete(optionalNnApiDelegate);
-    }
+    { }
 
     bool TensorflowInference::isAllocated() const 
     {
         return allocated;
     }
 
+#ifdef TFLITE 
+
     void TensorflowInference::loadModel() 
     {
+		Plog::log(Plog::LogPriority::INFO, TAG, "inside loadModel()");
         flatbufferModel = tflite::FlatBufferModel::BuildFromFile(tfModel.modelPath.c_str());
         if (!flatbufferModel) 
         {
             std::runtime_error("Failed to load the model file");
         }
         Plog::log(Plog::LogPriority::INFO, "TENSOR", "MODEL LOADED" );
-    }
 
+    }
     void TensorflowInference::buildInterpreter() 
     {
+		Plog::log(Plog::LogPriority::INFO, TAG, "inside buildInterpreter()");
         // Build the interpreter
         tflite::ops::builtin::BuiltinOpResolver resolver;
         tflite::InterpreterBuilder builder(*flatbufferModel, resolver);
@@ -46,21 +59,22 @@
         {
             setInterpreterSettings();
             Plog::log(Plog::LogPriority::INFO, "TENSOR", "INTERPRETER BUILT" );
+
             if (tfModel.useNNAPI)
             {
-                TfLiteDelegate* optionalNnApiDelegate = tflite::NnApiDelegate();
-                // optionalNnApiDelegate = std::make_unique<TfLiteDelegate*>(tflite::NnApiDelegate());
-                
-                // if (interpreter->ModifyGraphWithDelegate(*(optionalNnApiDelegate.get())) != kTfLiteOk)
-                if (interpreter->ModifyGraphWithDelegate(optionalNnApiDelegate) != kTfLiteOk)
-                {
-                    Plog::log(Plog::LogPriority::INFO, "TENSOR", "INTERPRETER ERROR!!!" );
-                }
-                else
-                {
-                    Plog::log(Plog::LogPriority::INFO, "TENSOR", "INTERPRETER SET" );
-                    allocateTensors();
-                }
+                    TfLiteDelegate* optionalNnApiDelegate = tflite::NnApiDelegate();
+                    // optionalNnApiDelegate = std::make_unique<TfLiteDelegate*>(tflite::NnApiDelegate());
+                    
+                    // if (interpreter->ModifyGraphWithDelegate(*(optionalNnApiDelegate.get())) != kTfLiteOk)
+                    if (interpreter->ModifyGraphWithDelegate(optionalNnApiDelegate) != kTfLiteOk)
+                    {
+                        Plog::log(Plog::LogPriority::INFO, "TENSOR", "INTERPRETER ERROR!!!" );
+                    }
+                    else
+                    {
+                        Plog::log(Plog::LogPriority::INFO, "TENSOR", "INTERPRETER SET" );
+                        allocateTensors();
+                    }
             }
             else
             {
@@ -69,33 +83,15 @@
         }
     }
 
-    void TensorflowInference::setInterpreterSettings() 
-    {
-        // interpreter->UseNNAPI(tfModel.useNNAPI);
-        interpreter->SetAllowFp16PrecisionForFp32(tfModel.allowFp16PrecisionForFp32);
-        interpreter->SetNumThreads(static_cast<int>(tfModel.numberOfThreads));
-    }
-
-    void TensorflowInference::init() 
-    {
-        // Loading the model
-        Plog::log(Plog::LogPriority::INFO, "TENSOR", "INSIDE THE INIT" );
-        loadModel();
-        buildInterpreter();
-        describeModelTensors();
-    }
-
     void TensorflowInference::allocateTensors() 
     {
-        {    
-            if (interpreter->AllocateTensors() != kTfLiteOk) 
-            {
-                std::runtime_error("Failed to allocate tensors!");
-            } else 
-            {
-                Plog::log(Plog::LogPriority::INFO, "TENSOR", "TENSORS ALLOCATED" );
-                allocated = true;
-            }
+        if (interpreter->AllocateTensors() != kTfLiteOk) 
+        {
+            throw std::runtime_error("Failed to allocate tensors!");
+        } else 
+        {
+            Plog::log(Plog::LogPriority::INFO, "TENSOR", "TENSORS ALLOCATED" );
+            allocated = true;
         }
     }
 
@@ -141,6 +137,14 @@
         }
     }
 
+    void TensorflowInference::setInterpreterSettings() 
+    {
+        // interpreter->UseNNAPI(tfModel.useNNAPI);
+        interpreter->SetAllowFp16PrecisionForFp32(tfModel.allowFp16PrecisionForFp32);
+
+        interpreter->SetNumThreads(static_cast<int>(tfModel.numberOfThreads));
+    }
+
     void TensorflowInference::describeTensor(std::string prefix, int index) const 
     {
         std::vector<int> dimensions = getTensorDimensions(index);
@@ -181,9 +185,10 @@
 
     void TensorflowInference::runGraph() 
     {
+		Plog::log(Plog::LogPriority::INFO, TAG, "inside runGraph()");
         for (size_t i = 0; i < tfModel.numberOfRuns; i++) 
-        {
-            if (interpreter->Invoke() != kTfLiteOk) 
+        {
+            if (interpreter->Invoke() != kTfLiteOk)
             {
                 Plog::log(Plog::LogPriority::INFO, "RUN GRAPH", "A problem occured when running the graph");
             }
@@ -191,7 +196,60 @@
             {
                 Plog::log(Plog::LogPriority::INFO, "RUN GRAPH", "TF RUN OK");
             }
-            
         }
     }
+
+    void TensorflowInference::init() 
+    {
+        // Loading the model
+        Plog::log(Plog::LogPriority::INFO, TAG, "inside init()" );   
+        loadModel();
+        buildInterpreter();
+        describeModelTensors();
+    }  
+
+#else
+    // Reads a model graph definition from disk, and creates a session object you
+    // can use to run it.
+    void TensorflowInference::LoadGraph() 
+    {
+        tensorflow::GraphDef graph_def;
+        tensorflow::Status load_graph_status = tensorflow::ReadBinaryProto(tensorflow::Env::Default(), tfModel.modelPath, &graph_def);
+        if (!load_graph_status.ok()) {
+            Plog::log(Plog::LogPriority::ERROR, "LOAD GRAPH", "Failed to load compute graph at " + tfModel.modelPath);
+            return;
+        }
+        session.reset(tensorflow::NewSession(tensorflow::SessionOptions()));
+        tensorflow::Status session_create_status = session->Create(graph_def);
+        if (!session_create_status.ok()) {
+            Plog::log(Plog::LogPriority::ERROR, "LOAD GRAPH", "Failed to create the Tensorflow session");
+            return;
+        }
+        allocated = true;
+    }
+
+    void TensorflowInference::runGraph() 
+    {
+        for (size_t i = 0; i < tfModel.numberOfRuns; i++) 
+        {
+            // Actually run the image through the model.
+            tensorflow::Status run_status = session->Run({{tfModel.inputLayer, imageTensor}}, {tfModel.outputLayer}, {}, &outputs);
+            if (!run_status.ok())                     
+            {
+                Plog::log(Plog::LogPriority::INFO, "RUN GRAPH", "A problem occurred when running the graph");
+            }
+            else
+            {
+                Plog::log(Plog::LogPriority::INFO, "RUN GRAPH", "TF RUN OK");
+            }
+        }
+    }
+
+    void TensorflowInference::init() 
+    {
+        // Loading the model
+        Plog::log(Plog::LogPriority::INFO, "TENSOR", "INSIDE THE INIT" );
+        LoadGraph();
+    } 
+#endif
+ 
 }
diff --git a/ForegroundSegmentation/TFInference.h b/ForegroundSegmentation/TFInference.h
index 1c90b46..4ffaab2 100644
--- a/ForegroundSegmentation/TFInference.h
+++ b/ForegroundSegmentation/TFInference.h
@@ -2,13 +2,16 @@
 
 // Library headers
 #include "TFModels.h"
-#include <tensorflow/lite/delegates/nnapi/nnapi_delegate.h>
 
 // STL
 #include <memory>
 #include <string>
 #include <vector>
 
+#ifdef TFLITE
+    #include <tensorflow/lite/interpreter.h>
+    #include <tensorflow/lite/delegates/nnapi/nnapi_delegate.h>
+
 namespace tflite 
 {
     class FlatBufferModel;
@@ -16,6 +19,27 @@
     class StatefulNnApiDelegate;
 } // namespace tflite
 
+#else
+    #include <tensorflow/core/lib/core/status.h>
+    #include <tensorflow/core/public/session.h>
+    #include <tensorflow/core/framework/tensor.h>
+    #include <tensorflow/core/framework/types.pb.h>
+    #include <tensorflow/core/platform/init_main.h>
+
+namespace tensorflow 
+{
+    class Tensor;
+    class Status;
+    class GraphDef;
+    class Session;
+    class TensorShape;
+    class Env;
+    enum DataType:int;
+} // namespace tensorflow
+
+#endif
+
+
 namespace jami 
 {
     class TensorflowInference 
@@ -28,6 +52,8 @@
              */
             TensorflowInference(TFModel model);
             ~TensorflowInference();
+
+#ifdef TFLITE
             /**
              * @brief loadModel
              * Load the model from the file described in the Supervised Model
@@ -43,6 +69,16 @@
              */
             void allocateTensors();
 
+            // Debug methods
+            void describeModelTensors() const;
+            void describeTensor(std::string prefix, int index) const;
+
+#else
+            void LoadGraph();
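+            // Input tensor pre-allocated with the model's expected shape (1 x 256 x 256 x 3, uint8);
+            // see TFModelConfiguration::dims in TFModels.h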
+            tensorflow::Tensor imageTensor = tensorflow::Tensor(tensorflow::DataType::DT_UINT8, tensorflow::TensorShape({ 1, 256, 256, 3 }));
+
+#endif //TFLITE
+
             /**
              * @brief runGraph
              * runs the underlaying graph model.numberOfRuns times
@@ -57,12 +93,9 @@
             void init();
             // Getters
             bool isAllocated() const;
-            // Debug methods
-            void describeModelTensors() const;
-            void describeTensor(std::string prefix, int index) const;
 
         protected:
-
+#ifdef TFLITE
             /**
              * @brief getTensorDimensions
              * Utility method to get Tensorflow Tensor dimensions
@@ -74,6 +107,13 @@
              */
             std::vector<int> getTensorDimensions(int index) const;
 
+            // Tensorflow model and interpreter
+            std::unique_ptr<tflite::FlatBufferModel> flatbufferModel;
+            std::unique_ptr<tflite::Interpreter> interpreter;
+#else
+            std::unique_ptr<tensorflow::Session> session;
+            std::vector<tensorflow::Tensor> outputs;            
+#endif
             TFModel tfModel;
             std::vector<std::string> labels;
 
@@ -83,12 +123,6 @@
              */
             size_t nbLabels;
 
-            // Tensorflow model and interpreter
-            std::unique_ptr<tflite::FlatBufferModel> flatbufferModel;
-            std::unique_ptr<tflite::Interpreter> interpreter;
-            // std::unique_ptr<TfLiteDelegate*> optionalNnApiDelegate;
-
-            // tflite::StatefulNnApiDelegate delegate = tflite::StatefulNnApiDelegate();
             bool allocated = false;
     };
 }
diff --git a/ForegroundSegmentation/TFModels.h b/ForegroundSegmentation/TFModels.h
index 01dc495..e69f5ea 100644
--- a/ForegroundSegmentation/TFModels.h
+++ b/ForegroundSegmentation/TFModels.h
@@ -10,21 +10,28 @@
     TFModelConfiguration(std::string& model): modelPath{model} {}
     std::string modelPath;
     std::vector<unsigned int> normalizationValues;
+    std::vector<int> dims = {1, 256, 256, 3}; // model input dimensions
+    unsigned int numberOfRuns = 1;
 
-    // Tensorflow specific settings
 
-    #ifdef __ANDROID__
+    // TensorflowLite specific settings
+
+#ifdef TFLITE
+#ifdef __ANDROID__
     bool useNNAPI = true;
-    #else
+#else
     bool useNNAPI = false;
-    #endif // __ANDROID__
-
+#endif //__ANDROID__
     bool allowFp16PrecisionForFp32 = true;
-    unsigned int numberOfThreads = 4;
+    unsigned int numberOfThreads = 1;
 
     // User defined details
     bool inputFloating = false;
-    unsigned int numberOfRuns = 1;
+#else
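+    // Names of the input and output nodes of the frozen inference graph (frozen_inference_graph.pb)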
+    std::string inputLayer = "ImageTensor";
+    std::string outputLayer = "ArgMax";
+#endif // TFLITE
+
 };
 
 struct TFModel : TFModelConfiguration 
diff --git a/ForegroundSegmentation/build.sh b/ForegroundSegmentation/build.sh
index c2d691a..b7c6da9 100644
--- a/ForegroundSegmentation/build.sh
+++ b/ForegroundSegmentation/build.sh
@@ -5,16 +5,16 @@
     echo "DAEMON not provided, building for ${DAEMON}"
 fi
 
-PLUGIN_NAME="foregroungsegmentation"
+PLUGIN_NAME="foregroundsegmentation"
 JPL_FILE_NAME=${PLUGIN_NAME}".jpl"
 SO_FILE_NAME="lib"${PLUGIN_NAME}".so"
 DAEMON_SRC="${DAEMON}/src"
 CONTRIB_PATH="${DAEMON}/contrib"
-# DESTINATION_PATH=/home/${USER}/Projects/ring-plugins
 DESTINATION_PATH="./../build/"
 PLUGINS_LIB="../lib"
 LIBS_DIR="/home/${USER}/Libs"
 
+
 CONTRIB_PLATFORM_CURT=x86_64
 CONTRIB_PLATFORM=${CONTRIB_PLATFORM_CURT}-linux-gnu
 
@@ -23,11 +23,12 @@
 
 # Compile
 clang++ -std=c++14 -shared -fPIC \
--Wl,-Bsymbolic \
+-Wl,-Bsymbolic,-rpath,"\${ORIGIN}" \
 -Wall -Wextra \
 -Wno-unused-variable \
 -Wno-unused-function \
 -Wno-unused-parameter \
+-DTFLITE \
 -I"." \
 -I${DAEMON_SRC} \
 -I"${CONTRIB_PATH}/${CONTRIB_PLATFORM}/include" \
@@ -43,6 +44,7 @@
 pluginInference.cpp \
 pluginParameters.cpp \
 -L${CONTRIB_PATH}/${CONTRIB_PLATFORM}/lib/ \
+-L${CONTRIB_PATH}/${CONTRIB_PLATFORM}/lib/opencv4/3rdparty/ \
 -L${LIBS_DIR}/_tensorflow_distribution/lib/${CONTRIB_PLATFORM}/ \
 -lswscale \
 -lavutil \
@@ -50,10 +52,30 @@
 -lopencv_imgproc \
 -lopencv_core \
 -ltensorflowlite \
+-lpng \
 -o lib/${CONTRIB_PLATFORM_CURT}/${SO_FILE_NAME}
-# (above) Always put opencv_core after all other opencv libs when linking statically
+# (above) Always put opencv_core after all other opencv libs
+# (above) Always put avutil after all other ffmpeg libs
+# (above) Always put png after all other libs
 
 cp ${LIBS_DIR}/_tensorflow_distribution/lib/${CONTRIB_PLATFORM}/libtensorflowlite.so lib/$CONTRIB_PLATFORM_CURT
+cp /usr/lib/${CONTRIB_PLATFORM}/libswscale.so.4 lib/$CONTRIB_PLATFORM_CURT
+cp /usr/lib/${CONTRIB_PLATFORM}/libavutil.so.55 lib/$CONTRIB_PLATFORM_CURT
+cp /usr/lib/${CONTRIB_PLATFORM}/libpng16.so.16 lib/$CONTRIB_PLATFORM_CURT
+# cp /usr/lib/${CONTRIB_PLATFORM}/libX11.so.6 lib/$CONTRIB_PLATFORM_CURT
+# cp /usr/lib/${CONTRIB_PLATFORM}/libdrm.so.2 lib/$CONTRIB_PLATFORM_CURT
+# cp /usr/lib/${CONTRIB_PLATFORM}/libvdpau.so.1 lib/$CONTRIB_PLATFORM_CURT
+# cp /usr/lib/${CONTRIB_PLATFORM}/libva.so.2 lib/$CONTRIB_PLATFORM_CURT
+# cp /usr/lib/${CONTRIB_PLATFORM}/libva-x11.so.2 lib/$CONTRIB_PLATFORM_CURT
+# cp /usr/lib/${CONTRIB_PLATFORM}/libva-drm.so.2 lib/$CONTRIB_PLATFORM_CURT
+# cp /usr/lib/${CONTRIB_PLATFORM}/libxcb.so.1 lib/$CONTRIB_PLATFORM_CURT
+# cp /lib/${CONTRIB_PLATFORM}/libz.so.1 lib/$CONTRIB_PLATFORM_CURT
+# cp /usr/lib/${CONTRIB_PLATFORM}/libxcb.so.1 lib/$CONTRIB_PLATFORM_CURT
+# cp /usr/lib/${CONTRIB_PLATFORM}/libXext.so.6 lib/$CONTRIB_PLATFORM_CURT
+# cp /usr/lib/${CONTRIB_PLATFORM}/libXfixes.so.3 lib/$CONTRIB_PLATFORM_CURT
+# cp /usr/lib/${CONTRIB_PLATFORM}/libXau.so.6 lib/$CONTRIB_PLATFORM_CURT
+# cp /usr/lib/${CONTRIB_PLATFORM}/libXdmcp.so.6 lib/$CONTRIB_PLATFORM_CURT
+# libz.so.1 => /lib/x86_64-linux-gnu/libz.so.1 (0x00007f4828383000)
 
 zip -r ${JPL_FILE_NAME} data manifest.json lib
 mv ${JPL_FILE_NAME} ${DESTINATION_PATH}/${CONTRIB_PLATFORM}/jpl/
diff --git a/ForegroundSegmentation/buildandroid.sh b/ForegroundSegmentation/buildandroid.sh
index 2300dc3..8170f18 100644
--- a/ForegroundSegmentation/buildandroid.sh
+++ b/ForegroundSegmentation/buildandroid.sh
@@ -15,7 +15,6 @@
 LIBS_DIR="/home/${USER}/Libs"
 DAEMON_SRC="${DAEMON}/src"
 CONTRIB_PATH="${DAEMON}/contrib"
-# DESTINATION_PATH=/home/${USER}/Projects/ring-plugins
 DESTINATION_PATH="./../build/"
 PLUGINS_LIB="../lib"
 
@@ -125,12 +124,13 @@
 
 	# Create so destination folder
     $CXX --std=c++14 -O3 -g -fPIC \
-	-Wl,-Bsymbolic \
+	-Wl,-Bsymbolic,-rpath,"\${ORIGIN}" \
 	-shared \
 	-Wall -Wextra \
 	-Wno-unused-variable \
 	-Wno-unused-function \
 	-Wno-unused-parameter \
+	-DTFLITE \
 	-I"." \
 	-I${DAEMON_SRC} \
 	-I"${CONTRIB_PATH}/${CONTRIB_PLATFORM}/include" \
diff --git a/ForegroundSegmentation/data/backgrounds/background2.png b/ForegroundSegmentation/data/backgrounds/background2.png
index 849251c..1183a25 100644
--- a/ForegroundSegmentation/data/backgrounds/background2.png
+++ b/ForegroundSegmentation/data/backgrounds/background2.png
Binary files differ
diff --git a/ForegroundSegmentation/data/models/frozen_inference_graph.pb b/ForegroundSegmentation/data/models/frozen_inference_graph.pb
new file mode 100644
index 0000000..9d59213
--- /dev/null
+++ b/ForegroundSegmentation/data/models/frozen_inference_graph.pb
Binary files differ
diff --git a/ForegroundSegmentation/data/models/model_256_F_16.tflite b/ForegroundSegmentation/data/models/model_256_F_16.tflite
new file mode 100644
index 0000000..f022db6
--- /dev/null
+++ b/ForegroundSegmentation/data/models/model_256_F_16.tflite
Binary files differ
diff --git a/ForegroundSegmentation/data/models/model_256_Qlatency_32.tflite b/ForegroundSegmentation/data/models/model_256_Qlatency_32.tflite
new file mode 100644
index 0000000..40cdd68
--- /dev/null
+++ b/ForegroundSegmentation/data/models/model_256_Qlatency_32.tflite
Binary files differ
diff --git a/ForegroundSegmentation/data/preferences.json b/ForegroundSegmentation/data/preferences.json
index edc5ca7..372215b 100644
--- a/ForegroundSegmentation/data/preferences.json
+++ b/ForegroundSegmentation/data/preferences.json
@@ -15,7 +15,7 @@
         "key": "modellist",
         "title": "Model to load",
         "summary": "Select the model to use",
-        "defaultValue": "model_256_Qlatency.tflite",
+        "defaultValue": "model_256_F_16.tflite",
         "entries": ["mv2_DLV3_256_MQ", "mv2_DLV3_256_QLATENCY_16", "mv2_DLV3_256_QLATENCY_8"],
         "entryValues": ["mobilenet_v2_deeplab_v3_256_myquant.tflite", "model_256_Qlatency_16.tflite", "model_256_Qlatency.tflite"]
     },
@@ -25,7 +25,7 @@
         "key": "backgroundlist",
         "title": "Background image",
         "summary": "Select the image background to use",
-        "defaultValue": "background1.png",
+        "defaultValue": "background2.png",
         "entries": ["background1", "background2"],
         "entryValues": ["background1.png", "background2.png"]
     }
diff --git a/ForegroundSegmentation/main.cpp b/ForegroundSegmentation/main.cpp
index 037fbbd..2798d7f 100644
--- a/ForegroundSegmentation/main.cpp
+++ b/ForegroundSegmentation/main.cpp
@@ -2,9 +2,10 @@
 #include <string.h>
 #include <thread>
 #include <memory>
-#include "plugin/jamiplugin.h"
+#include <plugin/jamiplugin.h>
 #include "pluginMediaHandler.h"
 
+
 extern "C" 
 {
     void pluginExit(void) { }
diff --git a/ForegroundSegmentation/manifest.json b/ForegroundSegmentation/manifest.json
index 587c765..c5aba4b 100644
--- a/ForegroundSegmentation/manifest.json
+++ b/ForegroundSegmentation/manifest.json
@@ -1,6 +1,5 @@
 {
-	"name": "foregroungsegmentation",
+	"name": "foregroundsegmentation",
 	"description" : "Foreground segmentation plugin with tensorflow",
-	"version" : "1.0.0",
-	"libs" : "libtensorflowlite.so"
+	"version" : "1.0.0"
 }
diff --git a/ForegroundSegmentation/pluginInference.cpp b/ForegroundSegmentation/pluginInference.cpp
index 581dae5..1c84407 100644
--- a/ForegroundSegmentation/pluginInference.cpp
+++ b/ForegroundSegmentation/pluginInference.cpp
@@ -3,49 +3,36 @@
 #include <cstring>
 #include <numeric>
 #include "pluglog.h"
-// Tensorflow headers
-#include "tensorflow/lite/interpreter.h"
+
+const char sep = separator();
+const std::string TAG = "FORESEG";
 
 namespace jami 
 {
-	PluginInference::PluginInference(TFModel model) : TensorflowInference(model) {	}
+	PluginInference::PluginInference(TFModel model) : TensorflowInference(model) 
+	{
+#ifndef TFLITE	
+		//Initialize TENSORFLOW_CC lib
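+		// tensorflow::port::InitMain() expects argc/argv, so hand it a fake program name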
+		static const char* kFakeName = "fake program name";
+		int argc = 1;
+		char* fake_name_copy = strdup(kFakeName);
+		char** argv = &fake_name_copy;
+		tensorflow::port::InitMain(kFakeName, &argc, &argv);
+		if (argc > 1) {
+            Plog::log(Plog::LogPriority::INFO, "TENSORFLOW INIT", "Unknown argument " );
+		}
+		free(fake_name_copy);
+#endif	//TFLITE	
+	}
 
 	PluginInference::~PluginInference(){}
 
-	void PluginInference::feedInput(std::vector<uint8_t> &in, int imageWidth,
-										int imageHeight, int imageNbChannels) 
-	{
-		auto input = getInput();
-		std::vector<int> dims = input.second;
-		// Relevant data starts from index 1, dims.at(0) = 1
-		int expectedWidth = dims.at(1);
-		int expectedHeight = dims.at(2);
-		int expectedNbChannels = dims.at(3);
 
-		if (imageNbChannels != expectedNbChannels) 
-		{
-			std::cerr << "The number of channels in the input should match the number "
-						"of channels in the model";
-		} else if (imageWidth != expectedWidth || imageHeight != expectedHeight) 
-		{
-			std::cerr << "The width and height of the input image doesn't match the "
-						"expected width and height of the model";
-		} else 
-		{
-			// Get the input pointer and feed it with data
-			uint8_t *inputDataPointer = input.first;
-			
-			for (size_t i = 0; i < in.size(); i++) 
-			{
-				inputDataPointer[i] = in.at(i);
-			}
-			// Use of memcopy for performance
-			std::memcpy(inputDataPointer, in.data(), in.size() * sizeof(uint8_t));
-		}
-	}
 
+#ifdef TFLITE
 	std::pair<uint8_t *, std::vector<int>> PluginInference::getInput() 
 	{
+		// Plog::log(Plog::LogPriority::INFO, TAG, "inside getInput()");
 		// We assume that we have only one input
 		// Get the input index
 		int input = interpreter->inputs()[0];
@@ -75,6 +62,7 @@
 	std::vector<float>
 	PluginInference::masksPredictions() const 
 	{
+		// Plog::log(Plog::LogPriority::INFO, TAG, "inside masksPredictions()");
         int outputIndex = interpreter->outputs()[0];
         std::vector<int> dims = getTensorDimensions(outputIndex);
 		int totalDimensions = 1;
@@ -106,9 +94,9 @@
         return out;
 	}
 
-
 	void PluginInference::setExpectedImageDimensions() 
 	{
+		// Plog::log(Plog::LogPriority::INFO, TAG, "inside setExpectedImageDimensions()");
 		// We assume that we have only one input
 		// Get the input index
 		int input = interpreter->inputs()[0];
@@ -119,19 +107,114 @@
 		imageHeight = dims.at(2);
 		imageNbChannels = dims.at(3);
 	}
+#else //TFLITE
+	// Copies the pixel data of an already decoded and resized cv::Mat into the
+	// model's pre-allocated input tensor.
+	void PluginInference::ReadTensorFromMat(const cv::Mat& image) 
+	{	
+		// std::ostringstream oss;
+		// oss << image.rows;
+		// Plog::log(Plog::LogPriority::INFO, "ReadTensorFromMat", oss.str());
+		tensorflow::StringPiece tmp_data = imageTensor.tensor_data();
+		// oss << image.rows;
+		// Plog::log(Plog::LogPriority::INFO, "ReadTensorFromMat", oss.str());
+		// Copy rows * cols * channels bytes so that all three channels reach the input tensor
+		memcpy(const_cast<char*>(tmp_data.data()), image.data, image.total() * image.elemSize());
+	}
+
+	std::vector<float>
+	PluginInference::masksPredictions() const 
+	{
+		std::ostringstream oss;
+		std::vector<int> dims;
+		int flatSize = 1;
+		int num_dimensions = outputs[0].shape().dims();
+		// oss << num_dimensions;
+		for(int ii_dim=0; ii_dim<num_dimensions; ii_dim++) {
+			// oss << "  " << outputs[0].shape().dim_size(ii_dim);
+			dims.push_back(outputs[0].shape().dim_size(ii_dim));
+			flatSize *= outputs[0].shape().dim_size(ii_dim);
+		}
+
+		// oss << "  " << flatSize;
+		// Plog::log(Plog::LogPriority::INFO, "masksPredictions", oss.str());
+		std::vector<float> out;
+		int type = outputs[0].dtype();
+
+		// oss << "  " << type;
+		// Plog::log(Plog::LogPriority::INFO, "masksPredictions", oss.str());
+
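+		// The model's ArgMax output may be exported as int32 or int64 depending on the
+		// graph, so handle both; any other dtype falls back to an all-zero mask.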
+		switch(type)
+		{
+			case tensorflow::DataType::DT_INT32:
+			{
+				for (int offset = 0; offset < flatSize; offset++)
+				{			
+					// Get value through .flat()
+					out.push_back(static_cast<float> (outputs[0].flat<tensorflow::int32>()(offset)));					
+				}
+				break;
+			}
+			case tensorflow::DataType::DT_INT64:
+			{
+				for (int offset = 0; offset < flatSize; offset++)
+					// Get value through .flat()
+					// Get vaule through .flat()
+					if (outputs[0].flat<tensorflow::int64>()(offset) == 15 or outputs[0].flat<tensorflow::int64>()(offset) == 1)
+					{
+						oss << "  " << outputs[0].flat<tensorflow::int64>()(offset);
+						Plog::log(Plog::LogPriority::INFO, "masksPredictions", oss.str());
+					}
+					out.push_back(static_cast<float> (outputs[0].flat<tensorflow::int64>()(offset)));					
+				}
+				break;
+			}
+			default:
+			{
+				for (int offset = 0; offset < flatSize; offset++)
+				{			
+					// Unsupported output type: fill the mask with zeros
+					out.push_back(0);					
+				}
+				break;
+			}
+		}
+
+        return out;
+	}
+
+	void PluginInference::setExpectedImageDimensions() 
+	{
+		
+		if (tfModel.dims[1] != 0)
+		{
+			imageWidth = tfModel.dims[1];
+		}
+		if (tfModel.dims[2] != 0)
+		{
+			imageHeight = tfModel.dims[2];
+		}
+		if (tfModel.dims[3] != 0)
+		{
+			imageNbChannels = tfModel.dims[3];
+		}	
+	}
+#endif
 
 	int PluginInference::getImageWidth() const 
 	{ 
+		// Plog::log(Plog::LogPriority::INFO, TAG, "inside getImageWidth()");
 		return imageWidth; 
 	}
 
 	int PluginInference::getImageHeight() const 
 	{ 
+		// Plog::log(Plog::LogPriority::INFO, TAG, "inside getImageHeight()");
 		return imageHeight; 
 	}
 
 	int PluginInference::getImageNbChannels() const 
 	{
+		// Plog::log(Plog::LogPriority::INFO, TAG, "inside getImageNbChannels()");
 		return imageNbChannels;
 	}
 } // namespace jami
diff --git a/ForegroundSegmentation/pluginInference.h b/ForegroundSegmentation/pluginInference.h
index 4829ebd..d052257 100644
--- a/ForegroundSegmentation/pluginInference.h
+++ b/ForegroundSegmentation/pluginInference.h
@@ -24,21 +24,7 @@
 			PluginInference(TFModel model);
 			~PluginInference();
 
-			std::vector<float> masksPredictions() const;
-
-			/**
-			 * @brief feedInput
-			 * Checks if the image input dimensions matches the expected ones in the model
-			 * If so, fills the image data directly to the model input pointer
-			 * Otherwise, resizes the image in order to match the model expected image
-			 * dimensions And fills the image data throught the resize method
-			 * @param in: image data
-			 * @param imageWidth
-			 * @param imageHeight
-			 * @param imageNbChannels
-			 **/
-			void feedInput(std::vector<uint8_t> &in, int imageWidth, int imageHeight,
-							int imageNbChannels);
+#ifdef TFLITE
 			/**
 			 * @brief getInput
 			 * Returns the input where to fill the data
@@ -51,6 +37,14 @@
 			 */
 			std::pair<uint8_t *, std::vector<int>> getInput();
 
+#else
+			void ReadTensorFromMat(const cv::Mat& image);
+
+#endif //TFLITE
+
+			std::vector<float> masksPredictions() const;
+
+
 			/**
 			 * @brief setExpectedImageDimensions
 			 * Sets imageWidth and imageHeight from the sources
@@ -62,6 +56,7 @@
 			int getImageHeight() const;
 			int getImageNbChannels() const;
 
+
 		private:
 			int imageWidth = 0;
 			int imageHeight = 0;
diff --git a/ForegroundSegmentation/pluginMediaHandler.cpp b/ForegroundSegmentation/pluginMediaHandler.cpp
index fe73f2d..4ca4a55 100644
--- a/ForegroundSegmentation/pluginMediaHandler.cpp
+++ b/ForegroundSegmentation/pluginMediaHandler.cpp
@@ -2,7 +2,7 @@
 // Logger
 #include "pluglog.h"
 const char sep = separator();
-const std::string TAG = "GENERIC";
+const std::string TAG = "FORESEG";
 
 namespace jami 
 {
@@ -11,8 +11,9 @@
 	{
     	setGlobalPluginParameters(ppm_);
     	setId(datapath_);
-		mpInput = std::make_shared<VideoSubscriber>(datapath_);
-		mpReceive = std::make_shared<VideoSubscriber>(datapath_);
+		mVS = std::make_shared<VideoSubscriber>(datapath_);
+		// mpInput = std::make_shared<VideoSubscriber>(datapath_);
+		// mpReceive = std::make_shared<VideoSubscriber>(datapath_);
 	}
 
 	void PluginMediaHandler::notifyAVFrameSubject(const StreamData &data, jami::avSubjectPtr subject)
@@ -39,11 +40,13 @@
 		oss << "preferredStreamDirection " << preferredStreamDirection << std::endl;
 		if (data.type == StreamType::video && !data.direction && data.direction == preferredStreamDirection) 
 		{
-			subject->attach(mpInput.get()); // my image
+			subject->attach(mVS.get()); // my image
+			// subject->attach(mpInput.get()); // my image
 			oss << "got my sent image attached" << std::endl;
 		} else if (data.type == StreamType::video && data.direction && data.direction == preferredStreamDirection) 
 		{
-			subject->attach(mpReceive.get()); // the image i receive from the others on the call
+			subject->attach(mVS.get()); // the image i receive from the others on the call
+			// subject->attach(mpReceive.get()); // the image i receive from the others on the call
 			oss << "got my received image attached" << std::endl;
 		}
 		Plog::log(Plog::LogPriority::INFO, TAG, oss.str());
@@ -70,14 +73,15 @@
 
 	void PluginMediaHandler::detach()
 	{
-		mpInput->detach();
-		mpReceive->detach();
+		mVS->detach();
+		// mpInput->detach();
+		// mpReceive->detach();
 	}
 
 	PluginMediaHandler::~PluginMediaHandler() 
 	{
 		std::ostringstream oss;
-		oss << " ~GENERIC Plugin" << std::endl;
+		oss << " ~FORESEG Plugin" << std::endl;
 		Plog::log(Plog::LogPriority::INFO, TAG, oss.str());
 		detach();
 	}
diff --git a/ForegroundSegmentation/pluginMediaHandler.h b/ForegroundSegmentation/pluginMediaHandler.h
index 2243e69..e213c0a 100644
--- a/ForegroundSegmentation/pluginMediaHandler.h
+++ b/ForegroundSegmentation/pluginMediaHandler.h
@@ -23,8 +23,9 @@
 			virtual void detach() override;
 			virtual void setPreferenceAttribute(const std::string& key, const std::string& value) override;
 			
-			std::shared_ptr<VideoSubscriber> mpInput;
-			std::shared_ptr<VideoSubscriber> mpReceive;
+			std::shared_ptr<VideoSubscriber> mVS;
+			// std::shared_ptr<VideoSubscriber> mpInput;
+			// std::shared_ptr<VideoSubscriber> mpReceive;
 			
 			std::string dataPath() const { return datapath_; }
 
diff --git a/ForegroundSegmentation/pluginParameters.h b/ForegroundSegmentation/pluginParameters.h
index 5bd5d7d..ec38f22 100644
--- a/ForegroundSegmentation/pluginParameters.h
+++ b/ForegroundSegmentation/pluginParameters.h
@@ -5,11 +5,18 @@
 #include <string>
 #include <map>
 
-
 struct PluginParameters {
     std::string stream = "out";
+#ifdef TFLITE
+#ifdef __ANDROID__
     std::string model = "model_256_Qlatency.tflite";
-    std::string image = "background1.png";
+#else
+    std::string model = "model_256_F_16.tflite";
+#endif    
+#else
+    std::string model = "frozen_inference_graph.pb";
+#endif //TFLITE
+    std::string image = "background2.png";
 };
 
 void setGlobalPluginParameters(std::map<std::string, std::string> pp);
diff --git a/ForegroundSegmentation/pluginProcessor.cpp b/ForegroundSegmentation/pluginProcessor.cpp
index e66cf72..6b215a4 100644
--- a/ForegroundSegmentation/pluginProcessor.cpp
+++ b/ForegroundSegmentation/pluginProcessor.cpp
@@ -6,7 +6,7 @@
 #include <opencv2/imgcodecs.hpp>
 #include <opencv2/core.hpp>
 // Logger
-#include "pluglog.h"
+#include <pluglog.h>
 // Avutil/Display for rotation
 
 extern "C" {
@@ -15,7 +15,7 @@
 
 const char sep = separator();
 
-const std::string TAG = "GENERIC";
+const std::string TAG = "FORESEG";
 
 PluginParameters* mPluginParameters = getGlobalPluginParameters(); 
 
@@ -23,19 +23,27 @@
 {
 
 	PluginProcessor::PluginProcessor(const std::string &dataPath):
-	pluginInference{TFModel{dataPath + sep + "models/" + mPluginParameters->model,
-	// pluginInference{TFModel{dataPath + sep + "models/mobilenet_v2_deeplab_v3_256_myquant.tflite",
-							dataPath + sep + "models/pascal_voc_labels_list.tflite"}},
-	// backgroundPath{dataPath + sep + "backgrounds" + sep + "background1.png"}
+	pluginInference{TFModel{dataPath + sep + "models/" + mPluginParameters->model}},
 	backgroundPath{dataPath + sep + "backgrounds" + sep + mPluginParameters->image}
 	{
 		initModel();
 		backgroundImage = cv::imread(backgroundPath);
+		if (backgroundImage.cols == 0)
+		{
+			Plog::log(Plog::LogPriority::ERROR, TAG, "Background image not loaded");
+		}
+		cv::cvtColor(backgroundImage, backgroundImage, cv::COLOR_BGR2RGB);
+#ifndef __ANDROID__		
+		backgroundImage.convertTo(backgroundImage, CV_32FC3);
+#endif
+		//TODO: properly resize the background image to maintain background aspect ratio in the output image;
+        Plog::log(Plog::LogPriority::INFO, TAG, mPluginParameters->model);
 	}
 
 	void PluginProcessor::initModel()
 	{
-		try {
+		Plog::log(Plog::LogPriority::INFO, TAG, "inside initModel()");
+		try {			
 			pluginInference.init();
 		} catch (std::exception& e) 
 		{
@@ -43,11 +51,14 @@
 		}
 		std::ostringstream oss;
         oss << "Model is allocated " << pluginInference.isAllocated();
-        Plog::log(Plog::LogPriority::INFO, "GENERIC", oss.str());
+        Plog::log(Plog::LogPriority::INFO, TAG, oss.str());
 	}
 
+	
+#ifdef TFLITE	
 	void PluginProcessor::feedInput(const cv::Mat &frame) 
 	{
+		Plog::log(Plog::LogPriority::INFO, TAG, "inside feedInput()");
 		auto pair = pluginInference.getInput();
 		uint8_t *inputPointer = pair.first;
 		// Relevant data starts from index 1, dims.at(0) = 1
@@ -59,9 +70,19 @@
 
 		inputPointer = nullptr;
 	}
+#else
+	void PluginProcessor::feedInput(const cv::Mat &frame) 
+	{
+		std::ostringstream oss;
+		oss << frame.rows;
+		Plog::log(Plog::LogPriority::INFO, TAG, oss.str());
+		pluginInference.ReadTensorFromMat(frame);
+	}
+#endif //TFLITE
 
 	void PluginProcessor::computePredictions() 
 	{
+		Plog::log(Plog::LogPriority::INFO, TAG, "inside computePredictions()");
 		// Run the graph
 		pluginInference.runGraph();
 		auto predictions = pluginInference.masksPredictions();
@@ -74,29 +95,32 @@
 	{
 		for (size_t i = 0; i < computedMask.size(); i++) 
 		{
-			// for (int j = 0; j < computedMask.rows; j++) 
-			{
-				// Log the predictions
-				std::ostringstream oss;
-				// oss << "\nrows: " << computedMask.rows << std::endl;
-				// oss << "\ncols: " << computedMask.cols << std::endl;
-				// oss << "\nclass "<<i<<"x"<<j<<": " << computedMask.at<int>(cv::Point(i,j)) << std::endl;
-				oss << "\nclass: "<< computedMask[i] << std::endl;
-				Plog::log(Plog::LogPriority::INFO, TAG, oss.str());
-			}
+			// Log the predictions
+			std::ostringstream oss;
+			oss << "\nclass: "<< computedMask[i] << std::endl;
+			Plog::log(Plog::LogPriority::INFO, TAG, oss.str());
 		}
 	}
 
-	void PluginProcessor::drawMaskOnFrame(
-		cv::Mat &frame, std::vector<float>computedMask) 
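+	// Blends backgroundImage into the frame using the predicted mask; on desktop the
+	// blending is done on the reduced (model-sized) copy and then upscaled back into frame.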
+	void PluginProcessor::drawMaskOnReducedFrame(cv::Mat &frame,
+		cv::Mat &frameReduced, std::vector<float>computedMask)
 	{
-		scaleX = (float)(backgroundImage.cols) / (float)(pluginInference.getImageWidth());
-		scaleY = (float)(backgroundImage.rows) / (float)(pluginInference.getImageHeight());
-		int absOFFSETY = 8*scaleY;
-		int absOFFSETX = 8*scaleX;
+		// Plog::log(Plog::LogPriority::INFO, TAG, "inside drawMaskOnReducedFrame()");
+		if (computedMask.empty())
+		{
+			return;
+		}
+		// TODO: make these offsets depend on the model, not on the platform
+#ifdef __ANDROID__
+		int absOFFSETY = 4;
+		int absOFFSETX = 4;
+#else
+		int absOFFSETY = 8;
+		int absOFFSETX = 8;
+#endif		
 		int OFFSETY = -absOFFSETY;
 		int OFFSETX = -absOFFSETX;
-		if (computedMask1.size() == 0)
+		if (computedMask1.empty())
 		{
 			computedMask3 = std::vector<float>(computedMask.size(), 0);
 			computedMask2 = std::vector<float>(computedMask.size(), 0);
@@ -114,10 +138,124 @@
 			else
 			{
 				computedMask[i] = 0;
+				#ifdef __ANDROID__
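+				// Temporal smoothing: weight the three previous masks to reduce flickering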
 				mFloatMask[i] = (float)(   (int)((0.6 * computedMask1[i] + 0.3 * computedMask2[i] + 0.1 * computedMask3[i])) % 256   );
+				#else
+				mFloatMask[i] = 0.;
+				#endif
 			}			
 		}
+        cv::Mat maskImg(pluginInference.getImageWidth(), pluginInference.getImageHeight(), 
+							CV_32FC1, mFloatMask.data());
 
+		cv::resize(maskImg, maskImg, cv::Size(maskImg.cols+2*absOFFSETX, maskImg.rows+2*absOFFSETY));
+
+		kSize = cv::Size(maskImg.cols*0.05, maskImg.rows*0.05);
+		if(kSize.height%2 == 0)
+		{
+			kSize.height -= 1;
+		}
+		if(kSize.width%2 == 0)
+		{
+			kSize.width -= 1;
+		}
+
+		GaussianBlur (maskImg, maskImg, kSize, 0); //mask from 0 to 255.
+		maskImg = maskImg / 255.; //mask from 0 to 1.
+#ifndef __ANDROID__
+		cv::Rect roi(absOFFSETX+OFFSETX, absOFFSETY+OFFSETY, backgroundImage.cols, backgroundImage.rows); //Create a rect 
+		cv::Mat roiMaskImg = maskImg(roi); //Crop the region of interest using above rect
+
+		cv::Mat roiMaskImgComplementary = 1. - roiMaskImg; //mask from 1. to 0
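+		// Final pixel = frame * mask + background * (1 - mask); the single-channel masks are
+		// replicated to three channels below so they can be multiplied with the BGR images directly.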
+		
+		std::vector<cv::Mat> channels;
+		std::vector<cv::Mat> channelsComplementary;
+
+		channels.emplace_back(roiMaskImg);
+		channels.emplace_back(roiMaskImg);
+		channels.emplace_back(roiMaskImg);
+		channelsComplementary.emplace_back(roiMaskImgComplementary);
+		channelsComplementary.emplace_back(roiMaskImgComplementary);
+		channelsComplementary.emplace_back(roiMaskImgComplementary);
+
+		cv::merge(channels, roiMaskImg);
+		cv::merge(channelsComplementary, roiMaskImgComplementary);
+
+		
+		int origType = frameReduced.type();
+		int roiMaskType = roiMaskImg.type();
+
+		cv::Mat clone = frameReduced.clone();
+		
+		clone.convertTo(clone, roiMaskType);
+		clone = clone.mul(roiMaskImg);
+		clone += backgroundImage.mul(roiMaskImgComplementary);
+		clone.convertTo(clone, origType);
+		int numberChannels = 3;
+
+		cv::resize(clone, clone, cv::Size(frame.cols, frame.rows));
+
+		std::memcpy(frame.data, clone.data, 
+		static_cast<size_t>(clone.cols) * static_cast<size_t>(clone.rows) * static_cast<size_t>(numberChannels) * sizeof(uint8_t));
+
+#else
+		for (int col = 0; col < frame.cols; col++)
+		{
+			for (int row = 0; row < frame.rows; row++)
+			{
+				float maskValue = maskImg.at<float>(cv::Point(col+absOFFSETX+OFFSETX, row+absOFFSETY+OFFSETY));
+				frame.at<cv::Vec3b>(cv::Point(col, row)) = 
+					backgroundImage.at<cv::Vec3b>(cv::Point(col, row)) * (1. - maskValue)
+					+ frame.at<cv::Vec3b>(cv::Point(col, row)) * maskValue;
+			}
+		}
+#endif // __ANDROID__
+		computedMask3 = std::vector<float>(computedMask2.begin(), computedMask2.end());
+		computedMask2 = std::vector<float>(computedMask1.begin(), computedMask1.end());
+		computedMask1 = std::vector<float>(computedMask.begin(), computedMask.end());
+	}
+
+
+	void PluginProcessor::drawMaskOnFrame(
+		cv::Mat &frame, std::vector<float>computedMask) 
+	{
+		// Plog::log(Plog::LogPriority::INFO, TAG, "inside drawMaskOnFrame()");
+		if (computedMask.empty())
+		{
+			return;
+		}
+		
+		scaleX = (float)(backgroundImage.cols) / (float)(pluginInference.getImageWidth());
+		scaleY = (float)(backgroundImage.rows) / (float)(pluginInference.getImageHeight());
+		int absOFFSETY = 4*scaleY;
+		int absOFFSETX = 4*scaleX;
+		int OFFSETY = -absOFFSETY;
+		int OFFSETX = -absOFFSETX;
+		if (computedMask1.empty())
+		{
+			computedMask3 = std::vector<float>(computedMask.size(), 0);
+			computedMask2 = std::vector<float>(computedMask.size(), 0);
+			computedMask1 = std::vector<float>(computedMask.size(), 0);
+		}
+
+		std::vector<float> mFloatMask(computedMask.begin(), computedMask.end());
+		for (size_t i = 0; i < computedMask.size(); i++)
+		{
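+			// Class 15 is the "person" label in the PASCAL VOC label map used by these models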
+			if(computedMask[i] == 15)
+			{
+				computedMask[i] = 255;
+				mFloatMask[i] = 255;
+			}
+			else
+			{
+				computedMask[i] = 0;
+				#ifdef __ANDROID__
+				mFloatMask[i] = (float)(   (int)((0.6 * computedMask1[i] + 0.3 * computedMask2[i] + 0.1 * computedMask3[i])) % 256   );
+				#else
+				mFloatMask[i] = 0.;
+				#endif
+			}			
+		}
         cv::Mat maskImg(pluginInference.getImageWidth(), pluginInference.getImageHeight(), 
 							CV_32FC1, mFloatMask.data());
 
@@ -133,24 +271,56 @@
 			kSize.width -= 1;
 		}
 
-		GaussianBlur (maskImg, maskImg, kSize, 0);
+		GaussianBlur (maskImg, maskImg, kSize, 0); //mask from 0 to 255.
+		maskImg = maskImg / 255.; //mask from 0 to 1.
+#ifndef __ANDROID__
+		cv::Rect roi(absOFFSETX+OFFSETX, absOFFSETY+OFFSETY, backgroundImage.cols, backgroundImage.rows); //Create a rect 
+		cv::Mat roiMaskImg = maskImg(roi); //Crop the region of interest using above rect
+
+		cv::Mat roiMaskImgComplementary = 1. - roiMaskImg; //mask from 1. to 0
 		
+		std::vector<cv::Mat> channels;
+		std::vector<cv::Mat> channelsComplementary;
+
+		channels.emplace_back(roiMaskImg);
+		channels.emplace_back(roiMaskImg);
+		channels.emplace_back(roiMaskImg);
+		channelsComplementary.emplace_back(roiMaskImgComplementary);
+		channelsComplementary.emplace_back(roiMaskImgComplementary);
+		channelsComplementary.emplace_back(roiMaskImgComplementary);
+
+		cv::merge(channels, roiMaskImg);
+		cv::merge(channelsComplementary, roiMaskImgComplementary);
+
 		
+		int origType = frame.type();
+		int roiMaskType = roiMaskImg.type();
+
+		cv::Mat clone = frame.clone();
+		
+		clone.convertTo(clone, roiMaskType);
+		clone = clone.mul(roiMaskImg);
+		clone += backgroundImage.mul(roiMaskImgComplementary);
+		clone.convertTo(clone, origType);
+		int numberChannels = 3;
+		std::memcpy(frame.data, clone.data, 
+		static_cast<size_t>(clone.cols) * static_cast<size_t>(clone.rows) * static_cast<size_t>(numberChannels) * sizeof(uint8_t));
+
+#else
 		for (int col = 0; col < frame.cols; col++)
 		{
 			for (int row = 0; row < frame.rows; row++)
 			{
-				cv::Point point(col+absOFFSETX+OFFSETX, row+absOFFSETY+OFFSETY);
-				float maskValue = maskImg.at<float>(point)/255.;
+				float maskValue = maskImg.at<float>(cv::Point(col+absOFFSETX+OFFSETX, row+absOFFSETY+OFFSETY));
 				frame.at<cv::Vec3b>(cv::Point(col, row)) = 
-					backgroundImage.at<cv::Vec3b>(cv::Point(col, row)) * (1 - maskValue)
+					backgroundImage.at<cv::Vec3b>(cv::Point(col, row)) * (1. - maskValue)
 					+ frame.at<cv::Vec3b>(cv::Point(col, row)) * maskValue;
 			}
 		}
-
+#endif // __ANDROID__
 		computedMask3 = std::vector<float>(computedMask2.begin(), computedMask2.end());
 		computedMask2 = std::vector<float>(computedMask1.begin(), computedMask1.end());
 		computedMask1 = std::vector<float>(computedMask.begin(), computedMask.end());
 	}
 
-} // namespace jami
+} // namespace jami
\ No newline at end of file
diff --git a/ForegroundSegmentation/pluginProcessor.h b/ForegroundSegmentation/pluginProcessor.h
index 91ea197..9bdac86 100644
--- a/ForegroundSegmentation/pluginProcessor.h
+++ b/ForegroundSegmentation/pluginProcessor.h
@@ -14,10 +14,10 @@
 #include <libavutil/frame.h>
 }
 // Plugin
-#include "plugin/jamiplugin.h"
-#include "plugin/mediahandler.h"
+#include <plugin/jamiplugin.h>
+#include <plugin/mediahandler.h>
 // Frame scaler for frame transformations
-#include "framescaler.h"
+#include <framescaler.h>
 
 namespace jami 
 {
@@ -43,9 +43,8 @@
 			void computePredictions();
 						
 			void printMask();
-			void drawMaskOnFrame(
-				cv::Mat &frame,
-				const std::vector<float> computedMask);		
+			void drawMaskOnFrame(cv::Mat &frame, const std::vector<float> computedMask);	
+			void drawMaskOnReducedFrame(cv::Mat &frame, cv::Mat &frameReduced, std::vector<float>computedMask);
 
 			// Output predictions
 			std::vector<float> computedMask;
diff --git a/ForegroundSegmentation/videoSubscriber.cpp b/ForegroundSegmentation/videoSubscriber.cpp
index cf1b365..d22b977 100644
--- a/ForegroundSegmentation/videoSubscriber.cpp
+++ b/ForegroundSegmentation/videoSubscriber.cpp
@@ -1,4 +1,3 @@
-
 #include "videoSubscriber.h"
 // Use for display rotation matrix
 extern "C" {
@@ -10,7 +9,7 @@
 #include <opencv2/imgcodecs.hpp>
 
 // LOGGING
-#include "pluglog.h"
+#include <pluglog.h>
 
 const std::string TAG = "FORESEG";
 const char sep = separator();
@@ -34,12 +33,14 @@
                 {
                     break;
                 }
+				// Plog::log(Plog::LogPriority::INFO, TAG, "feed");
 				pluginProcessor.feedInput(fcopy.resizedFrameRGB);
                 newFrame = false;
                 /** Unclock the mutex, this way we let the other thread
                  *  copy new data while we are processing the old one
                  **/
                 l.unlock();
+				// Plog::log(Plog::LogPriority::INFO, TAG, "compute");
 				pluginProcessor.computePredictions();
             }
         });
@@ -49,18 +50,17 @@
 	{
 		std::ostringstream oss;
 		oss << "~MediaProcessor" << std::endl;
-		Plog::log(Plog::LogPriority::INFO, TAG, oss.str());
 		stop();
 		processFrameThread.join();
+		Plog::log(Plog::LogPriority::INFO, TAG, oss.str());
 	}
 
 	void VideoSubscriber::update(jami::Observable<AVFrame *> *, AVFrame *const &iFrame) 
 	{
+		// Plog::log(Plog::LogPriority::INFO, TAG, "inside update()");
 		if (isAttached) 
 		{
 			std::ostringstream oss;
-			//oss << "Looking for iFrame signal: ";
-			//Plog::log(Plog::LogPriority::INFO, TAG, oss.str());
 			//======================================================================================
 			// GET FRAME ROTATION
 			AVFrameSideData *side_data =
@@ -73,6 +73,7 @@
 				angle = static_cast<int>(av_display_rotation_get(matrix_rotation));
 			}
 
+			// Plog::log(Plog::LogPriority::INFO, TAG, "step GET RAW FRAME");
 			//======================================================================================
 			// GET RAW FRAME
 			// Use a non-const Frame
@@ -88,8 +89,8 @@
 						static_cast<size_t>(bgrFrame->linesize[0])};
 			// First clone the frame as the original one is unusable because of
 			// linespace
+
 			cv::Mat clone = frame.clone();
-			//pluginProcessor.backgroundImage = frame.clone();
 			//======================================================================================
 			// ROTATE THE FRAME
 			// rotateFrame(angle, clone);
@@ -97,21 +98,14 @@
 			
 			if (firstRun) 
 			{
+				// Plog::log(Plog::LogPriority::INFO, TAG, "step firstRun");
 				pluginProcessor.pluginInference.setExpectedImageDimensions();
 				fcopy.resizedSize = cv::Size{pluginProcessor.pluginInference.getImageWidth(), pluginProcessor.pluginInference.getImageHeight()};
 
 				cv::resize(clone, fcopy.resizedFrameRGB, fcopy.resizedSize);
-				cv::resize(pluginProcessor.backgroundImage, pluginProcessor.backgroundImage, fcopy.originalSize);
+				// cv::resize(pluginProcessor.backgroundImage, pluginProcessor.backgroundImage, fcopy.originalSize);
+				cv::resize(pluginProcessor.backgroundImage, pluginProcessor.backgroundImage, fcopy.resizedSize);
 				
-				// Print Frame dimensions
-				// std::ostringstream oss1;
-				// oss1 << "IS ALLOCATED " << pluginProcessor.pluginInference.isAllocated() << std::endl;
-				// oss1 << "FRAME[]: w: " << iFrame->width << " , h: " << iFrame->height
-					// << " , format: " << iFrame->format << std::endl;
-				// oss1 << "DESIRED WIDTH: " << pluginProcessor.pluginInference.getImageWidth() << std::endl;
-				// oss1 << "DESIRED WIDTH: " << pluginProcessor.pluginInference.getImageHeight() << std::endl;
-				// Plog::log(Plog::LogPriority::INFO, TAG, oss1.str());
-
 				firstRun = false;
 			}
 
@@ -119,17 +113,18 @@
 
 			if (!newFrame) 
 			{
+				// Plog::log(Plog::LogPriority::INFO, TAG, "step newFrame");
 				std::lock_guard<std::mutex> l(inputLock);
 				cv::resize(clone, fcopy.resizedFrameRGB, fcopy.resizedSize);
 				newFrame = true;
 				inputCv.notify_all();
 			}
 
+			// Plog::log(Plog::LogPriority::INFO, TAG, "step result");
 			fcopy.predictionsFrameBGR = frame;
-			// pluginProcessor.printMask();
-			pluginProcessor.drawMaskOnFrame(fcopy.predictionsFrameBGR, pluginProcessor.computedMask);
-
-
+			fcopy.predictionsResizedFrameBGR = fcopy.resizedFrameRGB.clone();
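+			// Blend on the model-sized copy; drawMaskOnReducedFrame() writes the upscaled result into the full frame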
+			// pluginProcessor.drawMaskOnFrame(fcopy.predictionsFrameBGR, pluginProcessor.computedMask);
+			pluginProcessor.drawMaskOnReducedFrame(fcopy.predictionsFrameBGR, fcopy.predictionsResizedFrameBGR, pluginProcessor.computedMask);
 
 			//======================================================================================
 			// REPLACE AVFRAME DATA WITH FRAME DATA
@@ -137,6 +132,7 @@
 			// rotateFrame(-angle, clone);
 			// rotateFrame(-angle, frame);
 
+			// Plog::log(Plog::LogPriority::INFO, TAG, "step REPLACE AVFRAME DATA WITH FRAME DATA");
 			if (bgrFrame && bgrFrame->data[0]) 
 			{
 				uint8_t* frameData = bgrFrame->data[0];
@@ -146,6 +142,15 @@
 				}
 			}
 
+			// Plog::log(Plog::LogPriority::INFO, TAG, "step Copy Frame meta data");
+			// if (bgrFrame) {
+			// 	Plog::log(Plog::LogPriority::INFO, TAG, "step bgrFrame");
+
+			// }
+			// if (incFrame) {
+			// 	Plog::log(Plog::LogPriority::INFO, TAG, "step incFrame");
+
+			// }
 			// Copy Frame meta data
 			if (bgrFrame && incFrame) 
 			{
@@ -161,7 +166,8 @@
 			Plog::log(Plog::LogPriority::INFO, TAG, oss.str());
 
 			// Remove the pointer
-			//incFrame = nullptr;
+			incFrame = nullptr;
+			// Plog::log(Plog::LogPriority::INFO, TAG, "step end update");
 		}
 	}
 
diff --git a/ForegroundSegmentation/videoSubscriber.h b/ForegroundSegmentation/videoSubscriber.h
index de08c5a..4e7dc08 100644
--- a/ForegroundSegmentation/videoSubscriber.h
+++ b/ForegroundSegmentation/videoSubscriber.h
@@ -4,7 +4,7 @@
 extern "C" {
     #include <libavutil/frame.h>
 }
-#include "observer.h"
+#include <observer.h>
 
 //STl
 #include <map>
@@ -12,16 +12,11 @@
 #include <condition_variable>
 
 // Frame Scaler
-#include "framescaler.h"
+#include <framescaler.h>
 
 // OpenCV headers
 #include <opencv2/core.hpp>
 
-// Flatbuffers / Tensorflow headers
-#include <tensorflow/lite/model.h>
-#include <tensorflow/lite/kernels/register.h>
-#include <tensorflow/lite/optional_debug_tools.h>
-
 #include "pluginProcessor.h"
 
 namespace jami 
@@ -35,6 +30,8 @@
 		// This frame is used to draw predictions into in RGB format
 		cv::Mat predictionsFrameBGR;
         cv::Size originalSize;
+		// This frame is used to draw predictions into in RGB format on a resized frame
+		cv::Mat predictionsResizedFrameBGR;
 	};
 
     class VideoSubscriber : public jami::Observer<AVFrame *>