GreenScreen: switch PluginProcessor to ONNX Runtime

Drop the pluginInference.h dependency and hold the inference state
(Ort::Env, Ort::Session, input/output tensors and their fixed-size
buffers) directly in PluginProcessor. The constructor now takes the
model path, the background image and an acceleration flag,
initModel() loads the model from an explicit path, and the CUDA
(NVIDIA) and NNAPI (ANDROID) execution-provider headers are pulled
in behind preprocessor guards. The grabcut defaults previously split
between TFLITE and non-TFLITE builds are collapsed into a single set.

Change-Id: I179896b2414f35f0efc543738e7ecc943d5deb1d
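
For reviewers, a minimal sketch of how these new members are
typically wired together with the ONNX Runtime C++ API. This is
illustrative only: the matching pluginProcessor.cpp changes are not
part of this diff, and the computePredictions() name, the method
bodies and the CUDA device id shown here are assumptions, not the
committed implementation.

    #include <onnxruntime_cxx_api.h>

    // Assumed shape of initModel(): bind the fixed-size buffers declared
    // in the header to ONNX Runtime tensors and open the session.
    void PluginProcessor::initModel(const std::string& modelPath)
    {
        auto memInfo = Ort::MemoryInfo::CreateCpu(OrtDeviceAllocator, OrtMemTypeCPU);
        input_tensor_ = Ort::Value::CreateTensor<float>(memInfo,
            input_image_.data(), input_image_.size(),
            input_shape_.data(), input_shape_.size());
        output_tensor_ = Ort::Value::CreateTensor<float>(memInfo,
            results_.data(), results_.size(),
            output_shape_.data(), output_shape_.size());
    #ifdef NVIDIA
        // Device id 0 is an assumption for this sketch.
        if (activateAcc_)
            Ort::ThrowOnError(
                OrtSessionOptionsAppendExecutionProvider_CUDA(sessOpt_, 0));
    #endif
        session_ = new Ort::Session(env, modelPath.c_str(), sessOpt_);
        isAllocated_ = true;
    }

    // Hypothetical prediction pass: input_image_ is assumed to already
    // hold the preprocessed 257x257x3 frame.
    void PluginProcessor::computePredictions()
    {
        if (!isAllocated_)
            return;
        session_->Run(Ort::RunOptions{nullptr},
                      input_names, &input_tensor_, 1,
                      output_names, &output_tensor_, 1);
        computedMask.assign(results_.begin(), results_.end());
    }
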
diff --git a/GreenScreen/pluginProcessor.h b/GreenScreen/pluginProcessor.h
index e81dd95..9b86b7b 100644
--- a/GreenScreen/pluginProcessor.h
+++ b/GreenScreen/pluginProcessor.h
@@ -28,8 +28,6 @@
 #include <mutex>
 #include <thread>
 #include <vector>
-// Filters
-#include "pluginInference.h"
 // AvFrame
 extern "C" {
 #include <libavutil/frame.h>
@@ -38,14 +36,24 @@
 #include <plugin/jamiplugin.h>
 #include <plugin/mediahandler.h>
 
+// OpenCV and ONNX Runtime
+#include <opencv2/core.hpp>
+#include <onnxruntime_cxx_api.h>
+#ifdef NVIDIA
+#include <cuda_provider_factory.h>
+#endif
+#ifdef ANDROID
+#include <nnapi_provider_factory.h>
+#endif
 namespace jami {
 
 class PluginProcessor
 {
 public:
-    PluginProcessor(const std::string& dataPath);
+    PluginProcessor(const std::string& dataPath, const std::string& model, const std::string& backgroundImage, bool acc);
+    ~PluginProcessor();
 
-    void initModel();
+    void initModel(const std::string& modelPath);
     /**
      * @brief feedInput
      * Takes a frame and feeds it to the model storage for predictions
@@ -72,6 +80,7 @@
     void rotateFrame(int angle, cv::Mat& mat);
     bool hasBackground() const;
     void resetInitValues(const cv::Size& modelInputSize);
+    bool isAllocated();
 
     // Output predictions
     std::vector<float> computedMask;
@@ -81,28 +90,38 @@
 
     cv::Size kSize;
 
-    PluginInference pluginInference;
     std::string backgroundPath;
-    int count = 0;
 
 private:
-    // Frame
+    int count{0};
     cv::Mat frame;
-    int backgroundRotation = 0;
-    bool hasBackground_ = false;
+    int backgroundRotation{0};
+    bool hasBackground_{false};
     cv::Mat bgdModel, fgdModel;
-    int grabCutMode = 1; // cv::GC_INIT_WITH_MASK = 1;
-    int grabCutIterations = 5;
-#ifdef TFLITE
-    int grabcutClass = 2;
-    int frameCount = 3;
-    float smoothFactors[2] = {0.3f, 0.05f};
-    float kernelSize = 0.1f;
-#else
-    int grabcutClass = 3;
-    int frameCount = 5;
+    int grabCutMode{1}; // cv::GC_INIT_WITH_MASK = 1;
+    int grabCutIterations{5};
+    int grabcutClass{3};
+    int frameCount{3};
     float smoothFactors[3] = {0.6f, 0.3f, 0.1f};
-    float kernelSize = 0.05f;
-#endif
+    float kernelSize{0.05f};
+
+    bool isAllocated_{false};
+    Ort::Env env{ORT_LOGGING_LEVEL_WARNING, "test"};
+    Ort::Value input_tensor_{nullptr};
+    std::array<int64_t, 3> input_shape_{257, 257, 3};
+
+    Ort::Value output_tensor_{nullptr};
+    std::array<int64_t, 4> output_shape_{1, 17, 17, 1};
+
+
+    std::array<float, 257 * 257 * 3> input_image_{};
+
+    std::array<float, 17 * 17> results_{};
+    Ort::Session* session_{};
+    const char* input_names[8] = {"image:0"};
+    const char* output_names[11] = {"Identity:0"};
+    Ort::SessionOptions sessOpt_;
+
+    bool activateAcc_{false};
 };
 } // namespace jami