GreenScreen: add background blur functionality

GitLab: #15
Change-Id: Iafc8b410ebe0228eca9b780504382347b38fd8cc
diff --git a/GreenScreen/pluginProcessor.h b/GreenScreen/pluginProcessor.h
index 0897527..36eaa78 100644
--- a/GreenScreen/pluginProcessor.h
+++ b/GreenScreen/pluginProcessor.h
@@ -20,46 +20,50 @@
  */
 
 #pragma once
-// STL
-#include <condition_variable>
-#include <cstdint>
+
 #include <map>
-#include <memory>
-#include <mutex>
-#include <thread>
 #include <vector>
-// AvFrame
 extern "C" {
 #include <libavutil/frame.h>
 }
-// Plugin
+
 #include <plugin/jamiplugin.h>
 #include <plugin/mediahandler.h>
 
-// Opencv processing
 #include <opencv2/core.hpp>
 #include <onnxruntime_cxx_api.h>
 #ifdef NVIDIA
 #include <cuda_provider_factory.h>
 #endif
-#ifdef ANDROID
+#ifdef __ANDROID__
 #include <nnapi_provider_factory.h>
 #endif
+
+#include <frameFilter.h>
+#include <frameUtils.h>
+#include <mediaStream.h>
+#include <functional>
+
 namespace jami {
 
+using gsFrame = std::unique_ptr<AVFrame, decltype(&frameFree)>;
+
+static const char* modelInputNames[8] = {"image:0"};
+static const char* modelOutputNames[11] = {"Identity:0"};
+
 class PluginProcessor
 {
 public:
-    PluginProcessor(const std::string& dataPath, const std::string& model, const std::string& backgroundImage, bool acc);
+    PluginProcessor(const std::string& model, bool acc);
     ~PluginProcessor();
 
-    void initModel(const std::string& modelPath);
+    void initModel(const std::string& modelPath, bool activateAcc);
     /**
      * @brief feedInput
      * Takes a frame and feeds it to the model storage for predictions
      * @param frame
      */
-    void feedInput(const cv::Mat& frame);
+    void feedInput(AVFrame* input);
 
     /**
      * @brief computePredictions
@@ -69,59 +73,66 @@
     void computePredictions();
 
     void printMask();
-    void drawMaskOnFrame(cv::Mat& frame,
-                         cv::Mat& frameReduced,
-                         std::vector<float> computedMask,
-                         int lineSize,
-                         int angle);
-    int getBackgroundRotation();
-    void setBackgroundRotation(int angle);
-    void setBackgroundImage(const std::string& backgroundPath);
-    void rotateFrame(int angle, cv::Mat& mat);
-    bool hasBackground() const;
-    void resetInitValues(const cv::Size& modelInputSize);
-    bool isAllocated();
+    void drawMaskOnFrame(AVFrame* frame, AVFrame* frameReduced, int angle);
+    bool isAllocated() { return isAllocated_; }
+    void setBlur(bool isBlur) { isBlur_ = isBlur; }
+    void setBlurLevel(const std::string& blurLevel) { blurLevel_ = blurLevel; }
+    void setBackgroundImage(const std::string& backgroundPath) { backgroundPath_ = backgroundPath; }
+    void initFilters(const std::pair<int, int>& inputSize, int format, int angle);
 
-    // Output predictions
-    std::vector<float> computedMask;
-
-    cv::Mat previousMasks[2];
-    cv::Mat backgroundImage;
-
-    cv::Size kSize;
-
-    std::string backgroundPath;
+    std::pair<int, int> modelInputDimensions {257, 257};
+    std::map<int, std::string> rotation = {{90, "-PI/2"},
+                                           {-90, "PI/2"},
+                                           {-180, "-PI"},
+                                           {180, "PI"},
+                                           {0, "0"}};
 
 private:
-    int count{0};
-    cv::Mat frame;
-    int backgroundRotation{0};
-    bool hasBackground_{false};
-    cv::Mat bgdModel, fgdModel;
-    int grabCutMode{1}; // cv::GC_INIT_WITH_MASK = 1;
-    int grabCutIterations{5};
-    int grabcutClass{3};
-    int frameCount{3};
-    float smoothFactors[3] = {0.6f, 0.3f, 0.1f};
-    float kernelSize{0.05f};
+    void resetInitValues();
+    void loadBackground();
+    MediaStream getbgAVFrameInfos();
 
-    bool isAllocated_{false};
-    Ort::Env env{ORT_LOGGING_LEVEL_WARNING, "test"};
-    Ort::Value input_tensor_{nullptr};
-    std::array<int64_t, 3> input_shape_{257, 257, 3};
+    bool isBlur_ {false};
+    std::string blurLevel_;
+    std::string backgroundPath_;
+    cv::Mat previousMasks_[2];
+    std::vector<float> computedMask_;
+    cv::Mat cvFrame_;
 
-    Ort::Value output_tensor_{nullptr};
-    std::array<int64_t, 4> output_shape_{1, 17, 17, 1};
+    // process variables
+    cv::Size kSize_;
+    int count_ {0};
+    cv::Mat bgdModel_, fgdModel_;
+    int grabCutMode_ {1}; // cv::GC_INIT_WITH_MASK = 1;
+    int grabCutIterations_ {4};
+    int grabcutClass_ {3};
+    int frameCount_ {5};
+    float smoothFactors_[3] = {0.6f, 0.3f, 0.1f};
+    float kernelSize_ {0.05f};
 
+    // filters
+    std::string mainFilterDescription_;
+    FrameFilter mainFilter_;
+    std::unique_ptr<AVFormatContext, std::function<void(AVFormatContext*)>> pFormatCtx_
+        = {avformat_alloc_context(), [](AVFormatContext* ptr) {
+               avformat_close_input(&ptr);
+               avformat_free_context(ptr);
+           }};
+    int videoStream_ {-1};
+    MediaStream ims_, ims2_, maskms_;
 
-    std::array<float, 257 * 257 * 3> input_image_{};
-    
-    std::array<float, 17 * 17> results_{};
-    Ort::Session* session_{};
-    const char* input_names[8] = {"image:0"};
-    const char* output_names[11] = {"Identity:0"};
+    // onnx related
+    bool isAllocated_ {false};
+    Ort::Env env_ {ORT_LOGGING_LEVEL_WARNING, "test"};
+    Ort::Session* session_ {};
     Ort::SessionOptions sessOpt_;
 
-    bool activateAcc_{false};
+    Ort::Value input_tensor_ {nullptr};
+    std::array<int64_t, 3> input_shape_ {257, 257, 3};
+    std::array<float, 257 * 257 * 3> input_image_ {};
+
+    Ort::Value output_tensor_ {nullptr};
+    std::array<int64_t, 4> output_shape_ {1, 17, 17, 1};
+    std::array<float, 17 * 17> results_ {};
 };
 } // namespace jami