diff --git a/.dockerignore b/.dockerignore
new file mode 100644
index 000000000..1ca15471d
--- /dev/null
+++ b/.dockerignore
@@ -0,0 +1,9 @@
+.vscode
+**/node_modules
+**/.env
+**/.env.example
+**/.git
+test
+bfx-report-ui/build
+*Dockerfile*
+*docker-compose*
diff --git a/.env.example b/.env.example
new file mode 100644
index 000000000..e88639c81
--- /dev/null
+++ b/.env.example
@@ -0,0 +1,11 @@
+NODE_ENV=production
+UI_ENV=production
+NGINX_ENV=production
+
+NGINX_AUTOINDEX=on
+
+REPO_BRANCH=master
+
+NGINX_PORT=80
+NGINX_HOST=localhost
+SECRET_KEY=secretKey
diff --git a/.gitignore b/.gitignore
index 4dce97738..40c9308e1 100644
--- a/.gitignore
+++ b/.gitignore
@@ -18,3 +18,18 @@ workers/loc.api/queue/temp/*.csv
workers/loc.api/queue/temp/*.zip
bfx-report-ui/.env
bfx-report-ui/src
+.env
+scripts/get-docker.sh
+scripts/maintenance/maintenance.on
+terraform/.terraform
+terraform/*.tfstate
+terraform/*.tfstate.backup
+terraform/*.plan
+terraform/terraform.tfvars
+terraform/bfx-ssh-key.pem
+terraform/worker-connect.sh
+terraform/config/backend.conf
+terraform/config/credentials.conf
+terraform/backend.tf
+terraform/.terraform.tfstate.lock.info
+READY
diff --git a/Dockerfile.express b/Dockerfile.express
new file mode 100644
index 000000000..08716fdd6
--- /dev/null
+++ b/Dockerfile.express
@@ -0,0 +1,36 @@
+FROM node:14.16.0-alpine
+
+ENV BIND_HOST "0.0.0.0"
+
+WORKDIR /home/node/bfx-report-express
+
+RUN apk add --no-cache --virtual \
+ .gyp \
+ python3 \
+ make \
+ g++ \
+ curl \
+ git \
+ openssh \
+ bash
+
+COPY bfx-report-ui/bfx-report-express/package*.json \
+ bfx-report-ui/bfx-report-express/.npmrc \
+ ./
+RUN npm i --production --no-audit
+
+COPY ./bfx-report-ui/bfx-report-express/config ./config
+RUN cp config/default.json.example config/default.json
+
+COPY ./bfx-report-ui/bfx-report-express .
+COPY ./scripts/express-entrypoint.sh /usr/local/bin/
+
+HEALTHCHECK --interval=10s --timeout=3s --start-period=10s --retries=3 \
+ CMD curl --retry-delay 10 --max-time 3 --retry 3 --retry-max-time 30 \
+ -f -X POST -H "Content-Type: application/json" \
+ -d '{ "method": "isSchedulerEnabled" }' \
+ http://${BIND_HOST}:${API_PORT}/api/json-rpc \
+ || kill 1
+
+ENTRYPOINT ["express-entrypoint.sh"]
+CMD ["index.js"]
diff --git a/Dockerfile.grenache-grape b/Dockerfile.grenache-grape
new file mode 100644
index 000000000..8b9c9cba3
--- /dev/null
+++ b/Dockerfile.grenache-grape
@@ -0,0 +1,29 @@
+FROM node:14.16.0-alpine
+
+ENV BIND_HOST "0.0.0.0"
+
+WORKDIR /home/node/grenache-grape
+
+RUN apk add --no-cache --virtual \
+ .gyp \
+ python3 \
+ make \
+ g++ \
+ curl \
+ git \
+ openssh \
+ bash
+
+RUN git clone https://github.com/bitfinexcom/grenache-grape.git . \
+ && npm i --production --no-audit
+
+COPY ./scripts/grenache-grape-entrypoint.sh /usr/local/bin/
+
+HEALTHCHECK --interval=10s --timeout=3s --start-period=10s --retries=3 \
+ CMD curl --retry-delay 10 --max-time 3 --retry 3 --retry-max-time 30 \
+ -f -X POST -d '{}' \
+ http://${BIND_HOST}:${GRAPE_APH} \
+ || kill 1
+
+ENTRYPOINT ["grenache-grape-entrypoint.sh"]
+CMD ["bin/grape.js"]
diff --git a/Dockerfile.ui-builder b/Dockerfile.ui-builder
new file mode 100644
index 000000000..33c981657
--- /dev/null
+++ b/Dockerfile.ui-builder
@@ -0,0 +1,21 @@
+FROM node:14.16.0-alpine
+
+WORKDIR /home/node/bfx-report-ui
+
+RUN apk add --no-cache --virtual \
+ .gyp \
+ python3 \
+ make \
+ g++ \
+ git \
+ openssh \
+ bash
+
+COPY ./scripts/maintenance/index.html var/www/html/maintenance.html
+COPY ./bfx-report-ui/package*.json ./
+RUN npm i --no-audit
+
+COPY ./bfx-report-ui .
+COPY ./scripts/build-ui.sh /usr/local/bin/
+
+ENTRYPOINT ["build-ui.sh"]
diff --git a/Dockerfile.worker b/Dockerfile.worker
new file mode 100644
index 000000000..a5fe51f48
--- /dev/null
+++ b/Dockerfile.worker
@@ -0,0 +1,61 @@
+FROM node:14.16.0-alpine
+
+ARG GRC_VER="0.7.1"
+
+WORKDIR /home/node/grenache-cli
+
+RUN apk update && apk add --no-cache --virtual \
+ .gyp \
+ python3 \
+ make \
+ jq \
+ help2man \
+ gcc \
+ musl-dev \
+ autoconf \
+ automake \
+ libtool \
+ pkgconfig \
+ file \
+ patch \
+ bison \
+ clang \
+ flex \
+ curl \
+ perl \
+ perl-dev \
+ wget \
+ g++ \
+ git \
+ openssh \
+ bash
+
+RUN wget -c https://github.com/bitfinexcom/grenache-cli/releases/download/${GRC_VER}/grenache-cli-${GRC_VER}.tar.xz \
+ && tar -xf grenache-cli-${GRC_VER}.tar.xz \
+ && cd grenache-cli-${GRC_VER} \
+ && ./configure \
+ && make \
+ && make install \
+ && grenache-keygen
+
+WORKDIR /home/node/bfx-reports-framework
+
+COPY package*.json .npmrc ./
+RUN npm i --production --no-audit
+
+COPY ./config ./config
+RUN cp config/schedule.json.example config/schedule.json \
+ && cp config/common.json.example config/common.json \
+ && cp config/service.report.json.example config/service.report.json \
+ && cp config/facs/grc.config.json.example config/facs/grc.config.json \
+ && cp config/facs/grc-slack.config.json.example config/facs/grc-slack.config.json
+
+COPY . .
+COPY ./scripts/worker-entrypoint.sh /usr/local/bin/
+
+HEALTHCHECK --interval=10s --timeout=3s --start-period=10s --retries=3 \
+ CMD grenache-lookup -g ${GRAPE_HOST} -p ${GRAPE_APH} "rest:report:api" \
+ || kill 1
+
+ENTRYPOINT ["worker-entrypoint.sh"]
+CMD ["worker.js"]
diff --git a/README.md b/README.md
index 1396454b8..3d1a354a6 100644
--- a/README.md
+++ b/README.md
@@ -81,3 +81,259 @@ npm run start
```console
npm test
```
+
+## Docker release
+
+A pre-configured [Docker/docker-compose](https://www.docker.com) infrastructure is provided to run the reports framework on an independent Linux server for individual user use. Functionality has been tested on `Ubuntu 20.04 LTS`
+
+### Main Structure
+
+The following Docker containers are launched:
+
+- `grape1` and `grape2` to run the [grenache-grape](https://github.com/bitfinexcom/grenache-grape) network with two grapes. For it provides `Dockerfile.grenache-grape` file
+- `worker` to run the main grenache `bfx-reports-framework` worker which contains the general business logic. For it provides `Dockerfile.worker` file
+- `express` to run the lightweight server for proxying requests to the grenache `worker`. For it provides `Dockerfile.express` file
+- `ui-builder` to run a building process of UI static files. For it provides `Dockerfile.ui-builder` file
+- `nginx` to run the [reverse proxy server](https://www.nginx.com/resources/glossary/reverse-proxy-server)
+
+To simplify setup/deploy processes the following bash scripts are provided:
+
+- `./scripts/setup.sh` - CLI as an easy way to get through the setup process
+- `./scripts/sync-repo.sh` - CLI to fetch the last changes of the repository/sub-modules from the main remote repo
+- `./scripts/launch.sh` - CLI to launch/re-launch docker-compose services to apply the last fetched changes
+- `./scripts/deploy.sh` - simple deploy script which sync all repo with remote and launch all services
+
+### Requirements
+
+The setup was tested with the following dependencies:
+
+- Docker version 20.10.12
+- docker-compose version 1.29.2
+- git version 2.24.1
+
+### Setup process
+
+After cloning the repository, the app needs to be configured. For that, the `./scripts/setup.sh` bash script can be used
+The following arguments are available:
+
+```console
+./scripts/setup.sh -h
+
+Usage: ./scripts/setup.sh [options] [-d] | [-h]
+
+Options:
+ -y With this option, all questions are automatically answered with 'Yes'. In this case, the questions themselves will not be displayed
+ -n Don't remove files of DBs
+ -h Display help
+```
+
+During the setup process, the user will be asked some questions
+
+- if no Docker/docker-compose are found, the user will be prompted to install them
+- to remove all Log, DB and CSV files to set up the app from scratch
+- to choose syncing repository branch (master/beta), by default master
+- to set NGINX port, by default 80
+- to set NGINX host, by default localhost
+- to sync all repository/sub-modules (there will be run `./scripts/sync-repo.sh` script)
+
+Based on the responses, a `.env` file will be configured with the following default values:
+
+```console
+NODE_ENV=production
+UI_ENV=production
+NGINX_ENV=production
+
+NGINX_AUTOINDEX=on
+
+REPO_BRANCH=master
+
+NGINX_PORT=80
+NGINX_HOST=localhost
+SECRET_KEY=secretKey
+```
+
+> Pay attention: for security reasons, storing the secret key value in the `.env` file is not recommended for production; set it via the `SECRET_KEY` environment variable instead!
+
+### Sync repo process
+
+In case there's a need to fetch the last changes of all repository/sub-modules, the `./scripts/sync-repo.sh` bash script might be used
+The following arguments are available:
+
+```console
+./scripts/sync-repo.sh -h
+
+Usage: ./scripts/sync-repo.sh [options] | [-h]
+
+Options:
+ -a Sync all repositories
+ -w Sync bfx-reports-framework only
+ -u Sync bfx-report-ui only
+ -e Sync bfx-report-express only
+ -h Display help
+```
+
+### Launch process
+
+To launch/re-launch the docker-compose services of the app, the `./scripts/launch.sh` bash script is available
+The following arguments are available:
+
+```console
+./scripts/launch.sh -h
+
+Usage: ./scripts/launch.sh [options] [-d] | [-h]
+
+Options:
+ -a Launch all repositories
+ -g Launch grenache-grape network only
+ -w Launch bfx-reports-framework worker only
+ -e Launch bfx-report-express server only
+ -n Launch NGINX reverse proxy server only
+ -u Build bfx-report-ui static files only
+ -d Detached mode: Run containers in the background
+ -h Display help
+```
+
+> To run containers of the app in the background, use `-d` argument for the `Detached mode`
+
+### Deploy process
+
+Provides the simple deploy bash script `./scripts/deploy.sh`
+It provides the following steps:
+
+- add a maintenance flag to show maintenance HTML `./scripts/maintenance/index.html` page via NGINX when the deploy process is going on
+- sync all repository/sub-modules
+- relaunch all docker-compose services except `nginx` service
+- remove the maintenance flag
+
+## Terraform IaaS
+
+This section describes the implementation of automated infrastructure setting-up in the [AWS](https://aws.amazon.com) cloud provider and the automated deployment process using [Terraform](https://www.terraform.io), it's an open-source infrastructure as code software tool, check [Intro to Terraform](https://www.terraform.io/intro). Terraform community has already written plenty of providers. All publicly available providers can be found on the Terraform Registry, including [Amazon Web Services (AWS)](https://registry.terraform.io/providers/hashicorp/aws/latest/docs). Functionality has been tested on `Ubuntu 20.04 LTS`
+
+### Main Modules Structure
+
+The infrastructure configuration is located in the `./terraform` directory of the project root. It consists of modules with the following structure:
+
+- `app` - contains the main application configuration, and consists of submodules:
+ - `network` - creates VPC resources, based on [AWS VPC Terraform module](https://github.com/terraform-aws-modules/terraform-aws-vpc)
+ - `ec2` - creates resources to setup Ubuntu instance, attach volume for DB, set deployment process, generate bash script file as an easy way to have an ability to connect via SSH
+ - `ssh_key` - creates SSH private/public keys resources
+ - `kms_key` - creates AWS KMS key resources for encryption purposes of DB volume
+ - `ssm_param_secret_key` - creates AWS SSM parameter to have secure storage of generated on setup step `Private Key` used to encrypt user's `apiKey`/`apiSecret`
+- `backend` - creates resources to setup remote state management with S3 backend for your account. Based on [remote state S3 backend module](https://github.com/nozaq/terraform-aws-remote-state-s3-backend)
+
+### Requirements to use Terraform
+
+The setup was tested with the following dependencies:
+
+- Terraform version 1.1.7
+
+> To use Terraform you will need to install it. [This official tutorial](https://learn.hashicorp.com/tutorials/terraform/install-cli) will be useful to install Terraform.
+
+### Setting
+
+To follow this instructions you will need AWS account and [associated credentials](https://docs.aws.amazon.com/general/latest/gr/aws-sec-cred-types.html) that allow you to create resources. Enter AWS Access keys into the `terraform/config/credentials.conf` file as shown below:
+
+```console
+cp terraform/config/credentials.conf.example terraform/config/credentials.conf
+vim terraform/config/credentials.conf
+```
+
+To be able to override default values of configurable variables of infrastructure use `terraform/terraform.tfvars` file as shown below:
+
+```console
+cp terraform/terraform.tfvars.example terraform/terraform.tfvars
+vim terraform/terraform.tfvars
+```
+
+That `terraform/terraform.tfvars` file contains the main variables useful for customization. More available variables may be seen in the `terraform/variables.tf` file. Be careful, overriding some variables without deep understanding can bring unexpected behavior.
+
+### Bootstrap the project with S3 backend management for tfstate files
+
+Terraform must store state about your managed infrastructure and configuration. This state is used by Terraform to map real world resources to your configuration, keep track of metadata, and to improve performance for large infrastructures.
+
+This state is stored by default in a local file named `terraform/terraform.tfstate`, but it can also be stored remotely, which works better in a team environment. For more info check the following links:
+
+- [Terraform State](https://www.terraform.io/language/state)
+- [Backends](https://www.terraform.io/language/settings/backends)
+
+**Why does this exist?**
+
+> One of the most popular backend options for terraform is AWS (S3 for state, and DynamoDB for the lock table). If your project specifies an AWS/S3 backend, Terraform requires the existence of an S3 bucket in which to store state information about your project, and a DynamoDB table to use for locking (this prevents you, your collaborators, and CI from stepping on each other with terraform commands which either modify your state or the infrastructure itself).
+
+Currently, this configuration provides a preset for [S3 Backend](https://www.terraform.io/language/settings/backends/s3)
+
+To manage the project use the following commands:
+
+```console
+cd terraform
+
+# Prepare your working directory for other commands. Whenever a configurations backend changes, you must run terraform init again to validate and configure the backend before you can perform any plans, applies, or state operations
+terraform init
+
+# Create backend infrastructure
+terraform apply -target=module.backend
+
+# Re-configure the backend
+terraform init -reconfigure -backend-config=config/backend.conf
+
+# Create or update infrastructure
+terraform apply
+
+# WARNING: The below commands are useful when you want to destroy previously-created infrastructure
+terraform destroy -target=module.app
+terraform apply -target=module.backend -var=is_backend_s3_config_removed=true
+terraform init -migrate-state
+```
+
+> For the `production` environment strongly recommended to use flow with S3 Backend. But it can be redundant for the `development` or `test` env. Check the below section to get an easier way to bootstrap the project without S3 backend management.
+
+### Bootstrap the project without S3 backend management for tfstate files
+
+To disable S3 backend management set `is_backend_s3_enabled=false` in the `terraform/terraform.tfvars` file. And right now you can use the default backend called `local` to store state as a local file on disk.
+
+To manage the project use the following commands:
+
+```console
+cd terraform
+
+# Prepare your working directory for other commands. Whenever a configurations backend changes, you must run terraform init again to validate and configure the backend before you can perform any plans, applies, or state operations
+terraform init
+
+# Create or update infrastructure
+terraform apply
+
+# WARNING: The below commands are useful when you want to destroy previously-created infrastructure
+terraform destroy
+```
+
+### Deployment
+
+To set up the deployment process, you need to execute terraform apply of the plan. Each time the command is executed, it will execute [remote-exec Provisioner](https://www.terraform.io/language/resources/provisioners/remote-exec). That Provisioner will connect to the AWS EC2 instance via SSH and launch the `scripts/deploy.sh` bash script described above.
+
+To deploy the project just use the following command:
+
+```console
+cd terraform
+
+terraform apply
+```
+
+### Notes
+
+- After applying Terraform infrastructure, some useful outputs will be shown in the terminal. One of those is the `Public DNS`, the address to access the react app. Check the available outputs in the `terraform/outputs.tf` file.
+
+Example of Outputs:
+
+```console
+Apply complete! Resources: 1 added, 0 changed, 1 destroyed.
+
+Outputs:
+
+bfx_reports_framework_pub_dns = "ec2-1-234-56-78.eu-central-1.compute.amazonaws.com"
+bfx_reports_framework_pub_ip = "1.234.56.78"
+```
+
+- An SSH key and a bash script file will be created in the Terraform folder to connect via SSH to AWS EC2. The paths to those files will be:
+
+- `terraform/bfx-ssh-key.pem`
+- `terraform/worker-connect.sh`
diff --git a/bfx-report-ui b/bfx-report-ui
index deb2f74d6..f5ab417ed 160000
--- a/bfx-report-ui
+++ b/bfx-report-ui
@@ -1 +1 @@
-Subproject commit deb2f74d64d02f2566662ebd73cc01a2914f98e9
+Subproject commit f5ab417edb44c274802b2ad30e7dab0bb0215879
diff --git a/docker-compose.yaml b/docker-compose.yaml
new file mode 100644
index 000000000..2b539d79d
--- /dev/null
+++ b/docker-compose.yaml
@@ -0,0 +1,145 @@
+version: '3.9'
+
+x-common-variables: &common-variables
+ API_PORT: "31339"
+ GRAPE_APH: "30001"
+ GRAPE_HOST: "grape1"
+
+services:
+ grape1:
+ container_name: grape1
+ build:
+ context: .
+ dockerfile: Dockerfile.grenache-grape
+ restart: unless-stopped
+ networks:
+ - grapes
+ environment:
+ <<: *common-variables
+ GRAPE_DP: "20001"
+ GRAPE_BN: "20002"
+ GRAPE_BIND: "grape2"
+ NODE_ENV: ${NODE_ENV}
+
+ grape2:
+ container_name: grape2
+ build:
+ context: .
+ dockerfile: Dockerfile.grenache-grape
+ restart: unless-stopped
+ depends_on:
+ grape1:
+ condition: service_healthy
+ networks:
+ - grapes
+ environment:
+ <<: *common-variables
+ GRAPE_DP: "20002"
+ GRAPE_APH: "40001"
+ GRAPE_BN: "20001"
+ GRAPE_BIND: "grape1"
+ NODE_ENV: ${NODE_ENV}
+
+ worker:
+ container_name: worker
+ build:
+ context: .
+ dockerfile: Dockerfile.worker
+ restart: unless-stopped
+ depends_on:
+ grape1:
+ condition: service_healthy
+ grape2:
+ condition: service_healthy
+ network_mode: "service:grape1"
+ volumes:
+ - temp:/home/node/bfx-reports-framework/temp
+ - ./db:/home/node/bfx-reports-framework/db
+ - ./csv:/home/node/bfx-reports-framework/csv
+ - ./logs:/home/node/bfx-reports-framework/logs
+ environment:
+ <<: *common-variables
+ WORKER_API_PORT: "1337"
+ WORKER_WS_PORT: "1455"
+ TEMP_FOLDER: "/home/node/bfx-reports-framework/temp"
+ DB_FOLDER: "/home/node/bfx-reports-framework/db"
+ CSV_FOLDER: "/home/node/bfx-reports-framework/csv"
+ LOGS_FOLDER: "/home/node/bfx-reports-framework/logs"
+ SECRET_KEY: ${SECRET_KEY} # Required
+ SCHEDULER_RULE: ${SCHEDULER_RULE} # Non-required
+ NGINX_AUTOINDEX: ${NGINX_AUTOINDEX} # Required
+ NODE_ENV: ${NODE_ENV}
+
+ express:
+ container_name: express
+ build:
+ context: .
+ dockerfile: Dockerfile.express
+ restart: unless-stopped
+ depends_on:
+ worker:
+ condition: service_healthy
+ grape1:
+ condition: service_healthy
+ grape2:
+ condition: service_healthy
+ networks:
+ - grapes
+ - front-net
+ volumes:
+ - ./logs:/home/node/bfx-report-express/logs
+ environment:
+ <<: *common-variables
+ NODE_ENV: ${NODE_ENV}
+
+ ui-builder:
+ build:
+ context: .
+ dockerfile: Dockerfile.ui-builder
+ volumes:
+ - front-build:/home/node/bfx-report-ui/front-build
+ environment:
+ FRONT_BUILD_FOLDER: "/home/node/bfx-report-ui/front-build"
+ CI_ENVIRONMENT_NAME: ${UI_ENV} # Non-required
+ NGINX_HOST: ${NGINX_HOST} # Required
+ NGINX_PORT: ${NGINX_PORT} # Non-required
+
+ nginx:
+ container_name: nginx
+ image: nginx:1.21.5-alpine
+ restart: unless-stopped
+ depends_on:
+ express:
+ condition: service_healthy
+ networks:
+ - front-net
+ ports:
+ - "${NGINX_PORT}:8080"
+ volumes:
+ - front-build:/var/www/html
+ - ./scripts/maintenance:/var/www/maintenance
+ - ./nginx-configs/autoindex-format.xslt:/etc/nginx/autoindex-format.xslt:ro
+ - ./nginx-configs/nginx.conf:/etc/nginx/nginx.conf:ro
+ - ./nginx-configs/templates:/etc/nginx/templates
+ - ./logs:/var/log/nginx/bfx-reports-framework
+ - ./csv:/var/www/csv
+ environment:
+ <<: *common-variables
+ CSV_FOLDER: "/var/www/csv"
+ MAINTENANCE_STATIC_FOLDER: "/var/www/maintenance"
+ FRONT_BUILD_FOLDER: "/var/www/html"
+ LOGS_FOLDER: "/var/log/nginx/bfx-reports-framework"
+ NGINX_HOST: ${NGINX_HOST} # Required
+ NGINX_PORT: ${NGINX_PORT} # Non-required
+ NGINX_ENV: ${NGINX_ENV} # Non-required
+ NGINX_AUTOINDEX: ${NGINX_AUTOINDEX} # Required
+
+networks:
+ grapes:
+ driver: bridge
+ front-net:
+ driver: bridge
+
+volumes:
+ temp:
+ front-build:
diff --git a/nginx-configs/autoindex-format.xslt b/nginx-configs/autoindex-format.xslt
new file mode 100644
index 000000000..28cf368aa
--- /dev/null
+++ b/nginx-configs/autoindex-format.xslt
@@ -0,0 +1,279 @@
+
+]>
+
+
+
+
+
+
+
+ B
+ K
+ M
+ G
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+ Home
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+ Home
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+ -
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+ Bitfinex Reporting & Performance Tools
+
+
+
+
+
+
+
+ Bitfinex Report CSV
+
+
+
+
+
+
+
+
+
+
+ Name
+ Last Modified
+ Size
+
+
+
+
+ Parent Directory
+
+ -
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/nginx-configs/nginx.conf b/nginx-configs/nginx.conf
new file mode 100644
index 000000000..0c3da38cb
--- /dev/null
+++ b/nginx-configs/nginx.conf
@@ -0,0 +1,59 @@
+user nginx;
+worker_processes auto;
+
+error_log /var/log/nginx/error.log warn;
+pid /var/run/nginx.pid;
+
+load_module modules/ngx_http_xslt_filter_module.so;
+
+events {
+ worker_connections 1024;
+}
+
+http {
+ include /etc/nginx/mime.types;
+ default_type application/octet-stream;
+
+ log_format main '$remote_addr - $remote_user [$time_local] "$request" '
+ '$status $body_bytes_sent "$http_referer" '
+ '"$http_user_agent" "$http_x_forwarded_for"';
+ log_format main_bfx 'site="$server_name" server="$host" dest_port="$server_port" dest_ip="$server_addr" '
+ 'src="$remote_addr" src_ip="$realip_remote_addr" user="$remote_user" '
+ 'time_local="$time_local" protocol="$server_protocol" status="$status" '
+ 'bytes_out="$bytes_sent" bytes_in="$upstream_bytes_received" '
+ 'http_referer="$http_referer" http_user_agent="$http_user_agent" '
+ 'nginx_version="$nginx_version" http_x_forwarded_for="$http_x_forwarded_for" '
+ 'http_x_header="$http_x_header" uri_query="$query_string" uri_path="$uri" '
+ 'http_method="$request_method" response_time="$upstream_response_time" '
+ 'cookie="$http_cookie" request_time="$request_time" category="$sent_http_content_type" https="$https"';
+
+ access_log /var/log/nginx/access.log main;
+
+ client_header_timeout 2m;
+ client_body_timeout 2m;
+ send_timeout 2m;
+
+ gzip off;
+ gzip_vary on;
+ gzip_proxied any;
+ gzip_comp_level 6;
+ gzip_buffers 16 8k;
+ gzip_min_length 1024;
+ gzip_types text/css text/xml text/plain application/javascript application/json image/jpeg image/png image/gif image/x-icon image/svg+xml image/webp application/font-woff;
+
+ sendfile on;
+ tcp_nopush on;
+ tcp_nodelay on;
+ autoindex off;
+ server_tokens off;
+
+ variables_hash_bucket_size 64;
+ variables_hash_max_size 2048;
+ types_hash_bucket_size 256;
+ server_names_hash_bucket_size 256;
+
+ client_max_body_size 100M;
+ keepalive_timeout 75 30;
+
+ include /etc/nginx/conf.d/*.conf;
+}
diff --git a/nginx-configs/templates/default.conf.template b/nginx-configs/templates/default.conf.template
new file mode 100644
index 000000000..b30678ecd
--- /dev/null
+++ b/nginx-configs/templates/default.conf.template
@@ -0,0 +1,108 @@
+map ${NGINX_ENV} $is_log_enabled {
+ "development" 1;
+}
+
+map $http_cookie $auth_token {
+ default "";
+ "~*token=(?[^;]+)" "$token";
+}
+
+upstream backend {
+ server express:${API_PORT};
+}
+
+server {
+ listen 8080 default_server;
+ listen [::]:8080 default_server;
+ server_name ${NGINX_HOST};
+
+ gzip on;
+
+ root ${FRONT_BUILD_FOLDER};
+ index index.html;
+
+ access_log ${LOGS_FOLDER}/nginx.access.log main_bfx if=$is_log_enabled;
+ error_log ${LOGS_FOLDER}/nginx.error.log warn;
+
+ location / {
+ if (-f ${MAINTENANCE_STATIC_FOLDER}/maintenance.on) {
+ root ${MAINTENANCE_STATIC_FOLDER};
+ }
+
+ try_files $uri $uri/ /index.html;
+ }
+
+ error_page 401 = @redirectToMain;
+
+ location @redirectToMain {
+ return 302 $scheme://${NGINX_HOST}:${NGINX_PORT}/;
+ }
+
+ location ~* \.(?:map|css|js|jpg|svg|png|ico|woff2|woff|eot|ttf)$ {
+ expires 10d;
+ add_header Cache-Control "public";
+ }
+
+ location ~* \.(?:json)$ {
+ expires 1d;
+ add_header Cache-Control "public";
+ }
+
+ location /csv {
+ auth_request /api/auth;
+
+ autoindex ${NGINX_AUTOINDEX};
+ autoindex_format xml;
+ xslt_stylesheet /etc/nginx/autoindex-format.xslt path='$uri';
+
+ try_files $uri/ =404;
+ alias ${CSV_FOLDER};
+
+ if ($arg_token ~ ".+") {
+ add_header Set-Cookie "token=$arg_token; Max-Age=3600; SameSite=strict; HttpOnly";
+ }
+ if ($request_filename ~ "^.*/(.+\.(zip|csv))$") {
+ set $fname $1;
+ add_header Content-Disposition 'attachment; filename="$fname"';
+ }
+ }
+
+ location /api/auth {
+ internal;
+ proxy_pass http://backend;
+
+ proxy_http_version 1.1;
+ proxy_pass_request_body off;
+ proxy_set_header Content-Length "";
+ proxy_set_header X-Original-URI $request_uri;
+ proxy_set_header X-Auth-Token $auth_token;
+ }
+
+ location /api {
+ proxy_pass http://backend;
+
+ proxy_http_version 1.1;
+ proxy_cache_bypass $http_upgrade;
+
+ proxy_set_header Upgrade $http_upgrade;
+ proxy_set_header Connection 'upgrade';
+ proxy_set_header Host $host;
+ proxy_set_header X-Real-IP $remote_addr;
+ proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
+ proxy_set_header X-Forwarded-Proto $scheme;
+ }
+
+ location /ws {
+ proxy_pass http://backend;
+
+ proxy_http_version 1.1;
+ proxy_cache_bypass $http_upgrade;
+
+ proxy_set_header Upgrade $http_upgrade;
+ proxy_set_header Connection 'upgrade';
+ proxy_set_header Host $host;
+ proxy_set_header X-Real-IP $remote_addr;
+ proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
+ proxy_set_header X-Forwarded-Proto $scheme;
+ }
+}
diff --git a/package.json b/package.json
index d1253a984..db3429939 100644
--- a/package.json
+++ b/package.json
@@ -6,7 +6,7 @@
"license": "Apache-2.0",
"dependencies": {
"better-npm-run": "0.1.1",
- "better-sqlite3": "7.4.4",
+ "better-sqlite3": "7.5.0",
"bfx-facs-db-better-sqlite": "git+https://github.com/bitfinexcom/bfx-facs-db-better-sqlite.git",
"bfx-facs-scheduler": "git+https://github.com:bitfinexcom/bfx-facs-scheduler.git",
"bfx-report": "git+https://github.com/bitfinexcom/bfx-report.git",
diff --git a/scripts/build-ui.sh b/scripts/build-ui.sh
new file mode 100755
index 000000000..59cc017f0
--- /dev/null
+++ b/scripts/build-ui.sh
@@ -0,0 +1,66 @@
+#!/bin/bash
+
+set -euxo pipefail
+
+if [ -z "${NGINX_HOST:-}" ]; then
+ printf '%s\n' "'NGINX_HOST' environment variable must be exported" >&2
+ exit 1
+fi
+
+ADDRESS="$NGINX_HOST"
+
+if [ ${NGINX_PORT:-80} != 80 ]; then
+ ADDRESS="$NGINX_HOST:$NGINX_PORT"
+fi
+
+if [ -z "${CI_ENVIRONMENT_NAME:-}" ]; then
+ export CI_ENVIRONMENT_NAME=production
+fi
+
+export SKIP_PREFLIGHT_CHECK=true
+
+ROOT="$PWD"
+frontBuildFolder=${FRONT_BUILD_FOLDER:-"$ROOT/front-build"}
+uiBuildFolder="$ROOT/build"
+
+if ! [ -s "$frontBuildFolder/index.html" ]; then
+ cp -f var/www/html/maintenance.html "$frontBuildFolder/index.html"
+fi
+
+rm -rf $uiBuildFolder/bfx-report-express/*
+
+sed -i -e \
+ "s/HOME_URL: .*,/HOME_URL: \'http:\/\/${ADDRESS}',/g" \
+ $ROOT/src/config.js
+sed -i -e \
+ "s/API_URL: .*,/API_URL: \'http:\/\/${ADDRESS}\/api\',/g" \
+ $ROOT/src/config.js
+sed -i -e \
+ "s/WS_ADDRESS: .*,/WS_ADDRESS: \'ws:\/\/${ADDRESS}\/ws\',/g" \
+ $ROOT/src/config.js
+
+sed -i -e \
+ "s/localExport: false/localExport: true/g" \
+ $ROOT/src/config.js
+sed -i -e \
+ "s/showAuthPage: false/showAuthPage: true/g" \
+ $ROOT/src/config.js
+sed -i -e \
+ "s/showFrameworkMode: false/showFrameworkMode: true/g" \
+ $ROOT/src/config.js
+sed -i -e \
+ "s/hostedFrameworkMode: false/hostedFrameworkMode: true/g" \
+ $ROOT/src/config.js
+
+npm run build
+
+if ! [ -s "$uiBuildFolder/index.html" ]; then
+ printf '%s\n' "The UI build has not been completed successfully" >&2
+ exit 1
+fi
+
+rm -rf $frontBuildFolder/*
+cp -f var/www/html/maintenance.html "$frontBuildFolder/index.html"
+mv -f $uiBuildFolder/* $frontBuildFolder
+
+echo "The UI build has been completed successfully"
diff --git a/scripts/deploy.sh b/scripts/deploy.sh
new file mode 100755
index 000000000..c78e5d16b
--- /dev/null
+++ b/scripts/deploy.sh
@@ -0,0 +1,31 @@
+#!/bin/bash
+
+set -euo pipefail
+
+SCRIPTPATH="$(cd -- "$(dirname "$0")" >/dev/null 2>&1; pwd -P)"
+ROOT="$(dirname "$SCRIPTPATH")"
+CURRDIR="$PWD"
+
+if [ -n "${1:-}" ] && [[ "$1" =~ ^SECRET_KEY= ]]; then
+ export SECRET_KEY=$(echo $1| cut -d'=' -f 2)
+fi
+
+maintenanceFileFlag="$ROOT/scripts/maintenance/maintenance.on"
+
+"$ROOT/scripts/sync-repo.sh" "-a"
+
+cd "$ROOT"
+runningServices=$(docker-compose ps --filter "status=running" --services)
+isNginxRunning=$(echo "$runningServices" | { grep 'nginx' || test $? = 1; } | wc -l)
+
+touch "$maintenanceFileFlag"
+
+if [ $isNginxRunning == 0 ]; then
+ "$ROOT/scripts/launch.sh" "-ad"
+else
+ "$ROOT/scripts/launch.sh" "-weud"
+fi
+
+rm -rf "$maintenanceFileFlag"
+
+cd "$CURRDIR"
diff --git a/scripts/express-entrypoint.sh b/scripts/express-entrypoint.sh
new file mode 100755
index 000000000..b6a2679aa
--- /dev/null
+++ b/scripts/express-entrypoint.sh
@@ -0,0 +1,16 @@
+#!/bin/bash
+
+set -euxo pipefail
+
+enableLogDebug=false
+
+if [ "${NODE_ENV:-"production"}" = "development" ]; then
+ enableLogDebug=true
+fi
+
+export SUPPRESS_NO_CONFIG_WARNING=true
+export NODE_CONFIG="{\"app\":{\"port\":\"${API_PORT}\",\"host\":\"${BIND_HOST}\"},\"grenacheClient\":{\"grape\":\"http://$GRAPE_HOST:$GRAPE_APH\"},\"enableLog\":true,\"enableLogDebug\":$enableLogDebug}"
+
+set -- node "$@"
+
+exec "$@"
diff --git a/scripts/grenache-grape-entrypoint.sh b/scripts/grenache-grape-entrypoint.sh
new file mode 100755
index 000000000..c4451b5c6
--- /dev/null
+++ b/scripts/grenache-grape-entrypoint.sh
@@ -0,0 +1,10 @@
+#!/bin/bash
+
+set -euxo pipefail
+
+set -- node "$@" \
+ "--dp" "$GRAPE_DP" \
+ "--aph" "$GRAPE_APH" \
+ "--bn" "$GRAPE_BIND:$GRAPE_BN"
+
+exec "$@"
diff --git a/scripts/launch.sh b/scripts/launch.sh
new file mode 100755
index 000000000..aca4d62ba
--- /dev/null
+++ b/scripts/launch.sh
@@ -0,0 +1,138 @@
+#!/bin/bash
+
+set -euo pipefail
+
+SCRIPTPATH="$(cd -- "$(dirname "$0")" >/dev/null 2>&1; pwd -P)"
+ROOT="$(dirname "$SCRIPTPATH")"
+CURRDIR="$PWD"
+
+COLOR_RED="\033[31m"
+COLOR_GREEN="\033[32m"
+COLOR_BLUE="\033[34m"
+COLOR_NORMAL="\033[39m"
+
+programname=$0
+
+launchAll=0
+launchGrapes=0
+launchWorker=0
+launchExpress=0
+launchNginx=0
+buildUI=0
+detachedMode=""
+
+function usage {
+ echo -e "\
+\n${COLOR_GREEN}Usage: $programname [options] [-d] | [-h]${COLOR_BLUE}
+\nOptions:
+ -a Launch all repositories
+ -g Launch grenache-grape network only
+ -w Launch bfx-reports-framework worker only
+ -e Launch bfx-report-express server only
+ -n Launch NGINX reverse proxy server only
+ -u Build bfx-report-ui static files only
+ -d Detached mode: Run containers in the background
+ -h Display help\
+${COLOR_NORMAL}" 1>&2
+}
+
+if [ $# == 0 ]; then
+ usage
+ exit 1
+fi
+
+while getopts "agwenudh" opt; do
+ case "${opt}" in
+ a) launchAll=1;;
+ g) launchGrapes=1;;
+ w) launchWorker=1;;
+ e) launchExpress=1;;
+ n) launchNginx=1;;
+ u) buildUI=1;;
+ d) detachedMode="-d";;
+ h)
+ usage
+ exit 0
+ ;;
+ *)
+ echo -e "\n${COLOR_RED}No reasonable options found!${COLOR_NORMAL}"
+ usage
+ exit 1
+ ;;
+ esac
+done
+
+cd "$ROOT"
+
+if [ $launchGrapes == 1 ] \
+ && [ $launchWorker == 1 ] \
+ && [ $launchExpress == 1 ] \
+ && [ $launchNginx == 1 ] \
+ && [ $buildUI == 1 ]
+then
+ launchAll=1
+ launchGrapes=0
+ launchWorker=0
+ launchExpress=0
+ launchNginx=0
+ buildUI=0
+fi
+
+composeCommonFlags="\
+ --build \
+ --force-recreate \
+ --remove-orphans \
+ --timeout 2 \
+ $detachedMode \
+"
+
+if [ $launchAll == 1 ]; then
+  docker-compose up $composeCommonFlags
+
+ cd "$CURRDIR"
+ exit 0
+fi
+
+grapesServices=""
+workerService=""
+expressService=""
+
+if [ $launchGrapes == 1 ]; then
+ grapesServices="grape1 grape2"
+fi
+if [ $launchWorker == 1 ]; then
+ runningServices=$(docker-compose ps --filter "status=running" --services)
+ isGrape1Running=$(echo "$runningServices" | { grep 'grape1' || test $? = 1; } | wc -l)
+ isGrape2Running=$(echo "$runningServices" | { grep 'grape2' || test $? = 1; } | wc -l)
+
+ if [ $isGrape1Running == 0 ]; then
+ grapesServices="grape1 $grapesServices"
+ fi
+ if [ $isGrape2Running == 0 ]; then
+ grapesServices="$grapesServices grape2"
+ fi
+
+ workerService="worker"
+fi
+if [ $launchExpress == 1 ]; then
+ expressService="express"
+fi
+
+if [ $launchGrapes == 1 ] \
+ || [ $launchWorker == 1 ] \
+ || [ $launchExpress == 1 ]
+then
+ grapesServices="$(echo -e "${grapesServices}" | sed -e 's/^[[:space:]]*//' -e 's/[[:space:]]*$//')"
+ docker-compose up $composeCommonFlags \
+ $grapesServices $workerService $expressService
+fi
+if [ $launchNginx == 1 ]; then
+ docker-compose up $composeCommonFlags --no-deps \
+ nginx
+fi
+if [ $buildUI == 1 ]; then
+ docker-compose up $composeCommonFlags --no-deps \
+ ui-builder
+fi
+
+cd "$CURRDIR"
diff --git a/scripts/maintenance/index.html b/scripts/maintenance/index.html
new file mode 100644
index 000000000..6cff3ffe1
--- /dev/null
+++ b/scripts/maintenance/index.html
@@ -0,0 +1,219 @@
+
+
+
+
+
+
+ Bitfinex Reports
+
+
+
+
+
+
+ Maintenance in progress
+
+
+
+
+
diff --git a/scripts/setup.sh b/scripts/setup.sh
new file mode 100755
index 000000000..9c639ac39
--- /dev/null
+++ b/scripts/setup.sh
@@ -0,0 +1,269 @@
+#!/bin/bash
+
+set -euo pipefail
+
+SCRIPTPATH="$(cd -- "$(dirname "$0")" >/dev/null 2>&1; pwd -P)"
+ROOT="$(dirname "$SCRIPTPATH")"
+
+user="$(id -un 2>/dev/null || true)"
+MAIN_USER="${USER:-user}"
+
+envFilePath="$ROOT/.env"
+envExampleFilePath="$ROOT/.env.example"
+
+COLOR_RED="\033[31m"
+COLOR_GREEN="\033[32m"
+COLOR_YELLOW="\033[33m"
+COLOR_BLUE="\033[34m"
+COLOR_NORMAL="\033[39m"
+
+programname=$0
+yesToEverything=0
+isDBFoldedRemoved=1
+
+function usage {
+ echo -e "\
+\n${COLOR_GREEN}Usage: $programname [options] [-y] | [-h]${COLOR_BLUE}
+\nOptions:
+ -y With this option, all questions are automatically answered with 'Yes'. \
+In this case, the questions themselves will not be displayed
+ -n Don't remove files of DBs
+ -h Display help\
+${COLOR_NORMAL}" 1>&2
+}
+
+while getopts "ynh" opt; do
+ case "${opt}" in
+ y) yesToEverything=1;;
+ n) isDBFoldedRemoved=0;;
+ h)
+ usage
+ exit 0
+ ;;
+ *)
+ echo -e "\n${COLOR_RED}No reasonable options found!${COLOR_NORMAL}"
+ usage
+ exit 1
+ ;;
+ esac
+done
+
+function askUser {
+ if [ $yesToEverything == 1 ]; then
+ true
+ return
+ fi
+
+ local question="${1:-"What should be done"}"
+
+ local yesptrn="^[+1yY]"
+ local noptrn="^[-0nN]"
+ local yesword="yes"
+ local noword="no"
+
+ local formattedQestion=$(echo -e "\
+\n${COLOR_BLUE}$question \
+(${COLOR_GREEN}${yesword}${COLOR_BLUE} / \
+${COLOR_RED}${noword}${COLOR_BLUE})?${COLOR_NORMAL}\
+")
+
+ while true; do
+ read -p "$formattedQestion " answer
+
+ if [[ "$answer" =~ $yesptrn ]]; then
+ true
+ return
+ fi
+ if [[ "$answer" =~ $noptrn ]]; then
+ false
+ return
+ fi
+
+ echo -e "\
+\n${COLOR_RED}Available answer \
+'${yesword}' / '${noword}'!${COLOR_NORMAL}\
+" >&2
+
+ done
+}
+
+function askUserAboutBranch {
+ local masterBranch="master"
+ local betaBranch="beta"
+ local masterptrn="^$masterBranch$"
+ local betaptrn="^$betaBranch$"
+
+ if [ $yesToEverything == 1 ]; then
+ echo "${REPO_BRANCH:-"$masterBranch"}"
+ return
+ fi
+
+ local formattedQestion=$(echo -e "\
+\n${COLOR_BLUE}Choose syncing repository branch, by default '${COLOR_NORMAL}master${COLOR_BLUE}'\
+\nto apply it just push the 'Enter' key \
+(${COLOR_GREEN}${masterBranch}${COLOR_BLUE} / \
+${COLOR_YELLOW}${betaBranch}${COLOR_BLUE})?${COLOR_NORMAL}\
+")
+
+ while true; do
+ read -p "$formattedQestion " answer
+
+ if [[ -z $answer ]] || [[ "$answer" =~ $masterptrn ]]; then
+ echo "$masterBranch"
+ return
+ fi
+ if [[ "$answer" =~ $betaptrn ]]; then
+ echo "$betaBranch"
+ return
+ fi
+
+ echo -e "\
+\n${COLOR_RED}Available answer \
+'${masterBranch}' / '${betaBranch}'!${COLOR_NORMAL}\
+" >&2
+
+ done
+}
+
+function readLine {
+ local question="$1"
+ local defaultValue=${2:-""}
+ local value=""
+
+  if [ $yesToEverything == 1 ]; then
+    echo "$defaultValue"
+    return
+  fi
+
+  read -p "$question " value
+
+  echo "${value:-$defaultValue}"
+}
+
+function setConfig {
+ local filePath="$1"
+ local propName="$2"
+ local value="$3"
+
+  escapedValue=$(echo "$value" \
+ | sed 's/\//\\\//g' \
+ | sed 's/\+/\\\+/g' \
+ | sed 's/\./\\\./g')
+
+ sed -i "s/^$propName.*/$propName=$escapedValue/g" "$filePath"
+ grep -q "^$propName" "$filePath" \
+ || echo "$propName=$escapedValue" >> "$filePath"
+}
+
+if ! docker --version; then
+ echo -e "\
+\n${COLOR_RED}Docker has not been found\
+${COLOR_NORMAL}" >&2
+
+ if askUser "Should here try to install Docker using the 'convenience script'"; then
+ # Docker provides a convenient installation script:
+ # https://docs.docker.com/engine/install/ubuntu/#install-using-the-convenience-script
+ # https://github.com/docker/docker-install
+
+ dockerScriptPath="$SCRIPTPATH/get-docker.sh"
+ curl -fsSL https://get.docker.com -o "$dockerScriptPath"
+ sh "$dockerScriptPath"
+ rm -f "$dockerScriptPath"
+
+ usermod -aG docker $MAIN_USER
+ newgrp docker
+ fi
+fi
+
+if ! docker-compose --version; then
+ echo -e "\
+\n${COLOR_RED}Docker-compose has not been found\
+${COLOR_NORMAL}" >&2
+
+ if askUser "Should here try to install docker-compose"; then
+ # Install Compose on Linux systems
+ # https://docs.docker.com/compose/install/#install-compose-on-linux-systems
+
+ curl -L "https://github.com/docker/compose/releases/download/1.29.2/docker-compose-$(uname -s)-$(uname -m)" -o /usr/local/bin/docker-compose
+ chmod +x /usr/local/bin/docker-compose
+    ln -s /usr/local/bin/docker-compose /usr/bin/docker-compose 2>/dev/null || true
+
+ docker-compose --version
+ fi
+fi
+
+if askUser "\
+The app will be setup from scratch!\n\
+All Log and DB and CSV files will be removed!\n\
+Are you sure?\
+"; then
+ rm -rf "$ROOT/logs"
+ rm -rf "$ROOT/csv"
+ mkdir "$ROOT/logs" 2>/dev/null
+ mkdir "$ROOT/csv" 2>/dev/null
+
+ if [ $isDBFoldedRemoved == 1 ]; then
+ find "$ROOT/db" ! -path "$ROOT/db/.gitkeep" -type f -exec rm -rf {} +
+ fi
+fi
+
+secretKey=$({ dd if=/dev/urandom bs=256 count=1 || test $? = 1; } 2>/dev/null | od -A n -t x | tr -d '\n| ')
+size=${#secretKey}
+
+if [ $size != 512 ]; then
+ echo -e "\
+\n${COLOR_RED}The secret key has not been generated!\n
+The default value 'secretKey' will be set to 'SECRET_KEY'.\n
+Please change it setting the corresponding environment variable for security.\
+${COLOR_NORMAL}" >&2
+
+ secretKey="secretKey"
+else
+ echo -e "\
+\n${COLOR_GREEN}A new secret key has been generated successfully:${COLOR_NORMAL}\n\n\
+$secretKey\
+\n\n${COLOR_YELLOW}Warning, don't store secret key value in the '.env' file for production,\n\
+need to set it into 'SECRET_KEY' environment variable!\
+${COLOR_NORMAL}"
+fi
+
+repoBranch=$(askUserAboutBranch)
+
+nginxPortQestion=$(echo -e "\
+\n${COLOR_BLUE}Enter NGINX port, by default '${COLOR_NORMAL}80${COLOR_BLUE}',\
+\nto apply it just push the 'Enter' key\
+${COLOR_NORMAL}")
+nginxPort=$(readLine "$nginxPortQestion" 80)
+
+nginxHostQestion=$(echo -e "\
+\n${COLOR_BLUE}Enter NGINX host, by default '${COLOR_NORMAL}localhost${COLOR_BLUE}',\
+\nto apply it just push the 'Enter' key\
+${COLOR_NORMAL}")
+nginxHost=$(readLine "$nginxHostQestion" "localhost")
+
+if [ ! -f "$envFilePath" ]; then
+ cp -f "$envExampleFilePath" "$envFilePath"
+
+ echo -e "\
+\n${COLOR_YELLOW}The '.env' file has been made from '.env.example' template!\
+${COLOR_NORMAL}"
+fi
+
+setConfig "$envFilePath" "REPO_BRANCH" "$repoBranch"
+setConfig "$envFilePath" "NGINX_PORT" "$nginxPort"
+setConfig "$envFilePath" "NGINX_HOST" "$nginxHost"
+setConfig "$envFilePath" "SECRET_KEY" "$secretKey"
+
+if askUser "Should all repository/submodules be synced?"; then
+  "$ROOT/scripts/sync-repo.sh" "-a"
+fi
+
+envFile=$(cat "$envFilePath")
+echo -e "\
+\n${COLOR_BLUE}The contents of '.env' file:${COLOR_NORMAL}\
+\n*****************************\
+\n\n${COLOR_YELLOW}$envFile${COLOR_NORMAL}\
+\n\n*****************************\
+"
+
+echo -e "\n${COLOR_GREEN}DONE!${COLOR_NORMAL}\n"
diff --git a/scripts/sync-repo.sh b/scripts/sync-repo.sh
new file mode 100755
index 000000000..9a01a330f
--- /dev/null
+++ b/scripts/sync-repo.sh
@@ -0,0 +1,119 @@
+#!/bin/bash
+
+set -euo pipefail
+
+SCRIPTPATH="$(cd -- "$(dirname "$0")" >/dev/null 2>&1; pwd -P)"
+ROOT="$(dirname "$SCRIPTPATH")"
+CURRDIR="$PWD"
+
+COLOR_RED="\033[31m"
+COLOR_GREEN="\033[32m"
+COLOR_BLUE="\033[34m"
+COLOR_NORMAL="\033[39m"
+
+dotEnvFilePath="$ROOT/.env"
+
+set -a
+[ -f "$dotEnvFilePath" ] && . "$dotEnvFilePath"
+set +a
+
+programname=$0
+uiSubmoduleName=bfx-report-ui
+expressSubmoduleName=bfx-report-express
+branch="${REPO_BRANCH:-"master"}"
+
+syncAll=0
+syncWorker=0
+syncUI=0
+syncExpress=0
+
+function usage {
+ echo -e "\
+\n${COLOR_GREEN}Usage: $programname [options] | [-h]${COLOR_BLUE}
+\nOptions:
+ -a Sync all repositories
+ -w Sync bfx-reports-framework only
+ -u Sync bfx-report-ui only
+ -e Sync bfx-report-express only
+ -h Display help\
+${COLOR_NORMAL}" 1>&2
+}
+
+if [ $# == 0 ]; then
+ usage
+ exit 1
+fi
+
+while getopts "awueh" opt; do
+ case "${opt}" in
+ a) syncAll=1;;
+ w) syncWorker=1;;
+ u) syncUI=1;;
+ e) syncExpress=1;;
+ h)
+ usage
+ exit 0
+ ;;
+ *)
+ echo -e "\n${COLOR_RED}No reasonable options found!${COLOR_NORMAL}"
+ usage
+ exit 1
+ ;;
+ esac
+done
+
+cd "$ROOT"
+
+if [ $syncWorker == 1 ] && [ $syncUI == 1 ] && [ $syncExpress == 1 ]; then
+ syncAll=1
+ syncWorker=0
+ syncUI=0
+ syncExpress=0
+fi
+
+if [ $syncAll == 1 ] || [ $syncWorker == 1 ] || [ $syncUI == 1 ] || [ $syncExpress == 1 ]; then
+ git config url."https://github.com/".insteadOf git@github.com:
+ git fetch --recurse-submodules=on-demand
+ git submodule sync --recursive
+ git config --unset url."https://github.com/".insteadOf
+fi
+
+if [ $syncAll == 1 ]; then
+ git clean -fd
+ git reset --hard "origin/$branch"
+
+ git submodule foreach --recursive "git clean -fd; git reset --hard HEAD"
+ git submodule update --init --force --recursive
+
+ cd "$CURRDIR"
+ exit 0
+fi
+if [ $syncWorker == 1 ]; then
+ git clean -fd
+ git reset --hard "origin/$branch"
+fi
+if [ $syncUI == 1 ]; then
+  git submodule foreach '
+    if [ "$sm_path" = "'"$uiSubmoduleName"'" ]; then
+      git clean -fd
+      git reset --hard HEAD
+    fi
+'
+
+ git submodule update --init --force $uiSubmoduleName
+fi
+if [ $syncExpress == 1 ]; then
+  git submodule foreach --recursive '
+    if [ "$sm_path" = "'"$expressSubmoduleName"'" ]; then
+      git clean -fd
+      git reset --hard HEAD
+    fi
+'
+  git submodule foreach '
+    if [ "$sm_path" = "'"$uiSubmoduleName"'" ]; then
+      git submodule update --init --force '"$expressSubmoduleName"'
+    fi
+'
+fi
+
+cd "$CURRDIR"
diff --git a/scripts/worker-entrypoint.sh b/scripts/worker-entrypoint.sh
new file mode 100755
index 000000000..d1bcf00b8
--- /dev/null
+++ b/scripts/worker-entrypoint.sh
@@ -0,0 +1,33 @@
+#!/bin/bash
+
+set -euo pipefail
+
+if [ -z "${SECRET_KEY:-}" ]; then
+ printf '%s\n' "'SECRET_KEY' environment variable must be exported" >&2
+ exit 1
+fi
+if [ "${NODE_ENV:-"production"}" = "development" ]; then
+  enableLogDebug=true
+fi
+
+set -- node "$@" \
+ "--wtype" "wrk-report-framework-api" \
+ "--isSchedulerEnabled" "true" \
+ "--env" "$NODE_ENV" \
+ "--apiPort" "$WORKER_API_PORT" \
+ "--wsPort" "$WORKER_WS_PORT" \
+ "--tempFolder" "$TEMP_FOLDER" \
+ "--dbFolder" "$DB_FOLDER" \
+ "--csvFolder" "$CSV_FOLDER" \
+ "--logsFolder" "$LOGS_FOLDER" \
+ "--grape" "http://$GRAPE_HOST:$GRAPE_APH" \
+ "--secretKey" "$SECRET_KEY"
+
+if [ -n "${SCHEDULER_RULE:-}" ]; then
+ set -- "$@" "--schedulerRule" "$SCHEDULER_RULE"
+fi
+if [ "${NGINX_AUTOINDEX:-}" = "on" ]; then
+ set -- "$@" "--remoteCsvUrn" "csv/"
+fi
+
+exec "$@"
diff --git a/terraform/.terraform.lock.hcl b/terraform/.terraform.lock.hcl
new file mode 100644
index 000000000..ecd6ef288
--- /dev/null
+++ b/terraform/.terraform.lock.hcl
@@ -0,0 +1,97 @@
+# This file is maintained automatically by "terraform init".
+# Manual edits may be lost in future updates.
+
+provider "registry.terraform.io/hashicorp/aws" {
+ version = "4.7.0"
+ constraints = ">= 2.0.0, >= 3.0.0, >= 3.63.0, ~> 4.7.0"
+ hashes = [
+ "h1:H2d956E++qhRN/ZOHMoOxXSHEJNCr4CpdSSPY3e9mX8=",
+ "zh:00f77e618cdceb507b7033758a94459ca1d2904ec0f99d9dfdbfdd98f3f219d8",
+ "zh:49989eb97859e5ef7f2123422fceaa3a1d5d63a4b7800591737e835dd218701e",
+ "zh:5107f889858f99efcfb37a53dd5f5e1b064ae6debcd13d493ae4bb3c02370d1b",
+ "zh:7d4c85de26cbb8662cba441c923f9928756800380f36a68cf49f60f5b3212165",
+ "zh:87f1b4a26ed3e0741670dfc8708b45bf17ad77d3e72b43bfc123ffae170b3578",
+ "zh:9470ef10e55fdacec8aaf5457eb299c6624e05cc9890e162244c446bb704b93a",
+ "zh:9b12af85486a96aedd8d7984b0ff811a4b42e3d88dad1a3fb4c0b580d04fa425",
+ "zh:acbf6fbdbdf284829cdfd5652e8fc17c9ad9f458978a16ea63b2bed2b4d98e41",
+ "zh:b491c45ae264512d744992a0ab88660e878b4462a38835dab54e61a61ade9378",
+ "zh:ee2a5908c074cb5ebe53d5abed99096695e3346833a80d9833c26882ddabf913",
+ "zh:f2b311a760b5a5a2a9c889632c15dd5617350ae95ac7e2253aab3931fbc41c37",
+ "zh:fdf1b5a37be1a3aa6953f4b56d7294c1e86da12bb661c24f4e0840321c10973f",
+ ]
+}
+
+provider "registry.terraform.io/hashicorp/local" {
+ version = "2.2.2"
+ hashes = [
+ "h1:5UYW2wJ320IggrzLt8tLD6MowePqycWtH1b2RInHZkE=",
+ "zh:027e4873c69da214e2fed131666d5de92089732a11d096b68257da54d30b6f9d",
+ "zh:0ba2216e16cfb72538d76a4c4945b4567a76f7edbfef926b1c5a08d7bba2a043",
+ "zh:1fee8f6aae1833c27caa96e156cf99a681b6f085e476d7e1b77d285e21d182c1",
+ "zh:2e8a3e72e877003df1c390a231e0d8e827eba9f788606e643f8e061218750360",
+ "zh:719008f9e262aa1523a6f9132adbe9eee93c648c2981f8359ce41a40e6425433",
+ "zh:78d5eefdd9e494defcb3c68d282b8f96630502cac21d1ea161f53cfe9bb483b3",
+ "zh:9a70fdbe6ef955c4919a4519caca116f34c19c7ddedd77990fbe4f80fe66dc84",
+ "zh:abc412423d670cbb6264827fa80e1ffdc4a74aff3f19ba6a239dd87b85b15bec",
+ "zh:ae953a62c94d2a2a0822e5717fafc54e454af57bd6ed02cd301b9786765c1dd3",
+ "zh:be0910bdf46698560f9e86f51a4ff795c62c02f8dc82b2b1dab77a0b3a93f61e",
+ "zh:e58f9083b7971919b95f553227adaa7abe864fce976f0166cf4d65fc17257ff2",
+ "zh:ff4f77cbdbb22cc98182821c7ef84dce16298ab0e997d5c7fae97247f7a4bcb0",
+ ]
+}
+
+provider "registry.terraform.io/hashicorp/null" {
+ version = "3.1.1"
+ hashes = [
+ "h1:71sNUDvmiJcijsvfXpiLCz0lXIBSsEJjMxljt7hxMhw=",
+ "zh:063466f41f1d9fd0dd93722840c1314f046d8760b1812fa67c34de0afcba5597",
+ "zh:08c058e367de6debdad35fc24d97131c7cf75103baec8279aba3506a08b53faf",
+ "zh:73ce6dff935150d6ddc6ac4a10071e02647d10175c173cfe5dca81f3d13d8afe",
+ "zh:78d5eefdd9e494defcb3c68d282b8f96630502cac21d1ea161f53cfe9bb483b3",
+ "zh:8fdd792a626413502e68c195f2097352bdc6a0df694f7df350ed784741eb587e",
+ "zh:976bbaf268cb497400fd5b3c774d218f3933271864345f18deebe4dcbfcd6afa",
+ "zh:b21b78ca581f98f4cdb7a366b03ae9db23a73dfa7df12c533d7c19b68e9e72e5",
+ "zh:b7fc0c1615dbdb1d6fd4abb9c7dc7da286631f7ca2299fb9cd4664258ccfbff4",
+ "zh:d1efc942b2c44345e0c29bc976594cb7278c38cfb8897b344669eafbc3cddf46",
+ "zh:e356c245b3cd9d4789bab010893566acace682d7db877e52d40fc4ca34a50924",
+ "zh:ea98802ba92fcfa8cf12cbce2e9e7ebe999afbf8ed47fa45fc847a098d89468b",
+ "zh:eff8872458806499889f6927b5d954560f3d74bf20b6043409edf94d26cd906f",
+ ]
+}
+
+provider "registry.terraform.io/hashicorp/random" {
+ version = "3.1.2"
+ hashes = [
+ "h1:5A5VsY5wNmOZlupUcLnIoziMPn8htSZBXbP3lI7lBEM=",
+ "zh:0daceba867b330d3f8e2c5dc895c4291845a78f31955ce1b91ab2c4d1cd1c10b",
+ "zh:104050099efd30a630741f788f9576b19998e7a09347decbec3da0b21d64ba2d",
+ "zh:173f4ef3fdf0c7e2564a3db0fac560e9f5afdf6afd0b75d6646af6576b122b16",
+ "zh:41d50f975e535f968b3f37170fb07937c15b76d85ba947d0ce5e5ff9530eda65",
+ "zh:51a5038867e5e60757ed7f513dd6a973068241190d158a81d1b69296efb9cb8d",
+ "zh:6432a568e97a5a36cc8aebca5a7e9c879a55d3bc71d0da1ab849ad905f41c0be",
+ "zh:6bac6501394b87138a5e17c9f3a41e46ff7833ad0ba2a96197bb7787e95b641c",
+ "zh:6c0a7f5faacda644b022e7718e53f5868187435be6d000786d1ca05aa6683a25",
+ "zh:74c89de3fa6ef3027efe08f8473c2baeb41b4c6cee250ba7aeb5b64e8c79800d",
+ "zh:78d5eefdd9e494defcb3c68d282b8f96630502cac21d1ea161f53cfe9bb483b3",
+ "zh:b29eabbf0a5298f0e95a1df214c7cfe06ea9bcf362c63b3ad2f72d85da7d4685",
+ "zh:e891458c7a61e5b964e09616f1a4f87d0471feae1ec04cc51776e7dec1a3abce",
+ ]
+}
+
+provider "registry.terraform.io/hashicorp/tls" {
+ version = "3.1.0"
+ hashes = [
+ "h1:fUJX8Zxx38e2kBln+zWr1Tl41X+OuiE++REjrEyiOM4=",
+ "zh:3d46616b41fea215566f4a957b6d3a1aa43f1f75c26776d72a98bdba79439db6",
+ "zh:623a203817a6dafa86f1b4141b645159e07ec418c82fe40acd4d2a27543cbaa2",
+ "zh:668217e78b210a6572e7b0ecb4134a6781cc4d738f4f5d09eb756085b082592e",
+ "zh:95354df03710691773c8f50a32e31fca25f124b7f3d6078265fdf3c4e1384dca",
+ "zh:9f97ab190380430d57392303e3f36f4f7835c74ea83276baa98d6b9a997c3698",
+ "zh:a16f0bab665f8d933e95ca055b9c8d5707f1a0dd8c8ecca6c13091f40dc1e99d",
+ "zh:be274d5008c24dc0d6540c19e22dbb31ee6bfdd0b2cddd4d97f3cd8a8d657841",
+ "zh:d5faa9dce0a5fc9d26b2463cea5be35f8586ab75030e7fa4d4920cd73ee26989",
+ "zh:e9b672210b7fb410780e7b429975adcc76dd557738ecc7c890ea18942eb321a5",
+ "zh:eb1f8368573d2370605d6dbf60f9aaa5b64e55741d96b5fb026dbfe91de67c0d",
+ "zh:fc1e12b713837b85daf6c3bb703d7795eaf1c5177aebae1afcf811dd7009f4b0",
+ ]
+}
diff --git a/terraform/config/backend.conf.example b/terraform/config/backend.conf.example
new file mode 100644
index 000000000..b2b453aba
--- /dev/null
+++ b/terraform/config/backend.conf.example
@@ -0,0 +1,10 @@
+bucket="THE_NAME_OF_THE_STATE_BUCKET"
+kms_key_id="THE_ID_OF_THE_KMS_KEY"
+
+key="states/bfx-reports-framework.tfstate"
+dynamodb_table="tf-remote-state-lock"
+encrypt=true
+
+shared_credentials_file = "config/credentials.conf"
+profile = "default"
+region="eu-central-1"
diff --git a/terraform/config/credentials.conf.example b/terraform/config/credentials.conf.example
new file mode 100644
index 000000000..78cfb0df3
--- /dev/null
+++ b/terraform/config/credentials.conf.example
@@ -0,0 +1,3 @@
+[default]
+aws_access_key_id=
+aws_secret_access_key=
diff --git a/terraform/main.tf b/terraform/main.tf
new file mode 100644
index 000000000..5832d4d5e
--- /dev/null
+++ b/terraform/main.tf
@@ -0,0 +1,61 @@
+locals {
+ common_tags = merge(
+ var.common_tags,
+ {
+ Namespace = var.namespace,
+ Workspace = terraform.workspace,
+ Environment = var.env
+ }
+ )
+}
+
+module "app" {
+ source = "./modules/app"
+
+ count = var.is_app_enabled ? 1 : 0
+
+ namespace = var.namespace
+ env = var.env
+ aws_instance_type = var.aws_instance_type
+ aws_instance_detailed_mon = var.aws_instance_detailed_mon
+
+ db_volume_device_name = var.db_volume_device_name
+ db_volume_size = var.db_volume_size
+ db_volume_type = var.db_volume_type
+ is_db_volume_encrypted = var.is_db_volume_encrypted
+
+ repo_fork = var.repo_fork
+ repo_branch = var.repo_branch
+ nginx_autoindex = var.nginx_autoindex
+ nginx_port = var.nginx_port
+
+ aws_vpc_cidr = var.aws_vpc_cidr
+ allowed_ports = var.allowed_ports
+
+ key_name = var.key_name
+
+ # AWS KMS supports automatic key rotation only for symmetric KMS keys
+ # https://docs.aws.amazon.com/kms/latest/developerguide/rotate-keys.html
+ customer_master_key_spec = var.customer_master_key_spec
+ enable_key_rotation = var.enable_key_rotation
+
+ common_tags = local.common_tags
+}
+
+module "backend" {
+ source = "./modules/backend"
+
+ count = var.is_backend_s3_enabled ? 1 : 0
+
+ providers = {
+ aws = aws
+ aws.replica = aws.replica
+ }
+
+ namespace = var.namespace
+ is_backend_s3_config_removed = var.is_backend_s3_config_removed
+ is_backend_s3_replication_enabled = var.is_backend_s3_replication_enabled
+ is_backend_s3_bucket_force_destroyed = var.is_backend_s3_bucket_force_destroyed
+ tf_lock_dynamodb_table_name = var.tf_lock_dynamodb_table_name
+ common_tags = local.common_tags
+}
diff --git a/terraform/modules/app/main.tf b/terraform/modules/app/main.tf
new file mode 100644
index 000000000..c5489d597
--- /dev/null
+++ b/terraform/modules/app/main.tf
@@ -0,0 +1,78 @@
+locals {
+ ec2_user_name = "ubuntu"
+ ec2_root_dir = "/home/${local.ec2_user_name}/bfx-reports-framework"
+}
+
+module "network" {
+ source = "../network"
+ namespace = var.namespace
+ vpc_cidr = var.aws_vpc_cidr
+ common_tags = var.common_tags
+ allowed_ports = var.allowed_ports
+ azs = data.aws_availability_zones.available.names
+}
+
+module "ec2" {
+ source = "../ec2"
+ ssh_connect_script_name = "worker-connect.sh"
+ namespace = var.namespace
+ aws_instance_type = var.aws_instance_type
+ aws_instance_detailed_mon = var.aws_instance_detailed_mon
+ sec_gr_ids = [module.network.sec_gr_pub_id]
+ subnet_id = module.network.vpc.public_subnets[0]
+ key_name = module.ssh_key.key_name
+ private_key = module.ssh_key.private_key
+ user_name = local.ec2_user_name
+ root_dir = local.ec2_root_dir
+ az = data.aws_availability_zones.available.names[0]
+ db_volume_device_name = var.db_volume_device_name
+ db_volume_size = var.db_volume_size
+ db_volume_type = var.db_volume_type
+ is_db_volume_encrypted = var.is_db_volume_encrypted
+ kms_key_arn = module.kms_key.kms_key_arn
+ secret_key = module.ssm_param_secret_key.sec_string
+ aws_eip_id = module.network.instance_eip.id
+
+ user_data = templatefile("setup.sh.tpl", {
+ user_name = local.ec2_user_name
+ root_dir = local.ec2_root_dir
+ env = var.env
+ nginx_autoindex = var.nginx_autoindex
+ repo_fork = var.repo_fork
+ repo_branch = var.repo_branch
+ nginx_port = var.nginx_port
+ nginx_host = module.network.instance_eip.public_dns
+ db_volume_device_name = var.db_volume_device_name
+ })
+
+ common_tags = var.common_tags
+}
+
+module "ssh_key" {
+ source = "../ssh_key"
+ key_name = var.key_name
+}
+
+module "kms_key" {
+ source = "../kms_key"
+ namespace = var.namespace
+ customer_master_key_spec = var.customer_master_key_spec
+ # AWS KMS supports automatic key rotation only for symmetric KMS keys
+ # https://docs.aws.amazon.com/kms/latest/developerguide/rotate-keys.html
+ enable_key_rotation = var.enable_key_rotation
+ user_arn = data.aws_caller_identity.current.arn
+
+ common_tags = var.common_tags
+}
+
+module "ssm_param_secret_key" {
+ source = "../ssm_random_sec_param"
+ namespace = var.namespace
+ env = var.env
+ name = "secret_key"
+ length = 512
+ common_tags = var.common_tags
+}
+
+data "aws_availability_zones" "available" {}
+data "aws_caller_identity" "current" {}
diff --git a/terraform/modules/app/outputs.tf b/terraform/modules/app/outputs.tf
new file mode 100644
index 000000000..119d636e1
--- /dev/null
+++ b/terraform/modules/app/outputs.tf
@@ -0,0 +1,7 @@
+output bfx_reports_framework_pub_ip {
+ value = module.network.instance_eip.public_ip
+}
+
+output bfx_reports_framework_pub_dns {
+ value = module.network.instance_eip.public_dns
+}
diff --git a/terraform/modules/app/variables.tf b/terraform/modules/app/variables.tf
new file mode 100644
index 000000000..5cb09e869
--- /dev/null
+++ b/terraform/modules/app/variables.tf
@@ -0,0 +1,118 @@
+variable "namespace" {
+ type = string
+ description = "Namespace"
+ default = "Custom"
+}
+
+variable "key_name" {
+ type = string
+ description = "AWS SSH key name"
+ default = "bfx-ssh-key"
+}
+
+variable "env" {
+ type = string
+ description = "Environment"
+ default = "production"
+
+ validation {
+ condition = contains(["production", "development"], var.env)
+ error_message = "The env value must be one of the following \"production\" or \"development\"."
+ }
+}
+
+variable "db_volume_device_name" {
+ type = string
+ description = "DB volume device name"
+ default = "/dev/xvdf"
+}
+
+variable "db_volume_size" {
+ type = number
+ description = "DB volume size in Gb"
+ default = 10
+}
+
+variable "db_volume_type" {
+ type = string
+ description = "DB volume type, see https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/ebs_volume#type"
+ default = "gp3"
+}
+
+variable "is_db_volume_encrypted" {
+ type = bool
+ description = "Is DB volume encrypted"
+ default = true
+}
+
+variable "customer_master_key_spec" {
+ type = string
+ description = "Key specs for KMS keys"
+ default = "SYMMETRIC_DEFAULT"
+}
+
+variable "enable_key_rotation" {
+ type = bool
+ description = "Specifies whether key rotation is enabled, AWS KMS supports automatic key rotation only for symmetric KMS keys"
+ default = true
+}
+
+variable "nginx_autoindex" {
+ type = string
+ description = "NGINX autoindex"
+ default = "on"
+
+ validation {
+ condition = contains(["on", "off"], var.nginx_autoindex)
+ error_message = "The nginx_autoindex value must be one of the following \"on\" or \"off\"."
+ }
+}
+
+variable "repo_fork" {
+ type = string
+ description = "Repository fork"
+ default = "bitfinexcom"
+}
+
+variable "repo_branch" {
+ type = string
+ description = "Repository branch"
+ default = "master"
+}
+
+
+variable "nginx_port" {
+ type = number
+ description = "NGINX port"
+ default = 80
+}
+
+variable "aws_instance_type" {
+ type = string
+ description = "AWS instance type"
+ default = "t2.medium"
+}
+
+variable "allowed_ports" {
+ type = list(number)
+ description = "Allowed ports"
+ default = [80, 443, 22]
+}
+
+variable "aws_instance_detailed_mon" {
+ type = bool
+ description = "AWS instance detailed monitoring"
+ default = true
+}
+
+variable "aws_vpc_cidr" {
+ type = string
+ description = "A /16 CIDR range definition, such as 10.11.0.0/16, that the VPC will use"
+ default = "10.11.0.0/16"
+}
+
+variable "common_tags" {
+ type = map
+ description = "Common tags"
+ default = {}
+}
diff --git a/terraform/modules/backend/main.tf b/terraform/modules/backend/main.tf
new file mode 100644
index 000000000..6bba3130e
--- /dev/null
+++ b/terraform/modules/backend/main.tf
@@ -0,0 +1,68 @@
+module "remote_state" {
+ source = "nozaq/remote-state-s3-backend/aws"
+ version = "1.1.2"
+
+ terraform_iam_policy_create = true
+ enable_replication = var.is_backend_s3_replication_enabled
+ s3_bucket_force_destroy = var.is_backend_s3_bucket_force_destroyed
+ dynamodb_table_name = var.tf_lock_dynamodb_table_name
+ replica_bucket_prefix = "terraform-state-bucket-replica-"
+ state_bucket_prefix = "terraform-state-bucket-"
+
+ providers = {
+ aws = aws
+ aws.replica = aws.replica
+ }
+
+ tags = merge(
+ var.common_tags,
+ { Name = "${var.namespace}_TF_Backend" }
+ )
+}
+
+resource "local_file" "backend_conf" {
+ filename = "config/backend.conf"
+
+ content = < 0 ? var.az : null
+ secret_key = nonsensitive(var.secret_key)
+}
+
+resource "aws_instance" "ubuntu" {
+ ami = data.aws_ami.ubuntu.id
+ instance_type = var.aws_instance_type
+ monitoring = var.aws_instance_detailed_mon
+
+ user_data = var.user_data
+
+ availability_zone = local.availability_zone
+ vpc_security_group_ids = var.sec_gr_ids
+ subnet_id = var.subnet_id
+
+ key_name = var.key_name
+
+ tags = merge(
+ var.common_tags,
+ { Name = "${var.namespace}_Instance" }
+ )
+}
+
+resource "local_sensitive_file" "private_key" {
+ filename = "${var.ssh_connect_script_name}"
+
+ content = < /dev/null 2>&1",
+ "echo \"cloud-init is done\""
+ ]
+ }
+ provisioner "remote-exec" {
+ inline = [
+ "if [ -f \"${var.root_dir}/READY\" ]; then \"${var.root_dir}/scripts/deploy.sh\" SECRET_KEY=${local.secret_key}; fi",
+ "if ! [ -f \"${var.root_dir}/READY\" ]; then echo \"The bash setup script has not been finished successfully!\"; exit 1; fi"
+ ]
+ }
+}
+
+resource "aws_ebs_volume" "ebs_volume_1" {
+ availability_zone = local.availability_zone
+ size = var.db_volume_size
+ type = var.db_volume_type
+ encrypted = var.is_db_volume_encrypted
+ kms_key_id = var.is_db_volume_encrypted ? var.kms_key_arn : null
+
+ tags = merge(
+ var.common_tags,
+ { Name = "${var.namespace}_Volume" }
+ )
+}
+
+resource "aws_volume_attachment" "ebs_volume_1_attachment" {
+ device_name = var.db_volume_device_name
+ volume_id = aws_ebs_volume.ebs_volume_1.id
+ instance_id = aws_instance.ubuntu.id
+ skip_destroy = false
+ stop_instance_before_detaching = true
+ force_detach = true
+}
+
+resource "aws_eip_association" "eip_assoc" {
+ instance_id = aws_instance.ubuntu.id
+ allocation_id = var.aws_eip_id
+}
+
+data "aws_ami" "ubuntu" {
+ most_recent = true
+
+ filter {
+ name = "name"
+ values = ["ubuntu/images/hvm-ssd/ubuntu-focal-20.04-amd64-server-*"]
+ }
+
+ filter {
+ name = "virtualization-type"
+ values = ["hvm"]
+ }
+
+ owners = ["099720109477"] # Canonical
+}
diff --git a/terraform/modules/ec2/outputs.tf b/terraform/modules/ec2/outputs.tf
new file mode 100644
index 000000000..127f884a9
--- /dev/null
+++ b/terraform/modules/ec2/outputs.tf
@@ -0,0 +1,7 @@
+output instance_pub_ip {
+ value = aws_instance.ubuntu.public_ip
+}
+
+output instance_pub_dns {
+ value = aws_instance.ubuntu.public_dns
+}
diff --git a/terraform/modules/ec2/variables.tf b/terraform/modules/ec2/variables.tf
new file mode 100644
index 000000000..d02c461a7
--- /dev/null
+++ b/terraform/modules/ec2/variables.tf
@@ -0,0 +1,114 @@
+variable "namespace" {
+ type = string
+ default = "Custom"
+}
+
+variable "aws_instance_type" {
+ type = string
+ description = "AWS instance type"
+ default = "t2.medium"
+}
+
+variable "aws_eip_id" {
+ type = string
+ description = "AWS EIP ID"
+}
+
+variable "aws_instance_detailed_mon" {
+ type = bool
+ description = "AWS instance detailed monitoring"
+ default = true
+}
+
+variable "user_data" {
+ type = string
+ description = "Setup bash script"
+}
+
+variable "secret_key" {
+ type = string
+ description = "User Secret Key for encription"
+ sensitive = true
+}
+
+variable "sec_gr_ids" {
+ type = list
+ description = "AWS security group IDs"
+}
+
+variable "az" {
+ type = string
+ description = "Available zone name in the region"
+ default = null
+}
+
+variable "subnet_id" {
+ type = string
+ description = "AWS subnet ID"
+}
+
+variable "key_name" {
+ type = string
+ description = "AWS SSH key name"
+ default = "bfx-ssh-key"
+}
+
+variable "private_key" {
+ type = string
+ description = "AWS SSH private key"
+ sensitive = true
+}
+
+variable "user_name" {
+ type = string
+ description = "AWS EC2 user name"
+ default = "ubuntu"
+}
+
+variable "root_dir" {
+ type = string
+ description = "AWS EC2 root dir"
+ default = "project"
+}
+
+variable "db_volume_device_name" {
+ type = string
+ description = "DB volume device name"
+ default = "/dev/xvdf"
+}
+
+variable "db_volume_size" {
+ type = number
+ description = "DB volume size in Gb"
+ default = 10
+}
+
+variable "db_volume_type" {
+ type = string
+ description = "DB volume type, see https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/ebs_volume#type"
+ default = "gp3"
+}
+
+variable "is_db_volume_encrypted" {
+ type = bool
+ description = "Is DB volume encrypted"
+ default = true
+}
+
+variable "kms_key_arn" {
+ type = string
+ description = "Amazon Resource Name (ARN) of the KMS Key to use when encrypting the volume"
+ default = null
+}
+
+variable "ssh_connect_script_name" {
+ type = string
+ description = "SSH connect script name"
+ default = "ssh-connect.sh"
+}
+
+variable "common_tags" {
+ type = map
+ description = "Common tags"
+ default = {}
+}
diff --git a/terraform/modules/kms_key/main.tf b/terraform/modules/kms_key/main.tf
new file mode 100644
index 000000000..3e37f3b82
--- /dev/null
+++ b/terraform/modules/kms_key/main.tf
@@ -0,0 +1,98 @@
+resource "aws_kms_key" "kms_key" {
+ description = "General KMS key used for all resources in account"
+ customer_master_key_spec = var.customer_master_key_spec
+ enable_key_rotation = var.enable_key_rotation
+ is_enabled = var.is_enabled
+ multi_region = var.multi_region
+
+ policy = data.aws_iam_policy_document.kms_key.json
+
+ tags = merge(
+ var.common_tags,
+ { Name = "${var.namespace}_KMSKey" }
+ )
+}
+
+data "aws_iam_policy_document" "kms_key" {
+ statement {
+ sid = "1"
+
+ principals {
+ type = "AWS"
+ identifiers = [
+ var.user_arn
+ ]
+ }
+
+ actions = [
+ "kms:CreateGrant",
+ "kms:Decrypt",
+ "kms:Describe*",
+ "kms:Encrypt",
+ "kms:GenerateDataKey*",
+ "kms:ReEncrypt*"
+ ]
+
+ resources = [
+ "*",
+ ]
+ }
+
+ statement {
+
+ principals {
+ type = "AWS"
+ identifiers = [
+ var.user_arn
+ ]
+ }
+
+ actions = [
+ "kms:CreateGrant",
+ ]
+
+ resources = [
+ "*",
+ ]
+
+ condition {
+ test = "Bool"
+ variable = "kms:GrantIsForAWSResource"
+
+ values = [
+ true
+ ]
+ }
+ }
+
+ statement {
+
+ principals {
+ type = "AWS"
+ identifiers = [
+ var.user_arn
+ ]
+ }
+
+ actions = [
+ "kms:Create*",
+ "kms:Describe*",
+ "kms:Enable*",
+ "kms:List*",
+ "kms:Put*",
+ "kms:Update*",
+ "kms:Revoke*",
+ "kms:Disable*",
+ "kms:Get*",
+ "kms:Delete*",
+ "kms:ScheduleKeyDeletion",
+ "kms:CancelKeyDeletion",
+ "kms:TagResource",
+ "kms:UntagResource"
+ ]
+
+ resources = [
+ "*",
+ ]
+ }
+}
diff --git a/terraform/modules/kms_key/outputs.tf b/terraform/modules/kms_key/outputs.tf
new file mode 100644
index 000000000..0463247c1
--- /dev/null
+++ b/terraform/modules/kms_key/outputs.tf
@@ -0,0 +1,11 @@
+output "kms_key_arn" {
+ value = aws_kms_key.kms_key.arn
+}
+
+output "kms_key_id" {
+ value = aws_kms_key.kms_key.id
+}
+
+output "kms_key_policy_document" {
+ value = data.aws_iam_policy_document.kms_key
+}
diff --git a/terraform/modules/kms_key/variables.tf b/terraform/modules/kms_key/variables.tf
new file mode 100644
index 000000000..cb55f1423
--- /dev/null
+++ b/terraform/modules/kms_key/variables.tf
@@ -0,0 +1,39 @@
+variable "namespace" {
+ type = string
+ default = "Custom"
+}
+
+variable "user_arn" {
+ type = string
+ description = "Amazon Resource Name (ARN) of the current caller"
+}
+
+variable "customer_master_key_spec" {
+ type = string
+ description = "Key specs for KMS keys"
+ default = "SYMMETRIC_DEFAULT"
+}
+
+variable "enable_key_rotation" {
+ type = bool
+ description = "Specifies whether key rotation is enabled"
+ default = true
+}
+
+variable "is_enabled" {
+ type = bool
+ description = "Specifies whether the key is enabled"
+ default = true
+}
+
+variable "multi_region" {
+ type = bool
+ description = "Indicates whether the KMS key is a multi-Region"
+ default = false
+}
+
+variable "common_tags" {
+ type = map(string)
+ description = "Common tags"
+ default = {}
+}
diff --git a/terraform/modules/network/main.tf b/terraform/modules/network/main.tf
new file mode 100644
index 000000000..17d6622df
--- /dev/null
+++ b/terraform/modules/network/main.tf
@@ -0,0 +1,72 @@
+module "vpc" {
+ # https://github.com/terraform-aws-modules/terraform-aws-vpc
+ source = "terraform-aws-modules/vpc/aws"
+ version = "3.13.0"
+
+ name = "${var.namespace}_VPC"
+ cidr = var.vpc_cidr
+
+ azs = var.azs
+ private_subnets = [cidrsubnet(var.vpc_cidr, 8, 1), cidrsubnet(var.vpc_cidr, 8, 2)]
+ public_subnets = [cidrsubnet(var.vpc_cidr, 8, 101), cidrsubnet(var.vpc_cidr, 8, 102)]
+
+ enable_nat_gateway = true
+ single_nat_gateway = true
+ one_nat_gateway_per_az = false
+ reuse_nat_ips = true
+ external_nat_ip_ids = aws_eip.nat_eip.*.id
+
+ enable_dns_hostnames = true
+ enable_dns_support = true
+
+ tags = merge(
+ var.common_tags,
+ { Name = "${var.namespace}_VPC" }
+ )
+}
+
+resource "aws_eip" "nat_eip" {
+ vpc = true
+
+ tags = merge(
+ var.common_tags,
+ { Name = "${var.namespace}_NAT_EIP" }
+ )
+}
+
+resource "aws_eip" "instance" {
+ vpc = true
+
+ tags = merge(
+ var.common_tags,
+ { Name = "${var.namespace}_Instance_EIP" }
+ )
+}
+
+resource "aws_security_group" "sec_gr_pub" {
+ name = "${var.namespace}_SecurityGroup"
+ description = "Allow traffic"
+ vpc_id = module.vpc.vpc_id
+
+ dynamic "ingress" {
+ for_each = var.allowed_ports
+ content {
+ from_port = ingress.value
+ to_port = ingress.value
+ protocol = "tcp"
+ cidr_blocks = ["0.0.0.0/0"]
+ }
+ }
+
+ egress {
+ from_port = 0
+ to_port = 0
+ protocol = "-1"
+ cidr_blocks = ["0.0.0.0/0"]
+ }
+
+ tags = merge(
+ var.common_tags,
+ { Name = "${var.namespace}_SecurityGroup" }
+ )
+}
diff --git a/terraform/modules/network/outputs.tf b/terraform/modules/network/outputs.tf
new file mode 100644
index 000000000..055e33dc1
--- /dev/null
+++ b/terraform/modules/network/outputs.tf
@@ -0,0 +1,15 @@
+output "vpc" {
+ value = module.vpc
+}
+
+output "sec_gr_pub_id" {
+ value = aws_security_group.sec_gr_pub.id
+}
+
+output "nat_eip" {
+ value = aws_eip.nat_eip
+}
+
+output "instance_eip" {
+ value = aws_eip.instance
+}
diff --git a/terraform/modules/network/variables.tf b/terraform/modules/network/variables.tf
new file mode 100644
index 000000000..ee3376c43
--- /dev/null
+++ b/terraform/modules/network/variables.tf
@@ -0,0 +1,28 @@
+variable "namespace" {
+ type = string
+ default = "Custom"
+}
+
+variable "azs" {
+ type = list(string)
+ description = "A list of availability zones names or ids in the region"
+ default = []
+}
+
+variable "vpc_cidr" {
+ type = string
+ description = "A /16 CIDR range definition, such as 10.11.0.0/16, that the VPC will use"
+ default = "10.11.0.0/16"
+}
+
+variable "allowed_ports" {
+ type = list(number)
+ description = "Allowed ports"
+ default = []
+}
+
+variable "common_tags" {
+ type = map(string)
+ description = "Common tags"
+ default = {}
+}
diff --git a/terraform/modules/ssh_key/main.tf b/terraform/modules/ssh_key/main.tf
new file mode 100644
index 000000000..39b46fb65
--- /dev/null
+++ b/terraform/modules/ssh_key/main.tf
@@ -0,0 +1,14 @@
+resource "tls_private_key" "key" {
+ algorithm = "RSA"
+}
+
+resource "local_sensitive_file" "private_key" {
+ filename = "${var.key_name}.pem"
+ content = tls_private_key.key.private_key_pem
+ file_permission = "0400"
+}
+
+resource "aws_key_pair" "key_pair" {
+ key_name = var.key_name
+ public_key = tls_private_key.key.public_key_openssh
+}
diff --git a/terraform/modules/ssh_key/outputs.tf b/terraform/modules/ssh_key/outputs.tf
new file mode 100644
index 000000000..fc626a629
--- /dev/null
+++ b/terraform/modules/ssh_key/outputs.tf
@@ -0,0 +1,7 @@
+output "key_name" {
+ value = aws_key_pair.key_pair.key_name
+}
+
+output "private_key" {
+ value = tls_private_key.key.private_key_pem
+}
diff --git a/terraform/modules/ssh_key/variables.tf b/terraform/modules/ssh_key/variables.tf
new file mode 100644
index 000000000..5637bf973
--- /dev/null
+++ b/terraform/modules/ssh_key/variables.tf
@@ -0,0 +1,5 @@
+variable "key_name" {
+ type = string
+ description = "AWS SSH key name"
+ default = "bfx-ssh-key"
+}
diff --git a/terraform/modules/ssm_random_sec_param/main.tf b/terraform/modules/ssm_random_sec_param/main.tf
new file mode 100644
index 000000000..a1e0b7dac
--- /dev/null
+++ b/terraform/modules/ssm_random_sec_param/main.tf
@@ -0,0 +1,25 @@
+resource "random_password" "sec_string" {
+ length = var.length
+ special = var.special
+ number = var.number
+ lower = var.lower
+ upper = var.upper
+}
+
+resource "aws_ssm_parameter" "sec_string" {
+ name = "/${var.env}/${var.namespace}/${var.name}"
+ description = "SSM secure string ${var.name}"
+ type = "SecureString"
+ value = random_password.sec_string.result
+
+ tags = merge(
+ var.common_tags,
+ { Name = "${var.namespace}_${var.name}" }
+ )
+}
+
+data "aws_ssm_parameter" "sec_string" {
+ name = "/${var.env}/${var.namespace}/${var.name}"
+
+ depends_on = [aws_ssm_parameter.sec_string]
+}
diff --git a/terraform/modules/ssm_random_sec_param/outputs.tf b/terraform/modules/ssm_random_sec_param/outputs.tf
new file mode 100644
index 000000000..0e31a2ced
--- /dev/null
+++ b/terraform/modules/ssm_random_sec_param/outputs.tf
@@ -0,0 +1,3 @@
+output "sec_string" {
+ value = data.aws_ssm_parameter.sec_string.value
+}
diff --git a/terraform/modules/ssm_random_sec_param/variables.tf b/terraform/modules/ssm_random_sec_param/variables.tf
new file mode 100644
index 000000000..355a970a1
--- /dev/null
+++ b/terraform/modules/ssm_random_sec_param/variables.tf
@@ -0,0 +1,57 @@
+variable "namespace" {
+ type = string
+ default = "Custom"
+}
+
+variable "env" {
+ type = string
+ description = "Environment"
+ default = "production"
+
+ validation {
+ condition = contains(["production", "development"], var.env)
+ error_message = "The env value must be one of the following \"production\" or \"development\"."
+ }
+}
+
+variable "name" {
+ type = string
+ description = "Secure param name"
+ default = "sec_string"
+}
+
+variable "length" {
+ type = number
+ description = "Secure param length"
+ default = 16
+}
+
+variable "special" {
+ type = bool
+ description = "Should secure param contain special characters?"
+ default = false
+}
+
+variable "number" {
+ type = bool
+ description = "Should secure param contain number?"
+ default = true
+}
+
+variable "lower" {
+ type = bool
+ description = "Should secure param contain lower characters?"
+ default = true
+}
+
+variable "upper" {
+ type = bool
+ description = "Should secure param contain upper characters?"
+ default = false
+}
+
+variable "common_tags" {
+ type = map(string)
+ description = "Common tags"
+ default = {}
+}
diff --git a/terraform/outputs.tf b/terraform/outputs.tf
new file mode 100644
index 000000000..25839f938
--- /dev/null
+++ b/terraform/outputs.tf
@@ -0,0 +1,22 @@
+output "bfx_reports_framework_pub_ip" {
+ value = length(module.app) > 0 ? module.app[0].bfx_reports_framework_pub_ip : null
+}
+
+output "bfx_reports_framework_pub_dns" {
+ value = length(module.app) > 0 ? module.app[0].bfx_reports_framework_pub_dns : null
+}
+
+output "tf_backend_state_bucket" {
+ description = "The S3 bucket to store the remote state file."
+ value = length(module.backend) > 0 ? module.backend[0].state_bucket : null
+}
+
+output "tf_backend_dynamodb_table_name" {
+ description = "The DynamoDB table name to manage lock states."
+ value = length(module.backend) > 0 ? module.backend[0].dynamodb_table_name : null
+}
+
+output "tf_backend_kms_key" {
+ description = "The KMS customer master key to encrypt state buckets."
+ value = length(module.backend) > 0 ? module.backend[0].kms_key : null
+}
diff --git a/terraform/provider.tf b/terraform/provider.tf
new file mode 100644
index 000000000..a7e0e582f
--- /dev/null
+++ b/terraform/provider.tf
@@ -0,0 +1,12 @@
+provider "aws" {
+ shared_credentials_files = ["config/credentials.conf"]
+ profile = "default"
+ region = var.aws_region
+}
+
+provider "aws" {
+ alias = "replica"
+ shared_credentials_files = ["config/credentials.conf"]
+ profile = "default"
+ region = var.aws_replica_region
+}
diff --git a/terraform/setup.sh.tpl b/terraform/setup.sh.tpl
new file mode 100755
index 000000000..f10348d74
--- /dev/null
+++ b/terraform/setup.sh.tpl
@@ -0,0 +1,77 @@
+#!/bin/bash
+
+set -euo pipefail
+
+USER_NAME="${user_name}"
+ROOT="${root_dir}"
+DB_VOLUME_DEVICE_NAME="${db_volume_device_name}"
+
+dbFolderPath="$ROOT/db"
+envFilePath="$ROOT/.env"
+
+env="${env}"
+nginxAutoindex="${nginx_autoindex}"
+repoFork="${repo_fork}"
+repoBranch="${repo_branch}"
+nginxPort="${nginx_port}"
+nginxHost="${nginx_host}"
+
+rm -rf "$ROOT"
+mkdir -p "$ROOT" 2>/dev/null
+
+git clone -b "$repoBranch" "https://github.com/$repoFork/bfx-reports-framework.git" "$ROOT"
+
+fsType="cannot"
+
+while [[ "$fsType" = "cannot" ]]; do
+ fsType=$(file -s $DB_VOLUME_DEVICE_NAME | awk '{print $2}')
+
+ if [ "$fsType" = "cannot" ]; then
+ sleep 5
+ fi
+done
+
+if [ "$fsType" = "data" ]; then
+ echo "Creating file system on $DB_VOLUME_DEVICE_NAME"
+ mkfs -t ext4 $DB_VOLUME_DEVICE_NAME
+fi
+
+rm -rf "$dbFolderPath"
+mkdir -p "$dbFolderPath" 2>/dev/null
+mount "$DB_VOLUME_DEVICE_NAME" "$dbFolderPath"
+chown $USER_NAME:$USER_NAME -R "$dbFolderPath"
+touch "$dbFolderPath/.gitkeep"
+BLK_ID=$(blkid $DB_VOLUME_DEVICE_NAME | cut -f2 -d" ")
+echo "$BLK_ID $dbFolderPath ext4 defaults 0 2" | tee --append /etc/fstab
+
+function setConfig {
+ local filePath="$1"
+ local propName="$2"
+ local value="$3"
+
+ escapedValue=$(echo $value \
+ | sed 's/\//\\\//g' \
+ | sed 's/\+/\\\+/g' \
+ | sed 's/\./\\\./g')
+
+ sed -i "s/^$propName.*/$propName=$escapedValue/g" "$filePath"
+ grep -q "^$propName" "$filePath" \
+ || echo "$propName=$escapedValue" >> "$filePath"
+}
+
+cd "$ROOT"
+export REPO_BRANCH="$repoBranch"
+export USER="$USER_NAME"
+"$ROOT/scripts/setup.sh" "-yn"
+chown $USER_NAME:$USER_NAME -R "$ROOT"
+
+setConfig "$envFilePath" "NODE_ENV" $env
+setConfig "$envFilePath" "UI_ENV" $env
+setConfig "$envFilePath" "NGINX_ENV" $env
+setConfig "$envFilePath" "NGINX_AUTOINDEX" $nginxAutoindex
+setConfig "$envFilePath" "REPO_BRANCH" $repoBranch
+setConfig "$envFilePath" "NGINX_PORT" $nginxPort
+setConfig "$envFilePath" "NGINX_HOST" $nginxHost
+setConfig "$envFilePath" "SECRET_KEY" ""
+
+touch "$ROOT/READY"
diff --git a/terraform/terraform.tfvars.example b/terraform/terraform.tfvars.example
new file mode 100644
index 000000000..dc0e52b0d
--- /dev/null
+++ b/terraform/terraform.tfvars.example
@@ -0,0 +1,10 @@
+aws_region="eu-central-1"
+aws_instance_type="t2.medium"
+
+repo_fork="bitfinexcom"
+repo_branch="master"
+
+env="production"
+
+is_app_enabled=true
+is_backend_s3_enabled=true
diff --git a/terraform/variables.tf b/terraform/variables.tf
new file mode 100644
index 000000000..6d79e1d76
--- /dev/null
+++ b/terraform/variables.tf
@@ -0,0 +1,169 @@
+variable "aws_region" {
+ type = string
+ description = "AWS region"
+ default = "eu-central-1"
+}
+
+variable "aws_replica_region" {
+ type = string
+ description = "The AWS region to which the state bucket is replicated. Two providers must point to different AWS regions."
+ default = "eu-north-1"
+}
+
+variable "is_backend_s3_enabled" {
+ type = bool
+ description = "Set this to true to enable S3 backend."
+ default = true
+}
+
+variable "is_backend_s3_config_removed" {
+ type = bool
+ description = "Set this to true to remove S3 backend config."
+ default = false
+}
+
+variable "is_app_enabled" {
+ type = bool
+ description = "Set this to true to enable app."
+ default = true
+}
+
+variable "is_backend_s3_replication_enabled" {
+ type = bool
+ description = "Set this to true to enable S3 bucket replication in another region."
+ default = false
+}
+
+variable "is_backend_s3_bucket_force_destroyed" {
+ type = bool
+ description = "A boolean that indicates all objects should be deleted from S3 buckets so that the buckets can be destroyed without error. These objects are not recoverable."
+ default = true
+}
+
+variable "tf_lock_dynamodb_table_name" {
+ type = string
+ description = "Terraform lock DynamoDB table name"
+ default = "tf-remote-state-lock"
+}
+
+variable "namespace" {
+ type = string
+ description = "Namespace"
+ default = "BFX"
+}
+
+variable "key_name" {
+ type = string
+ description = "AWS SSH key name"
+ default = "bfx-ssh-key"
+}
+
+variable "env" {
+ type = string
+ description = "Environment"
+ default = "production"
+
+ validation {
+ condition = contains(["production", "development"], var.env)
+ error_message = "The env value must be one of the following \"production\" or \"development\"."
+ }
+}
+
+variable "db_volume_device_name" {
+ type = string
+ description = "DB volume device name"
+ default = "/dev/xvdf"
+}
+
+variable "db_volume_size" {
+ type = number
+ description = "DB volume size in Gb"
+ default = 10
+}
+
+variable "db_volume_type" {
+ type = string
+ description = "DB volume type, see https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/ebs_volume#type"
+ default = "gp3"
+}
+
+variable "is_db_volume_encrypted" {
+ type = bool
+ description = "Is DB volume encrypted"
+ default = true
+}
+
+variable "customer_master_key_spec" {
+ type = string
+ description = "Key specs for KMS keys"
+ default = "SYMMETRIC_DEFAULT"
+}
+
+variable "enable_key_rotation" {
+ type = bool
+ description = "Specifies whether key rotation is enabled, AWS KMS supports automatic key rotation only for symmetric KMS keys"
+ default = true
+}
+
+variable "nginx_autoindex" {
+ type = string
+ description = "NGINX autoindex"
+ default = "on"
+
+ validation {
+ condition = contains(["on", "off"], var.nginx_autoindex)
+ error_message = "The nginx_autoindex value must be one of the following \"on\" or \"off\"."
+ }
+}
+
+variable "repo_fork" {
+ type = string
+ description = "Repository fork"
+ default = "bitfinexcom"
+}
+
+variable "repo_branch" {
+ type = string
+ description = "Repository branch"
+ default = "master"
+}
+
+
+variable "nginx_port" {
+ type = number
+ description = "NGINX port"
+ default = 80
+}
+
+variable "aws_instance_type" {
+ type = string
+ description = "AWS instance type"
+ default = "t2.medium"
+}
+
+variable "allowed_ports" {
+ type = list(number)
+ description = "Allowed ports"
+ default = [80, 443, 22]
+}
+
+variable "aws_instance_detailed_mon" {
+ type = bool
+ description = "AWS instance detailed monitoring"
+ default = true
+}
+
+variable "aws_vpc_cidr" {
+ type = string
+ description = "A /16 CIDR range definition, such as 10.11.0.0/16, that the VPC will use"
+ default = "10.11.0.0/16"
+}
+
+variable "common_tags" {
+ type = map(string)
+ description = "Common tags"
+ default = {
+ Owner = "Bitfinex"
+ Project = "bfx-reports-framework"
+ }
+}
diff --git a/terraform/versions.tf b/terraform/versions.tf
new file mode 100644
index 000000000..d1b4ebb25
--- /dev/null
+++ b/terraform/versions.tf
@@ -0,0 +1,10 @@
+terraform {
+ required_version = ">= 1.1.7"
+
+ required_providers {
+ aws = {
+ source = "hashicorp/aws"
+ version = "~> 4.7.0"
+ }
+ }
+}
diff --git a/workers/loc.api/helpers/api-errors-testers.js b/workers/loc.api/helpers/api-errors-testers.js
deleted file mode 100644
index 710e85ec1..000000000
--- a/workers/loc.api/helpers/api-errors-testers.js
+++ /dev/null
@@ -1,14 +0,0 @@
-'use strict'
-
-const isEnotfoundError = (err) => {
- return /ENOTFOUND/.test(err.toString())
-}
-
-const isEaiAgainError = (err) => {
- return /EAI_AGAIN/.test(err.toString())
-}
-
-module.exports = {
- isEnotfoundError,
- isEaiAgainError
-}
diff --git a/workers/loc.api/helpers/index.js b/workers/loc.api/helpers/index.js
index fce2dac47..677615245 100644
--- a/workers/loc.api/helpers/index.js
+++ b/workers/loc.api/helpers/index.js
@@ -8,13 +8,11 @@ const {
getDateString,
isNotSyncRequired,
sumObjectsNumbers,
+ pickLowerObjectsNumbers,
sumAllObjectsNumbers,
+ pickAllLowerObjectsNumbers,
sumArrayVolumes
} = require('./utils')
-const {
- isEnotfoundError,
- isEaiAgainError
-} = require('./api-errors-testers')
const {
isSubAccountApiKeys,
getAuthFromSubAccountAuth,
@@ -28,12 +26,12 @@ module.exports = {
collObjToArr,
getDateString,
isNotSyncRequired,
- isEnotfoundError,
- isEaiAgainError,
isSubAccountApiKeys,
getAuthFromSubAccountAuth,
getSubAccountAuthFromAuth,
sumObjectsNumbers,
+ pickLowerObjectsNumbers,
sumAllObjectsNumbers,
+ pickAllLowerObjectsNumbers,
sumArrayVolumes
}
diff --git a/workers/loc.api/helpers/utils.js b/workers/loc.api/helpers/utils.js
index 2c9cb29e2..cadf98045 100644
--- a/workers/loc.api/helpers/utils.js
+++ b/workers/loc.api/helpers/utils.js
@@ -80,6 +80,48 @@ const sumObjectsNumbers = (propName, objects = []) => {
}, 0)
}
+const pickLowerObjectsNumbers = (propName, objects = []) => {
+ return objects.reduce((accum, curr) => {
+ if (!Number.isFinite(curr?.[propName])) {
+ return accum
+ }
+ if (!Number.isFinite(accum)) {
+ return curr[propName]
+ }
+
+ return curr[propName] < accum
+ ? curr[propName]
+ : accum
+ }, null)
+}
+
+const pickAllLowerObjectsNumbers = (propName, objects = []) => {
+ return objects.reduce((accum, curr) => {
+ if (typeof curr?.[propName] !== 'object') {
+ return accum
+ }
+
+ const entries = Object.entries(curr[propName])
+
+ return entries.reduce((accum, [key, val]) => {
+ if (!Number.isFinite(val)) {
+ return accum
+ }
+ if (!Number.isFinite(accum?.[key])) {
+ accum[key] = val
+
+ return accum
+ }
+
+ accum[key] = val < accum[key]
+ ? val
+ : accum[key]
+
+ return accum
+ }, accum)
+ }, {})
+}
+
const sumAllObjectsNumbers = (propName, objects = []) => {
return objects.reduce((accum, curr) => {
if (typeof curr?.[propName] !== 'object') {
@@ -165,6 +207,8 @@ module.exports = {
getDateString,
isNotSyncRequired,
sumObjectsNumbers,
+ pickLowerObjectsNumbers,
sumAllObjectsNumbers,
+ pickAllLowerObjectsNumbers,
sumArrayVolumes
}
diff --git a/workers/loc.api/service.report.framework.js b/workers/loc.api/service.report.framework.js
index 69e38c10b..12ac0c1c6 100644
--- a/workers/loc.api/service.report.framework.js
+++ b/workers/loc.api/service.report.framework.js
@@ -10,7 +10,8 @@ const {
} = require('bfx-report/workers/loc.api/errors')
const {
getTimezoneConf,
- getDataFromApi
+ getDataFromApi,
+ isENetError
} = require('bfx-report/workers/loc.api/helpers')
const ReportService = require('./service.report')
@@ -21,13 +22,12 @@ const {
checkParams,
checkParamsAuth,
isNotSyncRequired,
- isEnotfoundError,
- isEaiAgainError,
collObjToArr,
getAuthFromSubAccountAuth,
sumObjectsNumbers,
- sumAllObjectsNumbers,
- sumArrayVolumes
+ pickAllLowerObjectsNumbers,
+ sumArrayVolumes,
+ pickLowerObjectsNumbers
} = require('./helpers')
const INITIAL_PROGRESS = 'SYNCHRONIZATION_HAS_NOT_STARTED_YET'
@@ -193,10 +193,8 @@ class FrameworkReportService extends ReportService {
return true
} catch (err) {
- const isServerUnavailable = (
- isEnotfoundError(err) ||
- isEaiAgainError(err)
- )
+ const isServerUnavailable = isENetError(err)
+
const _err = isServerUnavailable
? new ServerAvailabilityError(this._conf.restUrl)
: err
@@ -1095,25 +1093,25 @@ class FrameworkReportService extends ReportService {
const objRes = {
trade_vol_30d: sumArrayVolumes(
'trade_vol_30d', arrRes),
- fees_trading_30d: sumAllObjectsNumbers(
+ fees_trading_30d: pickAllLowerObjectsNumbers(
'fees_trading_30d', arrRes),
- fees_trading_total_30d: sumObjectsNumbers(
+ fees_trading_total_30d: pickLowerObjectsNumbers(
'fees_trading_total_30d', arrRes),
- fees_funding_30d: sumAllObjectsNumbers(
+ fees_funding_30d: pickAllLowerObjectsNumbers(
'fees_funding_30d', arrRes),
- fees_funding_total_30d: sumObjectsNumbers(
+ fees_funding_total_30d: pickLowerObjectsNumbers(
'fees_funding_total_30d', arrRes),
- makerFee: sumObjectsNumbers(
+ makerFee: pickLowerObjectsNumbers(
'makerFee', arrRes),
derivMakerRebate: sumObjectsNumbers(
'derivMakerRebate', arrRes),
- takerFeeToCrypto: sumObjectsNumbers(
+ takerFeeToCrypto: pickLowerObjectsNumbers(
'takerFeeToCrypto', arrRes),
- takerFeeToStable: sumObjectsNumbers(
+ takerFeeToStable: pickLowerObjectsNumbers(
'takerFeeToStable', arrRes),
- takerFeeToFiat: sumObjectsNumbers(
+ takerFeeToFiat: pickLowerObjectsNumbers(
'takerFeeToFiat', arrRes),
- derivTakerFee: sumObjectsNumbers(
+ derivTakerFee: pickLowerObjectsNumbers(
'derivTakerFee', arrRes),
leoLev: sumObjectsNumbers(
'leoLev', arrRes),
diff --git a/workers/loc.api/sync/authenticator/index.js b/workers/loc.api/sync/authenticator/index.js
index 4455bd920..2724c0f61 100644
--- a/workers/loc.api/sync/authenticator/index.js
+++ b/workers/loc.api/sync/authenticator/index.js
@@ -4,12 +4,13 @@ const { v4: uuidv4 } = require('uuid')
const {
AuthError
} = require('bfx-report/workers/loc.api/errors')
+const {
+ isENetError
+} = require('bfx-report/workers/loc.api/helpers')
const { serializeVal } = require('../dao/helpers')
const {
- isSubAccountApiKeys,
- isEnotfoundError,
- isEaiAgainError
+ isSubAccountApiKeys
} = require('../../helpers')
const {
UserRemovingError,
@@ -233,10 +234,7 @@ class Authenticator {
auth: { apiKey, apiSecret }
})
} catch (err) {
- if (
- !isEnotfoundError(err) &&
- !isEaiAgainError(err)
- ) {
+ if (!isENetError(err)) {
throw err
}
}
diff --git a/workers/loc.api/ws-transport/index.js b/workers/loc.api/ws-transport/index.js
index 4bb95a280..b63a7c860 100644
--- a/workers/loc.api/ws-transport/index.js
+++ b/workers/loc.api/ws-transport/index.js
@@ -250,7 +250,7 @@ class WSTransport {
)
}
- _sendToOne (socket, sid, action, err, result = null) {
+ _sendToOne (socket, action, err, result = null) {
this.responder(
() => {
if (err) {
@@ -263,7 +263,7 @@ class WSTransport {
{},
(err, res) => {
const _res = this.transport.format(
- [sid, err, { ...res, action }]
+ [null, err, { ...res, action }]
)
socket.send(_res)
@@ -326,9 +326,9 @@ class WSTransport {
continue
}
- this._sendToOne(socket, sid, action, null, res)
+ this._sendToOne(socket, action, null, res)
} catch (err) {
- this._sendToOne(socket, sid, action, err)
+ this._sendToOne(socket, action, err)
}
}