diff --git a/.cargo/config.toml b/.cargo/config.toml deleted file mode 100644 index d67b66e2b..000000000 --- a/.cargo/config.toml +++ /dev/null @@ -1,2 +0,0 @@ -[build] -target = "x86_64-unknown-linux-musl" diff --git a/.github/workflows/audit.yml b/.github/workflows/audit.yml index 7c69714a6..9e277f6f8 100644 --- a/.github/workflows/audit.yml +++ b/.github/workflows/audit.yml @@ -20,6 +20,10 @@ on: jobs: security-audit: runs-on: ubuntu-latest + permissions: + issues: write + checks: write + steps: - uses: actions/checkout@v3 diff --git a/.github/workflows/lint-test-build.yml b/.github/workflows/lint-test-build.yml index 736588b34..485afae29 100644 --- a/.github/workflows/lint-test-build.yml +++ b/.github/workflows/lint-test-build.yml @@ -1,6 +1,7 @@ name: Lint, Test & Build on: + workflow_dispatch: pull_request: branches: - main @@ -15,10 +16,10 @@ concurrency: cancel-in-progress: true jobs: - lint-test-rust: - name: Run Lint + Test Rust + lint-test: + name: Run Lint + Test runs-on: ubuntu-latest - container: ghcr.io/scuffletv/build:1.67.1 + container: ghcr.io/scuffletv/build:425e9d58cd6fab8e3d202681188c54b55c9e71f1 services: postgres: image: postgres:15.2 @@ -51,9 +52,9 @@ jobs: /usr/local/cargo/registry/index/ /usr/local/cargo/registry/cache/ /usr/local/cargo/git/db/ - key: musl-rust-registry-${{ hashFiles('**/Cargo.lock', '**/Cargo.toml') }} + key: gnu-rust-registry-${{ hashFiles('**/Cargo.lock', '**/Cargo.toml') }} restore-keys: | - musl-rust-registry- + gnu-rust-registry- - name: Set up cargo target cache uses: actions/cache@v3 @@ -61,77 +62,37 @@ jobs: with: path: | target/ - key: musl-rust-target-lint-test-rust-${{ hashFiles('**/Cargo.lock', '**/Cargo.toml') }} + key: gnu-rust-target-lint-test-rust-${{ hashFiles('**/Cargo.lock', '**/Cargo.toml') }} restore-keys: | - musl-rust-target-lint-test-rust- - - - name: Install sqlx-cli - run: | - curl -L https://github.com/ScuffleTV/ci-binaries/releases/download/sqlx-cli/sqlx-cli.tar.gz | tar -xz -C $CARGO_HOME/bin + gnu-rust-target-lint-test-rust- - name: Install dependencies - run: just setup-deps + run: mask bootstrap --no-db --no-docker --no-env --no-stack --no-rust - name: Run migrations - run: just db-migrate + run: mask db migrate - name: Run Lint - run: just lint + run: mask lint - name: Run Test Rust - run: just test-rust - - test-js: - name: Run Tests JavaScript - runs-on: ubuntu-latest - - steps: - - uses: actions/checkout@v3 - with: - fetch-depth: 0 + run: mask test --no-js - - name: Setup Node - uses: actions/setup-node@v3 + - name: Upload coverage to Codecov + uses: codecov/codecov-action@v3 with: - cache: "yarn" - node-version: 18 - - - name: Setup Rust Wasm - uses: actions-rs/toolchain@v1 - with: - toolchain: stable - target: wasm32-unknown-unknown - override: true - - - name: Set up cargo cache - uses: actions/cache@v3 - continue-on-error: false - with: - path: | - ~/.cargo/registry/index/ - ~/.cargo/registry/cache/ - ~/.cargo/git/db/ - target/ - key: gnu-rust-test-js-${{ hashFiles('**/Cargo.lock', '**/Cargo.toml') }} - restore-keys: | - gnu-rust-test-js- - - - uses: taiki-e/install-action@just - - - name: Install dependencies - run: just setup-deps - - - name: Setup tests - run: just setup-tests + token: ${{ secrets.CODECOV_TOKEN }} + files: lcov.info + fail_ci_if_error: true - name: Run Test JavaScript - run: just test-js + run: mask test --no-rust build: name: Run Build runs-on: ubuntu-latest - container: ghcr.io/scuffletv/build:1.67.1 - needs: [lint-test-rust, test-js] + container: 
ghcr.io/scuffletv/build:425e9d58cd6fab8e3d202681188c54b55c9e71f1 + needs: [lint-test] steps: - uses: actions/checkout@v3 @@ -152,9 +113,9 @@ jobs: /usr/local/cargo/registry/index/ /usr/local/cargo/registry/cache/ /usr/local/cargo/git/db/ - key: musl-rust-registry-${{ hashFiles('**/Cargo.lock', '**/Cargo.toml') }} + key: gnu-rust-registry-${{ hashFiles('**/Cargo.lock', '**/Cargo.toml') }} restore-keys: | - musl-rust-registry- + gnu-rust-registry- - name: Set up cargo target cache uses: actions/cache@v3 @@ -162,12 +123,184 @@ jobs: with: path: | target/ - key: musl-rust-target-build-${{ hashFiles('**/Cargo.lock', '**/Cargo.toml') }} + key: gnu-rust-target-build-${{ hashFiles('**/Cargo.lock', '**/Cargo.toml') }} restore-keys: | - musl-rust-target-build- + gnu-rust-target-build- - name: Install dependencies - run: just setup-deps + run: mask bootstrap --no-db --no-docker --no-env --no-js-tests --no-stack --no-rust - name: Run Build - run: just build + run: mask build + + - name: Upload build artifacts + uses: actions/upload-artifact@v3 + with: + name: build + path: | + target/x86_64-unknown-linux-gnu/release/api + target/x86_64-unknown-linux-gnu/release/edge + target/x86_64-unknown-linux-gnu/release/ingest + target/x86_64-unknown-linux-gnu/release/transcoder + frontend/website/build + + docker: + name: Build docker images + runs-on: ubuntu-latest + needs: [build] + permissions: + contents: read + packages: write + + steps: + - uses: actions/checkout@v3 + with: + fetch-depth: 0 + + - name: Download build artifacts + uses: actions/download-artifact@v3 + with: + name: build + + - name: Set up Docker Buildx + uses: docker/setup-buildx-action@v2.4.1 + + - name: Login to GitHub Container Registry + uses: docker/login-action@v2.1.0 + with: + registry: ghcr.io + username: ${{ github.actor }} + password: ${{ secrets.GITHUB_TOKEN }} + + - name: Build and push API image + id: docker_build_api + uses: docker/build-push-action@v4.0.0 + with: + context: . + file: ./docker/api.Dockerfile + load: true + tags: ghcr.io/scuffletv/api:${{ github.sha }} + + - name: Build and push Edge image + id: docker_build_edge + uses: docker/build-push-action@v4.0.0 + with: + context: . + file: ./docker/edge.Dockerfile + load: true + tags: ghcr.io/scuffletv/edge:${{ github.sha }} + + - name: Build and push Ingest image + id: docker_build_ingest + uses: docker/build-push-action@v4.0.0 + with: + context: . + file: ./docker/ingest.Dockerfile + load: true + tags: ghcr.io/scuffletv/ingest:${{ github.sha }} + + - name: Build and push Transcoder image + id: docker_build_transcoder + uses: docker/build-push-action@v4.0.0 + with: + context: . + file: ./docker/transcoder.Dockerfile + load: true + tags: ghcr.io/scuffletv/transcoder:${{ github.sha }} + + - name: Build and push Website image + id: docker_build_website + uses: docker/build-push-action@v4.0.0 + with: + context: . 
+ file: ./docker/website.Dockerfile + load: true + tags: ghcr.io/scuffletv/website:${{ github.sha }} + + - name: Scan API image + uses: aquasecurity/trivy-action@master + if: ${{ always() && steps.docker_build_api.outcome == 'success' }} + with: + image-ref: ghcr.io/scuffletv/api:${{ github.sha }} + format: "table" + exit-code: "1" + ignore-unfixed: true + vuln-type: "os,library" + severity: "CRITICAL,HIGH" + + - name: Scan Edge image + uses: aquasecurity/trivy-action@master + if: ${{ always() && steps.docker_build_edge.outcome == 'success' }} + with: + image-ref: ghcr.io/scuffletv/edge:${{ github.sha }} + format: "table" + exit-code: "1" + ignore-unfixed: true + vuln-type: "os,library" + severity: "CRITICAL,HIGH" + + - name: Scan Ingest image + uses: aquasecurity/trivy-action@master + if: ${{ always() && steps.docker_build_ingest.outcome == 'success' }} + with: + image-ref: ghcr.io/scuffletv/ingest:${{ github.sha }} + format: "table" + exit-code: "1" + ignore-unfixed: true + vuln-type: "os,library" + severity: "CRITICAL,HIGH" + + - name: Scan Transcoder image + uses: aquasecurity/trivy-action@master + if: ${{ always() && steps.docker_build_transcoder.outcome == 'success' }} + with: + image-ref: ghcr.io/scuffletv/transcoder:${{ github.sha }} + format: "table" + exit-code: "1" + ignore-unfixed: true + vuln-type: "os,library" + severity: "CRITICAL,HIGH" + + - name: Scan Website image + uses: aquasecurity/trivy-action@master + if: ${{ always() && steps.docker_build_website.outcome == 'success' }} + with: + image-ref: ghcr.io/scuffletv/website:${{ github.sha }} + format: "table" + exit-code: "1" + ignore-unfixed: true + vuln-type: "os,library" + severity: "CRITICAL,HIGH" + + - name: Tag images and push + if: ${{ (github.event_name == 'push' && (github.ref == 'refs/heads/main' || startsWith(github.ref, 'refs/heads/feature/'))) || github.event_name == 'workflow_dispatch' }} + # If the push is to the main branch, tag the image as latest + # If the workflow is triggered by a workflow_dispatch event, tag the image as workflow_dispatch + # Otherwise, tag the image with the branch name, in the format of feature-branch-name + env: + TAG: ${{ github.ref == 'refs/heads/main' && 'latest' || github.event_name == 'workflow_dispatch' && 'workflow_dispatch' || github.ref_name }} + run: | + # We need to replace the / in the branch name with a - so that it can be used as a tag + TAG="${TAG//\//-}" + + docker tag ghcr.io/scuffletv/api:${{ github.sha }} ghcr.io/scuffletv/api:$TAG + docker tag ghcr.io/scuffletv/edge:${{ github.sha }} ghcr.io/scuffletv/edge:$TAG + docker tag ghcr.io/scuffletv/ingest:${{ github.sha }} ghcr.io/scuffletv/ingest:$TAG + docker tag ghcr.io/scuffletv/transcoder:${{ github.sha }} ghcr.io/scuffletv/transcoder:$TAG + docker tag ghcr.io/scuffletv/website:${{ github.sha }} ghcr.io/scuffletv/website:$TAG + + docker push ghcr.io/scuffletv/api:${{ github.sha }} + docker push ghcr.io/scuffletv/api:$TAG + + docker push ghcr.io/scuffletv/edge:${{ github.sha }} + docker push ghcr.io/scuffletv/edge:$TAG + + docker push ghcr.io/scuffletv/ingest:${{ github.sha }} + docker push ghcr.io/scuffletv/ingest:$TAG + + docker push ghcr.io/scuffletv/transcoder:${{ github.sha }} + docker push ghcr.io/scuffletv/transcoder:$TAG + + docker push ghcr.io/scuffletv/website:${{ github.sha }} + docker push ghcr.io/scuffletv/website:$TAG diff --git a/.gitignore b/.gitignore index 0608c861b..2c79d4a70 100644 --- a/.gitignore +++ b/.gitignore @@ -3,4 +3,8 @@ target/ !.vscode/extensions.json node_modules/ .env* 
-dev-stack/stack.docker-compose.yaml
+dev-stack/docker-compose.yml
+*.log
+.DS_Store
+*.lcov
+lcov.info

diff --git a/.husky/pre-commit b/.husky/pre-commit
index 946349d66..c814f2328 100755
--- a/.husky/pre-commit
+++ b/.husky/pre-commit
@@ -1,4 +1,4 @@
 #!/usr/bin/env sh
 . "$(dirname -- "$0")/_/husky.sh"

-just lint
+mask lint

diff --git a/.vscode/extensions.json b/.vscode/extensions.json
index e2ba250a8..184e9a6a1 100644
--- a/.vscode/extensions.json
+++ b/.vscode/extensions.json
@@ -7,6 +7,7 @@
     "prisma.prisma",
     "swellaby.rust-pack",
     "svelte.svelte-vscode",
-    "zxh404.vscode-proto3"
+    "zxh404.vscode-proto3",
+    "ryanluker.vscode-coverage-gutters"
   ]
 }

diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md
index 5b50e35e1..ef69c4184 100644
--- a/CONTRIBUTING.md
+++ b/CONTRIBUTING.md
@@ -30,7 +30,7 @@ You can find instructions on how to do that [here](https://devblogs.microsoft.co
 - [Docker](https://www.docker.com/)
 - [Docker Compose V2](https://docs.docker.com/compose/install)
 - [Rust](https://www.rust-lang.org/tools/install)
-- [Just](https://just.systems/)
+- [Mask](https://github.com/jacobdeichert/mask)
 - [Musl](https://musl.libc.org/)

 ### For Ubuntu

@@ -75,10 +75,10 @@ source $HOME/.cargo/env
 export PATH="$HOME/.cargo/bin:$HOME/.yarn/bin:$PATH"
 ```

-Installing Just
+Installing Mask

 ```
-cargo install just
+cargo install mask
 ```

 ## Setting up the project

@@ -88,9 +88,18 @@ Once you have everything installed, you can clone the project and install the de
 ```bash
 git clone --recurse-submodules https://github.com/ScuffleTV/scuffle.git scuffle
 cd scuffle
-just setup
+mask bootstrap
 ```

+The bootstrap command will set up the project for you.
+
+This includes:
+
+- Installing all the dependencies
+- Setting up the database
+- Setting up the local stack
+- Setting up .env files
+
 ## Development Database

 We use Postgres for our database.

@@ -98,49 +107,39 @@ We use Postgres for our database.
 You can run a local instance of Postgres with the following command:

 ```bash
-just db-up
+mask db up
 ```

 To shut down the local instance of Postgres you can run the following command:

 ```bash
-just db-down
+mask db down
 ```

 ### Database Migrations

 We use sqlx-cli to manage our database migrations.

-You can create a new migration with the following command:
-
-```bash
-just db-migrate-create
-```
-
-Then you can find the SQL for the migration in the [migrations](./backend/migrations) folder.
-
 You can run the migrations with the following command:

 ```bash
-just db-migrate
+mask db migrate
 ```

-### Creating Database Migrations
-
-To create a new migration, you can use the `just db-migrate-create` command.
+You can create a new migration with the following command:

 ```bash
-just db-migrate-create
+mask db migrate add "migration name"
 ```

-This will create a new migration file in the [migrations](./backend/migrations) folder.
+Then you can find the SQL for the migration in the [migrations](./backend/migrations) folder.

 You can then edit the up migration file to add your SQL.
 You must also provide a down migration file so we can rollback the migration.

 You will then be prompted to rerun the prepare command

 ```bash
-just db-prepare
+mask db prepare
 ```

 This will run the migrations and generate the SQLx code for the database.
So tha You can setup a local stack with the following command: ```bash -just stack-init +mask stack init ``` You need to have fully built local environment before running this command. You can do that with the following command: ```bash -just build +mask build ``` Or if you want to build it inside a container you can run: ```bash -just build-container +mask build --container ``` Then to start it run ``` -just stack-up +mask stack up ``` -You can modify the stack by editing `./dev-stack/docker-compose.yaml` file generated by `just stack-init`. +You can modify the stack by editing `./dev-stack/docker-compose.yml` file generated by `mask stack init`. ## Monorepo diff --git a/Cargo.lock b/Cargo.lock index 33246c568..cbc375892 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -34,9 +34,13 @@ version = "0.1.0" dependencies = [ "anyhow", "common", + "dotenvy", "hyper", + "routerify", "serde", + "serde_json", "sqlx", + "tempfile", "tokio", "tracing", ] @@ -167,6 +171,8 @@ dependencies = [ "config", "serde", "sqlx", + "tempfile", + "tokio", "tracing", "tracing-subscriber", ] @@ -373,6 +379,15 @@ version = "2.5.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0206175f82b8d6bf6652ff7d71a1e27fd2e4efde587fd368662814d6ec1d9ce0" +[[package]] +name = "fastrand" +version = "1.9.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e51093e27b0797c359783294ca4f0a911c270184cb10f85783b118614a1501be" +dependencies = [ + "instant", +] + [[package]] name = "fnv" version = "1.0.7" @@ -1089,6 +1104,15 @@ version = "0.6.28" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "456c603be3e8d448b072f410900c09faf164fbce2d480456f50eea6e25f9c848" +[[package]] +name = "remove_dir_all" +version = "0.5.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3acd125665422973a33ac9d3dd2df85edad0f4ae9b00dafb1a05e43a9f5ef8e7" +dependencies = [ + "winapi", +] + [[package]] name = "ring" version = "0.16.20" @@ -1115,6 +1139,19 @@ dependencies = [ "serde", ] +[[package]] +name = "routerify" +version = "3.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "496c1d3718081c45ba9c31fbfc07417900aa96f4070ff90dc29961836b7a9945" +dependencies = [ + "http", + "hyper", + "lazy_static", + "percent-encoding", + "regex", +] + [[package]] name = "rust-ini" version = "0.18.0" @@ -1411,6 +1448,20 @@ dependencies = [ "unicode-ident", ] +[[package]] +name = "tempfile" +version = "3.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5cdb1ef4eaeeaddc8fbd371e5017057064af0911902ef36b39801f67cc6d79e4" +dependencies = [ + "cfg-if", + "fastrand", + "libc", + "redox_syscall", + "remove_dir_all", + "winapi", +] + [[package]] name = "termcolor" version = "1.2.0" diff --git a/Justfile b/Justfile deleted file mode 100644 index 70072c2ff..000000000 --- a/Justfile +++ /dev/null @@ -1,107 +0,0 @@ -set dotenv-load - -arch := `uname -m | sed 's/amd64/x86_64/' | sed 's/arm64/aarch64/'` - -build: - yarn workspace website build - cargo build --release - -build-container: env-backup - docker run --rm -v $(pwd):/pwd -w /pwd ghcr.io/scuffletv/base-build:1.66.1 just build - -env-backup: - test -f .env && (\ - mv .env .env.bak \ - ) || true - -format: - yarn format - yarn workspace website format - cargo fmt --all - cargo clippy --fix --allow-dirty --allow-staged - cargo clippy --fix --allow-dirty --allow-staged --package player --target wasm32-unknown-unknown - 
-lint: - yarn lint - yarn workspace website lint - cargo clippy - cargo clippy --package player --target wasm32-unknown-unknown - cargo fmt --all --check - cargo sqlx prepare --check --merged -- --all-targets --all-features - -test: test-rust test-js - -test-rust: - cargo test - -test-js: - yarn workspace website test - -audit: - cargo audit - yarn audit - -setup: setup-deps env - cargo install cargo-watch - cargo install sqlx-cli - cargo install cargo-audit --features=fix,vendored-openssl - rustup target add wasm32-unknown-unknown - rustup target add {{arch}}-unknown-linux-musl - -setup-deps: - yarn - -setup-tests: - yarn playwright install - -clean: - cargo clean - yarn workspace website clean - -db-migrate: - sqlx database create - sqlx migrate run --source ./backend/migrations - -db-prepare: - cargo sqlx prepare --merged -- --all-targets --all-features - yarn prettier --write sqlx-data.json - -db-migrate-create *ARGS: - sqlx migrate add "{{ ARGS }}" --source ./backend/migrations -r - -db-rollback: - sqlx migrate revert --source ./backend/migrations - -db-reset: - sqlx database reset --source ./backend/migrations - just db-migrate - -db-up: - docker network create --driver bridge scuffle-dev || true - docker compose --file ./dev-stack/db.docker-compose.yaml up -d - just db-migrate - -env: - test -f .env || (\ - test -f .env.bak && (\ - mv .env.bak .env \ - ) || (\ - echo "DATABASE_URL=postgres://postgres:postgres@localhost:5432/scuffle-dev" > .env \ - ) \ - ) - -db-down: - docker compose --file ./dev-stack/db.docker-compose.yaml down - -stack-init: - cp ./dev-stack/stack-example.docker-compose.yaml ./dev-stack/stack.docker-compose.yaml - -stack-up: - docker network create --driver bridge scuffle-dev || true - docker compose --file ./dev-stack/stack.docker-compose.yaml up -d --build - -stack-down: - docker compose --file ./dev-stack/stack.docker-compose.yaml down - -stack-logs *ARGS: - docker compose --file ./dev-stack/stack.docker-compose.yaml logs {{ ARGS }} diff --git a/README.md b/README.md index c0febe952..4a321d0b8 100644 --- a/README.md +++ b/README.md @@ -1,5 +1,7 @@ # Scuffle +[![codecov](https://codecov.io/gh/ScuffleTV/scuffle/branch/main/graph/badge.svg?token=LJCYSZR4IV)](https://codecov.io/gh/ScuffleTV/scuffle) + ## Welcome to Scuffle! Scuffle is an opensource live streaming platform. 
diff --git a/backend/api/Cargo.toml b/backend/api/Cargo.toml
index 86d8eae40..99b1d66e9 100644
--- a/backend/api/Cargo.toml
+++ b/backend/api/Cargo.toml
@@ -15,3 +15,9 @@ serde = { version = "1.0.152", features = ["derive"] }
 hyper = { version = "0.14.24", features = ["full"] }
 common = { path = "../../common" }
 sqlx = { version = "0.6.2", features = ["postgres", "runtime-tokio-rustls", "json", "chrono"] }
+routerify = "3.0.0"
+serde_json = "1.0.93"
+
+[dev-dependencies]
+tempfile = "3.3.0"
+dotenvy = "0.15.6"

diff --git a/backend/api/src/api/mod.rs b/backend/api/src/api/mod.rs
index 41fe9c4a6..b079b3c75 100644
--- a/backend/api/src/api/mod.rs
+++ b/backend/api/src/api/mod.rs
@@ -1,42 +1,56 @@
-use std::{net::SocketAddr, sync::Arc};
-
 use anyhow::Result;
-use hyper::{service::service_fn, Body, Request, Response, StatusCode};
-use tokio::net::TcpListener;
-use tracing::instrument;
+use hyper::{server::conn::Http, Body};
+use routerify::{RequestServiceBuilder, Router};
+use std::{convert::Infallible, net::SocketAddr, sync::Arc};
+use tokio::{net::TcpSocket, select};

 use crate::global::GlobalState;

-#[instrument(name = "hello_world", skip(req), fields(method = req.method().to_string(), path = &req.uri().path()))]
-async fn hello_world(req: Request<Body>) -> Result<Response<Body>> {
-    tracing::debug!("Hii there!");
+mod v1;

-    Ok(Response::new("Hello, World".into()))
+pub fn routes(global: Arc<GlobalState>) -> Router<Body, Infallible> {
+    Router::builder()
+        .data(global)
+        .scope("/v1", v1::routes())
+        .build()
+        .unwrap()
 }

-pub async fn run(config: Arc<GlobalState>) -> Result<()> {
-    let addr: SocketAddr = config.config.bind_address.parse()?;
+pub async fn run(global: Arc<GlobalState>) -> Result<()> {
+    let addr: SocketAddr = global.config.bind_address.parse()?;

     tracing::info!("Listening on {}", addr);

-    let listener = TcpListener::bind(&addr).await?;
+    let socket = if addr.is_ipv6() {
+        TcpSocket::new_v6()?
+    } else {
+        TcpSocket::new_v4()?
+    };
+    socket.set_reuseaddr(true)?;
+    socket.set_reuseport(true)?;
+    socket.bind(addr)?;
+    let listener = socket.listen(1024)?;
+
+    let request_service = RequestServiceBuilder::new(routes(global.clone()))
+        .expect("failed to build request service");

     loop {
-        let (socket, _) = listener.accept().await?;
-
-        tracing::debug!("Accepted connection from {}", socket.peer_addr()?);
-
-        let conn = hyper::server::conn::Http::new().serve_connection(
-            socket,
-            service_fn(|req| async {
-                match req.uri().path() {
-                    "/hello" => hello_world(req).await,
-                    _ => Ok(Response::builder()
-                        .status(StatusCode::NOT_FOUND)
-                        .body("Not Found".into())?),
-                }
-            }),
-        );
-
-        tokio::spawn(conn);
+        select! {
+            _ = global.ctx.done() => {
+                tracing::info!("Shutting down");
+                return Ok(());
+            },
+            r = listener.accept() => {
+                let (socket, addr) = r?;
+                tracing::debug!("Accepted connection from {}", addr);
+
+                tokio::spawn(Http::new().serve_connection(
+                    socket,
+                    request_service.build(addr),
+                ));
+            },
+        }
     }
 }
+
+#[cfg(test)]
+mod tests;
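The new bind path goes through `TcpSocket` instead of `TcpListener::bind` so socket options can be set before listening. A minimal sketch of the same idea, assuming a tokio runtime and a Unix target (`set_reuseport` is Unix-only); `bind_reusable` is a hypothetical helper, not part of the PR:

```rust
use std::net::SocketAddr;

use tokio::net::{TcpListener, TcpSocket};

// SO_REUSEADDR lets the server rebind quickly after a restart (sockets in
// TIME_WAIT don't block the port); SO_REUSEPORT lets several acceptor
// processes share one port.
fn bind_reusable(addr: SocketAddr) -> std::io::Result<TcpListener> {
    let socket = if addr.is_ipv6() {
        TcpSocket::new_v6()?
    } else {
        TcpSocket::new_v4()?
    };
    socket.set_reuseaddr(true)?;
    socket.set_reuseport(true)?;
    socket.bind(addr)?;
    socket.listen(1024) // backlog: queued-but-unaccepted connections
}
```

The accept loop then races `global.ctx.done()` against `listener.accept()`, so dropping the cancel side of the context is enough to stop accepting new connections.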
diff --git a/backend/api/src/api/tests.rs b/backend/api/src/api/tests.rs
new file mode 100644
index 000000000..8f6f7d103
--- /dev/null
+++ b/backend/api/src/api/tests.rs
@@ -0,0 +1,142 @@
+use std::time::Duration;
+
+use common::{context::Context, logging};
+use hyper::{Client, StatusCode};
+
+use crate::config::AppConfig;
+
+use super::*;
+
+#[tokio::test]
+async fn test_api_v6() {
+    let db = sqlx::PgPool::connect(&std::env::var("DATABASE_URL").expect("DATABASE_URL not set"))
+        .await
+        .expect("failed to connect to database");
+
+    // We need to initialize logging
+    logging::init("api=debug").expect("failed to initialize logging");
+
+    let (ctx, handler) = Context::new();
+
+    let global = Arc::new(GlobalState {
+        config: AppConfig {
+            bind_address: "[::]:8081".to_string(),
+            database_url: "".to_string(),
+            log_level: "api=debug".to_string(),
+            config_file: "".to_string(),
+        },
+        ctx,
+        db,
+    });
+
+    let handle = tokio::spawn(run(global));
+
+    // We need to wait for the server to start
+    tokio::time::sleep(Duration::from_millis(300)).await;
+
+    let client = Client::new();
+
+    let resp = client
+        .get(
+            "http://localhost:8081/v1/health"
+                .to_string()
+                .parse()
+                .unwrap(),
+        )
+        .await
+        .unwrap();
+    assert_eq!(resp.status(), StatusCode::OK);
+    let body = hyper::body::to_bytes(resp.into_body()).await.unwrap();
+    assert_eq!(body, "OK");
+
+    // The client uses Keep-Alive, so we need to drop it to release the global context
+    drop(client);
+
+    tokio::time::timeout(Duration::from_secs(1), handler.cancel())
+        .await
+        .expect("failed to cancel context");
+    tokio::time::timeout(Duration::from_secs(1), handle)
+        .await
+        .expect("failed to cancel api")
+        .expect("api failed")
+        .expect("api failed");
+}
+
+#[tokio::test]
+async fn test_api_v4() {
+    let db = sqlx::PgPool::connect(&std::env::var("DATABASE_URL").expect("DATABASE_URL not set"))
+        .await
+        .expect("failed to connect to database");
+
+    // We need to initialize logging
+    logging::init("api=debug").expect("failed to initialize logging");
+
+    let (ctx, handler) = Context::new();
+
+    let global = Arc::new(GlobalState {
+        config: AppConfig {
+            bind_address: "0.0.0.0:8081".to_string(),
+            database_url: "".to_string(),
+            log_level: "api=debug".to_string(),
+            config_file: "".to_string(),
+        },
+        ctx,
+        db,
+    });
+
+    let handle = tokio::spawn(run(global));
+
+    // We need to wait for the server to start
+    tokio::time::sleep(Duration::from_millis(300)).await;
+
+    let client = Client::new();
+
+    let resp = client
+        .get("http://localhost:8081/v1/health".parse().unwrap())
+        .await
+        .unwrap();
+    assert_eq!(resp.status(), StatusCode::OK);
+    let body = hyper::body::to_bytes(resp.into_body()).await.unwrap();
+    assert_eq!(body, "OK");
+
+    // The client uses Keep-Alive, so we need to drop it to release the global context
+    drop(client);
+
+    tokio::time::timeout(Duration::from_secs(1), handler.cancel())
+        .await
+        .expect("failed to cancel context");
+    tokio::time::timeout(Duration::from_secs(1), handle)
+        .await
+        .expect("failed to cancel api")
+        .expect("api failed")
+        .expect("api failed");
+}
+
+#[tokio::test]
+async fn test_api_bad_bind() {
+    let db = sqlx::PgPool::connect(&std::env::var("DATABASE_URL").expect("DATABASE_URL not set"))
+        .await
+        .expect("failed to connect to database");
+
+    // We need to initialize logging
+    logging::init("api=debug").expect("failed to initialize logging");
+
+    let (ctx, handler) = Context::new();
+
+    let global = Arc::new(GlobalState {
+        config: AppConfig {
+            bind_address: "????".to_string(),
+            database_url: "".to_string(),
+            log_level: "api=debug".to_string(),
+            config_file: "".to_string(),
+        },
+        ctx,
+        db,
+    });
+
+    assert!(run(global).await.is_err());
+
+    tokio::time::timeout(Duration::from_secs(1), handler.cancel())
+        .await
+        .expect("failed to cancel context");
+}

diff --git a/backend/api/src/api/v1/health.rs b/backend/api/src/api/v1/health.rs
new file mode 100644
index 000000000..99e029bf5
--- /dev/null
+++ b/backend/api/src/api/v1/health.rs
@@ -0,0 +1,16 @@
+use std::convert::Infallible;
+
+use hyper::{Body, Request, Response, StatusCode};
+use routerify::Router;
+
+async fn health(_: Request<Body>) -> Result<Response<Body>, Infallible> {
+    tracing::debug!("Health check");
+    Ok(Response::builder()
+        .status(StatusCode::OK)
+        .body(Body::from("OK"))
+        .expect("failed to build health response"))
+}
+
+pub fn routes() -> Router<Body, Infallible> {
+    Router::builder().get("/", health).build().unwrap()
+}

diff --git a/backend/api/src/api/v1/mod.rs b/backend/api/src/api/v1/mod.rs
new file mode 100644
index 000000000..34b846b72
--- /dev/null
+++ b/backend/api/src/api/v1/mod.rs
@@ -0,0 +1,15 @@
+use std::convert::Infallible;
+
+use hyper::Body;
+use routerify::Router;
+
+mod health;
+mod users;
+
+pub fn routes() -> Router<Body, Infallible> {
+    Router::builder()
+        .scope("/health", health::routes())
+        .scope("/users", users::routes())
+        .build()
+        .unwrap()
+}
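The router tree is built from nested scopes, so each module only knows its local paths: `/v1/health` resolves through the root scope and then the `health` router. A minimal sketch of the same nesting pattern, using only routerify calls that appear in this PR; the `/ping` route and `pong` handler are hypothetical, for illustration only:

```rust
use std::convert::Infallible;

use hyper::{Body, Request, Response};
use routerify::Router;

// Hypothetical handler, mirroring the shape of `health` above.
async fn pong(_: Request<Body>) -> Result<Response<Body>, Infallible> {
    Ok(Response::new(Body::from("pong")))
}

fn v1() -> Router<Body, Infallible> {
    Router::builder().get("/ping", pong).build().unwrap()
}

fn root() -> Router<Body, Infallible> {
    // A request for /v1/ping is matched by the nested router,
    // exactly as /v1/health is in the code above.
    Router::builder().scope("/v1", v1()).build().unwrap()
}
```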
diff --git a/backend/api/src/api/v1/users/mod.rs b/backend/api/src/api/v1/users/mod.rs
new file mode 100644
index 000000000..35dd261a7
--- /dev/null
+++ b/backend/api/src/api/v1/users/mod.rs
@@ -0,0 +1,148 @@
+use std::convert::Infallible;
+use std::sync::Arc;
+
+use hyper::body::HttpBody;
+use hyper::{Body, Request, Response, StatusCode};
+use routerify::prelude::RequestExt;
+use routerify::Router;
+
+use crate::global::GlobalState;
+
+#[derive(serde::Deserialize)]
+struct FetchUserRequest {
+    id: Vec<i64>,
+}
+
+#[derive(serde::Serialize)]
+pub struct ErrorResponse {
+    code: i32,
+    message: String,
+}
+
+impl ErrorResponse {
+    pub fn new(code: i32, message: String) -> Self {
+        Self { code, message }
+    }
+
+    pub fn to_json(&self) -> String {
+        serde_json::to_string(self).expect("failed to serialize error response")
+    }
+}
+
+#[derive(serde::Serialize, serde::Deserialize)]
+struct FetchUserResponse {
+    data: Vec<User>,
+    count: usize,
+}
+
+impl FetchUserResponse {
+    pub fn new(data: Vec<User>) -> Self {
+        let count = data.len();
+        Self { data, count }
+    }
+
+    pub fn to_json(&self) -> String {
+        serde_json::to_string(self).expect("failed to serialize fetch user response")
+    }
+}
+
+#[derive(serde::Serialize, serde::Deserialize)]
+struct User {
+    id: String,
+    username: String,
+    created_at: String,
+}
+
+async fn fetch_users(mut req: Request<Body>) -> Result<Response<Body>, Infallible> {
+    // Get the request body as json and deserialize it into a FetchUserRequest
+    let Some(Ok(data)) = req.body_mut().data().await else {
+        return Ok(Response::builder()
+            .status(StatusCode::BAD_REQUEST)
+            .header("content-type", "application/json")
+            .body(ErrorResponse::new(400, "body has no data".to_string()).to_json().into())
+            .expect("failed to build fetch user response"));
+    };
+
+    let Ok(request) = serde_json::from_slice::<FetchUserRequest>(&data) else {
+        return Ok(Response::builder()
+            .status(StatusCode::BAD_REQUEST)
+            .header("content-type", "application/json")
+            .body(ErrorResponse::new(400, "body is not valid json".to_string()).to_json().into())
+            .expect("failed to build fetch user response"));
+    };
+
+    if request.id.is_empty() {
+        return Ok(Response::builder()
+            .status(StatusCode::BAD_REQUEST)
+            .header("content-type", "application/json")
+            .body(
+                ErrorResponse::new(400, "request has no ids".to_string())
+                    .to_json()
+                    .into(),
+            )
+            .expect("failed to build fetch user response"));
+    }
+
+    if request.id.len() > 100 {
+        return Ok(Response::builder()
+            .status(StatusCode::BAD_REQUEST)
+            .header("content-type", "application/json")
+            .body(
+                ErrorResponse::new(
+                    400,
+                    "you cannot request more than 100 users in a single request".to_string(),
+                )
+                .to_json()
+                .into(),
+            )
+            .expect("failed to build fetch user response"));
+    }
+
+    // Do something with the request
+
+    let global = req.data::<Arc<GlobalState>>().unwrap();
+
+    let result = match sqlx::query!(
+        r#"
+        SELECT id, username, created_at
+        FROM users
+        WHERE id = ANY($1)
+        "#,
+        &request.id
+    )
+    .map(|row| User {
+        id: row.id.to_string(),
+        username: row.username,
+        created_at: row.created_at.to_string(),
+    })
+    .fetch_all(&global.db)
+    .await
+    {
+        Ok(result) => result,
+        Err(err) => {
+            tracing::error!("failed to fetch users: {}", err);
+            return Ok(Response::builder()
+                .status(StatusCode::INTERNAL_SERVER_ERROR)
+                .header("content-type", "application/json")
+                .body(
+                    ErrorResponse::new(500, "failed to fetch users".to_string())
+                        .to_json()
+                        .into(),
+                )
+                .expect("failed to build fetch user response"));
+        }
+    };
+
+    Ok(Response::builder()
+        .status(StatusCode::OK)
+        .header("content-type", "application/json")
+        .body(FetchUserResponse::new(result).to_json().into())
+        .expect("failed to build fetch user response"))
+}
+
+pub fn routes() -> Router<Body, Infallible> {
+    Router::builder().post("/", fetch_users).build().unwrap()
+}
+
+#[cfg(test)]
+mod tests;
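After validation, the handler hands the whole id list to Postgres as one bound array via `ANY($1)`, so a hundred lookups cost one round trip. A sketch of the same query shape with `query_as` instead of the compile-time-checked `query!` macro; `usernames_by_ids` is a hypothetical helper and assumes `id` is a BIGINT column as in the users table:

```rust
// One bound parameter carries the whole id list; Postgres expands it
// server-side, so the statement text never grows with the input.
async fn usernames_by_ids(
    db: &sqlx::PgPool,
    ids: &[i64],
) -> sqlx::Result<Vec<(i64, String)>> {
    sqlx::query_as::<_, (i64, String)>(
        "SELECT id, username FROM users WHERE id = ANY($1)",
    )
    .bind(ids)
    .fetch_all(db)
    .await
}
```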
diff --git a/backend/api/src/api/v1/users/tests.rs b/backend/api/src/api/v1/users/tests.rs
new file mode 100644
index 000000000..c4bb1f085
--- /dev/null
+++ b/backend/api/src/api/v1/users/tests.rs
@@ -0,0 +1,190 @@
+use std::time::Duration;
+
+use common::{context::Context, logging};
+use hyper::{body::Bytes, Client};
+
+use crate::{api::run, config::AppConfig};
+
+use super::*;
+
+#[tokio::test]
+async fn test_user_api() {
+    let db = sqlx::PgPool::connect(&std::env::var("DATABASE_URL").expect("DATABASE_URL not set"))
+        .await
+        .expect("failed to connect to database");
+
+    // We need to initialize logging
+    logging::init("api=debug").expect("failed to initialize logging");
+
+    let (ctx, handler) = Context::new();
+
+    let global = Arc::new(GlobalState {
+        config: AppConfig {
+            bind_address: "[::]:8081".to_string(),
+            database_url: "".to_string(),
+            log_level: "api=debug".to_string(),
+            config_file: "".to_string(),
+        },
+        ctx,
+        db,
+    });
+
+    sqlx::query!("DELETE FROM users")
+        .execute(&global.db)
+        .await
+        .expect("failed to delete users");
+    sqlx::query!("INSERT INTO users (id, username, password_hash, email, email_verified, created_at, last_login_at) VALUES
+        (1, 'admin', 'abc', 'xyz@gmail.com', true, '2021-01-01 00:00:00', '2021-01-01 00:00:00'),
+        (2, 'user', 'abc2', 'xyz2@gmail.com', true, '2021-01-01 00:00:00', '2021-01-01 00:00:00'),
+        (3, 'user1', 'abc3', 'xyz3@gmail.com', true, '2021-01-01 00:00:00', '2021-01-01 00:00:00'),
+        (4, 'user2', 'abc4', 'xyz4@gmail.com', true, '2021-01-01 00:00:00', '2021-01-01 00:00:00'),
+        (5, 'user3', 'abc5', 'xyz5@gmail.com', true, '2021-01-01 00:00:00', '2021-01-01 00:00:00'),
+        (6, 'user4', 'abc6', 'xyz6@gmail.com', true, '2021-01-01 00:00:00', '2021-01-01 00:00:00'),
+        (7, 'user5', 'abc7', 'xyz7@gmail.com', true, '2021-01-01 00:00:00', '2021-01-01 00:00:00'),
+        (8, 'user6', 'abc8', 'xyz8@gmail.com', true, '2021-01-01 00:00:00', '2021-01-01 00:00:00'),
+        (9, 'user7', 'abc9', 'xyz9@gmail.com', true, '2021-01-01 00:00:00', '2021-01-01 00:00:00'),
+        (10, 'user8', 'abc10', 'xyz10@gmail.com', true, '2021-01-01 00:00:00', '2021-01-01 00:00:00'),
+        (11, 'user9', 'abc11', 'xyz11@gmail.com', true, '2021-01-01 00:00:00', '2021-01-01 00:00:00');").execute(&global.db).await.expect("failed to insert users");
+
+    let handle = tokio::spawn(run(global.clone()));
+
+    // We need to wait for the server to start
+    tokio::time::sleep(Duration::from_millis(300)).await;
+
+    let client = Client::new();
+
+    let resp = client
+        .get("http://localhost:8081/v1/users".parse().unwrap())
+        .await
+        .unwrap();
+    assert_eq!(resp.status(), StatusCode::NOT_FOUND);
+
+    let req = Request::builder()
+        .method("POST")
+        .uri("http://localhost:8081/v1/users")
+        .body(Body::empty())
+        .unwrap();
+
+    let resp = client.request(req).await.unwrap();
+    assert_eq!(resp.status(), StatusCode::BAD_REQUEST);
+    assert_eq!(
+        resp.headers().get("content-type").unwrap(),
+        "application/json"
+    );
+
+    let body = hyper::body::to_bytes(resp.into_body()).await.unwrap();
+    assert_eq!(
+        body,
+        Bytes::from("{\"code\":400,\"message\":\"body has no data\"}")
+    );
+
+    let req = Request::builder()
+        .method("POST")
+        .uri("http://localhost:8081/v1/users")
+        .body(Body::from("abc"))
+        .unwrap();
+
+    let resp = client.request(req).await.unwrap();
+    assert_eq!(resp.status(), StatusCode::BAD_REQUEST);
+    assert_eq!(
+        resp.headers().get("content-type").unwrap(),
+        "application/json"
+    );
+
+    let body = hyper::body::to_bytes(resp.into_body()).await.unwrap();
+    assert_eq!(
+        body,
+        Bytes::from("{\"code\":400,\"message\":\"body is not valid json\"}")
+    );
+
+    let req = Request::builder()
+        .method("POST")
+        .uri("http://localhost:8081/v1/users")
+        .body(Body::from("{\"id\":[]}"))
+        .unwrap();
+
+    let resp = client.request(req).await.unwrap();
+    assert_eq!(resp.status(), StatusCode::BAD_REQUEST);
+    assert_eq!(
+        resp.headers().get("content-type").unwrap(),
+        "application/json"
+    );
+
+    let body = hyper::body::to_bytes(resp.into_body()).await.unwrap();
+    assert_eq!(
+        body,
+        Bytes::from("{\"code\":400,\"message\":\"request has no ids\"}")
+    );
+
+    let req = Request::builder()
+        .method("POST")
+        .uri("http://localhost:8081/v1/users")
+        .body(Body::from("{\"id\":[1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,11,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1]}"))
+        .unwrap();
+
+    let resp = client.request(req).await.unwrap();
+    assert_eq!(resp.status(), StatusCode::BAD_REQUEST);
+    assert_eq!(
+        resp.headers().get("content-type").unwrap(),
+        "application/json"
+    );
+
+    let body = hyper::body::to_bytes(resp.into_body()).await.unwrap();
+    assert_eq!(body.to_vec(), b"{\"code\":400,\"message\":\"you cannot request more than 100 users in a single request\"}");
+
+    let req = Request::builder()
+        .method("POST")
+        .uri("http://localhost:8081/v1/users")
+        .body(Body::from("{\"id\":[1,2,3,4,5,6,7,8,9,10]}"))
+        .unwrap();
+
+    let resp = client.request(req).await.unwrap();
+    assert_eq!(resp.status(), StatusCode::OK);
+    assert_eq!(
+        resp.headers().get("content-type").unwrap(),
+        "application/json"
+    );
+
+    let body = hyper::body::to_bytes(resp.into_body()).await.unwrap();
+
+    let resp: FetchUserResponse = serde_json::from_slice(&body).unwrap();
+    assert_eq!(resp.count, 10);
+    assert_eq!(resp.data.len(), 10);
+
+    // Lets try disconnect the database
+    global.db.close().await;
+
+    // Drop global so that the context is cancelled later
+    drop(global);
+
+    let req = Request::builder()
+        .method("POST")
+        .uri("http://localhost:8081/v1/users")
+        .body(Body::from("{\"id\":[1,2,3,4,5,6,7,8,9,10]}"))
+        .unwrap();
+
+    let resp = client.request(req).await.unwrap();
+    assert_eq!(resp.status(), StatusCode::INTERNAL_SERVER_ERROR);
+    assert_eq!(
+        resp.headers().get("content-type").unwrap(),
+        "application/json"
+    );
+
+    let body = hyper::body::to_bytes(resp.into_body()).await.unwrap();
+    assert_eq!(
+        body,
+        Bytes::from("{\"code\":500,\"message\":\"failed to fetch users\"}")
+    );
+
+    // The client uses Keep-Alive, so we need to drop it to release the global context
+    drop(client);
+
+    tokio::time::timeout(Duration::from_secs(1), handler.cancel())
+        .await
+        .expect("failed to cancel context");
+    tokio::time::timeout(Duration::from_secs(1), handle)
+        .await
+        .expect("failed to cancel api")
+        .expect("api failed")
+        .expect("api failed");
+}
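These tests compare response bodies byte for byte, which works because `serde_json` writes struct fields in declaration order. A small illustration of the invariant being relied on, reusing `ErrorResponse` from the module above:

```rust
// `code` is declared before `message`, so it always serializes first —
// exactly the byte sequence the assertions above expect.
let err = ErrorResponse::new(400, "request has no ids".to_string());
assert_eq!(err.to_json(), r#"{"code":400,"message":"request has no ids"}"#);
```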
diff --git a/backend/api/src/config.rs b/backend/api/src/config.rs
index 9bdcf01c5..805e28861 100644
--- a/backend/api/src/config.rs
+++ b/backend/api/src/config.rs
@@ -1,7 +1,7 @@
 use anyhow::Result;
 use serde::{Deserialize, Serialize};

-#[derive(Debug, Clone, Deserialize, Serialize)]
+#[derive(Debug, Clone, Deserialize, Serialize, PartialEq)]
 #[serde(default)]
 pub struct AppConfig {
     /// The log level to use, this is a tracing env filter

@@ -23,7 +23,7 @@
             log_level: "api=info".to_string(),
             config_file: "config".to_string(),
             bind_address: "[::]:8080".to_string(),
-            database_url: "postgres://postgres:postgres@localhost:5432/postgres".to_string(),
+            database_url: "postgres://postgres:postgres@localhost:5432/scuffle-dev".to_string(),
         }
     }
 }

@@ -33,3 +33,89 @@
         Ok(common::config::parse(&AppConfig::default().config_file)?)
     }
 }
+
+#[cfg(test)]
+mod tests {
+    use super::*;
+
+    #[test]
+    fn test_parse() {
+        let config = AppConfig::parse().unwrap();
+        assert_eq!(config, AppConfig::default());
+    }
+
+    #[test]
+    fn test_parse_env() {
+        std::env::set_var("SCUF_LOG_LEVEL", "api=debug");
+        std::env::set_var("SCUF_BIND_ADDRESS", "[::]:8081");
+        std::env::set_var(
+            "SCUF_DATABASE_URL",
+            "postgres://postgres:postgres@localhost:5433/postgres",
+        );
+
+        let config = AppConfig::parse().unwrap();
+        assert_eq!(config.log_level, "api=debug");
+        assert_eq!(config.bind_address, "[::]:8081");
+        assert_eq!(
+            config.database_url,
+            "postgres://postgres:postgres@localhost:5433/postgres"
+        );
+    }
+
+    #[test]
+    fn test_parse_file() {
+        let tmp_dir = tempfile::tempdir().unwrap();
+        let config_file = tmp_dir.path().join("config.toml");
+
+        std::fs::write(
+            &config_file,
+            r#"
+log_level = "api=debug"
+bind_address = "[::]:8081"
+database_url = "postgres://postgres:postgres@localhost:5433/postgres"
+"#,
+        )
+        .unwrap();
+
+        std::env::set_var("SCUF_CONFIG_FILE", config_file.to_str().unwrap());
+
+        let config = AppConfig::parse().unwrap();
+
+        assert_eq!(config.log_level, "api=debug");
+        assert_eq!(config.bind_address, "[::]:8081");
+        assert_eq!(
+            config.database_url,
+            "postgres://postgres:postgres@localhost:5433/postgres"
+        );
+        assert_eq!(config.config_file, config_file.to_str().unwrap());
+    }
+
+    #[test]
+    fn test_parse_file_env() {
+        let tmp_dir = tempfile::tempdir().unwrap();
+        let config_file = tmp_dir.path().join("config.toml");
+
+        std::fs::write(
+            &config_file,
+            r#"
+log_level = "api=debug"
+bind_address = "[::]:8081"
+database_url = "postgres://postgres:postgres@localhost:5433/postgres"
+"#,
+        )
+        .unwrap();
+
+        std::env::set_var("SCUF_CONFIG_FILE", config_file.to_str().unwrap());
+        std::env::set_var("SCUF_LOG_LEVEL", "api=info");
+
+        let config = AppConfig::parse().unwrap();
+
+        assert_eq!(config.log_level, "api=info");
+        assert_eq!(config.bind_address, "[::]:8081");
+        assert_eq!(
+            config.database_url,
+            "postgres://postgres:postgres@localhost:5433/postgres"
+        );
+        assert_eq!(config.config_file, config_file.to_str().unwrap());
+    }
+}

diff --git a/backend/api/src/global.rs b/backend/api/src/global.rs
index 89fe36b15..ebae135b8 100644
--- a/backend/api/src/global.rs
+++ b/backend/api/src/global.rs
@@ -1,6 +1,9 @@
+use common::context::Context;
+
 use crate::config::AppConfig;

 pub struct GlobalState {
     pub config: AppConfig,
     pub db: sqlx::PgPool,
+    pub ctx: Context,
 }
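The tests above pin down the precedence the config loader must honour: `SCUF_`-prefixed environment variables override the config file, which overrides the defaults. A condensed usage sketch of that behaviour, assuming a fresh process with no `config` file on disk (the `[::]:9999` value is arbitrary, for illustration):

```rust
std::env::set_var("SCUF_BIND_ADDRESS", "[::]:9999");

let config = AppConfig::parse().unwrap();
assert_eq!(config.bind_address, "[::]:9999"); // env beats the default
assert_eq!(config.log_level, "api=info");     // untouched fields keep their defaults
```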
diff --git a/backend/api/src/main.rs b/backend/api/src/main.rs
index 4c059b955..29249cb9a 100644
--- a/backend/api/src/main.rs
+++ b/backend/api/src/main.rs
@@ -1,8 +1,8 @@
-use std::sync::Arc;
+use std::{sync::Arc, time::Duration};

 use anyhow::Result;
-use common::logging;
-use tokio::select;
+use common::{context::Context, logging, signal};
+use tokio::{select, signal::unix::SignalKind, time};

 mod api;
 mod config;

@@ -15,13 +15,34 @@ async fn main() -> Result<()> {
     let db = sqlx::PgPool::connect(&config.database_url).await?;

-    let global = Arc::new(global::GlobalState { config, db });
+    let (ctx, handler) = Context::new();
+
+    let global = Arc::new(global::GlobalState { config, db, ctx });

     tracing::info!("starting");

+    let api_future = tokio::spawn(api::run(global.clone()));
+
+    // Listen on both sigint and sigterm and cancel the context when either is received
+    let mut signal_handler = signal::SignalHandler::new()
+        .with_signal(SignalKind::interrupt())
+        .with_signal(SignalKind::terminate());
+
+    select! {
+        r = api_future => tracing::error!("api stopped unexpectedly: {:?}", r),
+        _ = signal_handler.recv() => tracing::info!("shutting down"),
+    }
+
+    // We cannot have a context in scope when we cancel the handler, otherwise it will deadlock.
+    drop(global);
+
+    // Cancel the context
+    tracing::info!("waiting for tasks to finish");
+
     select! {
-        _ = api::run(global.clone()) => tracing::info!("api stopped"),
-        _ = tokio::signal::ctrl_c() => tracing::info!("ctrl-c received"),
+        _ = time::sleep(Duration::from_secs(60)) => tracing::warn!("force shutting down"),
+        _ = signal_handler.recv() => tracing::warn!("force shutting down"),
+        _ = handler.cancel() => tracing::info!("shutting down"),
     }

     Ok(())
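The `drop(global)` before the final `select!` is load-bearing: `Handler::cancel()` only resolves once every `Context` clone has been dropped, so cancelling while `main` still holds one would wait forever. A minimal sketch of that rule in isolation, assuming the `common` crate and a tokio runtime (the `main` wrapper is illustrative):

```rust
use common::context::Context;

#[tokio::main]
async fn main() {
    let (ctx, handler) = Context::new();

    let task = tokio::spawn(async move {
        ctx.done().await; // resolves once cancel() begins
        // `ctx` is dropped here — that drop is what lets cancel() return
    });

    // Completes only because the sole Context clone moved into the task
    // and dies there; holding another clone here would deadlock.
    handler.cancel().await;
    task.await.unwrap();
}
```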
diff --git a/codecov.yml b/codecov.yml
new file mode 100644
index 000000000..e9e3e8236
--- /dev/null
+++ b/codecov.yml
@@ -0,0 +1,21 @@
+coverage:
+  status:
+    project:
+      default:
+        target: auto
+        threshold: 10%
+
+github_checks:
+  annotations: false
+
+codecov:
+  require_ci_to_pass: false
+  notify:
+    wait_for_ci: false
+
+comment:
+  layout: "diff, flags, files"
+  behavior: default
+  require_changes: false
+  require_head: no
+  require_base: no

diff --git a/common/Cargo.toml b/common/Cargo.toml
index 2dd5d776c..81ce795cd 100644
--- a/common/Cargo.toml
+++ b/common/Cargo.toml
@@ -14,4 +14,8 @@
 tracing-subscriber = { version = "0.3.16", features = ["fmt", "env-filter", "json"] }
 chrono = { version = "0.4.23", default-features = false }
 bitmask-enum = "2.1.0"
 async-trait = "0.1.64"
-sqlx = { version = "0.6.2", features = ["postgres", "offline"] }
+sqlx = { version = "0.6.2", features = ["postgres", "offline", "runtime-tokio-rustls"] }
+tokio = { version = "1.25.0", features = ["full"] }
+
+[dev-dependencies]
+tempfile = "3.3.0"

diff --git a/common/src/config.rs b/common/src/config.rs
index 273b1bd11..f03cbee62 100644
--- a/common/src/config.rs
+++ b/common/src/config.rs
@@ -30,3 +30,6 @@ pub fn parse<'de, T: Deserialize<'de>>(config_file: &str) -> Result<T>

diff --git a/common/src/context.rs b/common/src/context.rs
new file mode 100644
--- /dev/null
+++ b/common/src/context.rs
+use std::fmt::{Display, Formatter};
+use std::future::Future;
+use std::pin::Pin;
+use std::sync::Arc;
+
+use tokio::sync::{broadcast, oneshot};
+use tokio::time::Instant;
+
+pub struct RawContext {
+    _sender: oneshot::Sender<()>,
+    deadline: Option<Instant>,
+    parent: Option<Context>,
+    cancel_receiver: broadcast::Receiver<()>,
+}
+
+#[derive(Debug, PartialEq, Eq, Clone, Copy)]
+pub enum CancelReason {
+    Parent,
+    Deadline,
+    Cancel,
+}
+
+impl Display for CancelReason {
+    fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
+        match self {
+            Self::Parent => write!(f, "Parent"),
+            Self::Deadline => write!(f, "Deadline"),
+            Self::Cancel => write!(f, "Cancel"),
+        }
+    }
+}
+
+impl RawContext {
+    #[must_use]
+    fn new() -> (Self, Handler) {
+        let (sender, recv) = oneshot::channel();
+        let (cancel_sender, cancel_receiver) = broadcast::channel(1);
+
+        (
+            Self {
+                _sender: sender,
+                deadline: None,
+                parent: None,
+                cancel_receiver,
+            },
+            Handler {
+                recv,
+                cancel_sender,
+            },
+        )
+    }
+
+    #[must_use]
+    fn with_deadline(deadline: Instant) -> (Self, Handler) {
+        let (mut ctx, handler) = Self::new();
+        ctx.deadline = Some(deadline);
+        (ctx, handler)
+    }
+
+    #[must_use]
+    fn with_parent(parent: Context, deadline: Option<Instant>) -> (Self, Handler) {
+        let (mut ctx, handler) = Self::new();
+        ctx.parent = Some(parent);
+        ctx.deadline = deadline;
+        (ctx, handler)
+    }
+
+    fn done(&self) -> Pin<Box<dyn Future<Output = CancelReason> + '_ + Send>> {
+        let mut recv = self.cancel_receiver.resubscribe();
+        Box::pin(async move {
+            match (&self.parent, self.deadline) {
+                (Some(parent), Some(deadline)) => {
+                    tokio::select! {
+                        _ = parent.done() => CancelReason::Parent,
+                        _ = tokio::time::sleep_until(deadline) => CancelReason::Deadline,
+                        _ = recv.recv() => CancelReason::Cancel,
+                    }
+                }
+                (Some(parent), None) => {
+                    tokio::select! {
+                        _ = parent.done() => CancelReason::Parent,
+                        _ = recv.recv() => CancelReason::Cancel,
+                    }
+                }
+                (None, Some(deadline)) => {
+                    tokio::select! {
+                        _ = tokio::time::sleep_until(deadline) => CancelReason::Deadline,
+                        _ = recv.recv() => CancelReason::Cancel,
+                    }
+                }
+                (None, None) => {
+                    let _ = recv.recv().await;
+                    CancelReason::Cancel
+                }
+            }
+        })
+    }
+}
+
+pub struct Handler {
+    recv: oneshot::Receiver<()>,
+    cancel_sender: broadcast::Sender<()>,
+}
+
+impl Handler {
+    pub async fn done(&mut self) {
+        let _ = (&mut self.recv).await;
+    }
+
+    pub async fn cancel(self) {
+        drop(self.cancel_sender);
+
+        let _ = self.recv.await;
+    }
+}
+
+#[derive(Clone)]
+pub struct Context(Arc<RawContext>);
+
+impl From<RawContext> for Context {
+    fn from(ctx: RawContext) -> Self {
+        Self(Arc::new(ctx))
+    }
+}
+
+impl Context {
+    pub fn new() -> (Self, Handler) {
+        let (ctx, handler) = RawContext::new();
+        (ctx.into(), handler)
+    }
+
+    pub fn with_deadline(deadline: Instant) -> (Self, Handler) {
+        let (ctx, handler) = RawContext::with_deadline(deadline);
+        (ctx.into(), handler)
+    }
+
+    pub fn with_timeout(timeout: std::time::Duration) -> (Self, Handler) {
+        let deadline = Instant::now() + timeout;
+        Self::with_deadline(deadline)
+    }
+
+    pub fn with_parent(parent: Context, deadline: Option<Instant>) -> (Self, Handler) {
+        let (ctx, handler) = RawContext::with_parent(parent, deadline);
+        (ctx.into(), handler)
+    }
+
+    pub async fn done(&self) -> CancelReason {
+        self.0.done().await
+    }
+}
+
+#[cfg(test)]
+mod tests;
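Contexts compose: a child built with `with_parent` wakes up for whichever of parent-cancel, deadline, or its own cancel fires first, and `done()` reports which one. A minimal sketch of that composition, assuming the `common` crate and a tokio runtime:

```rust
use std::time::Duration;

use common::context::{CancelReason, Context};
use tokio::time::Instant;

#[tokio::main]
async fn main() {
    let (parent, parent_handler) = Context::new();
    let (child, _child_handler) = Context::with_parent(
        parent,
        Some(Instant::now() + Duration::from_secs(30)), // generous deadline; won't fire here
    );

    let waiter = tokio::spawn(async move {
        // The parent is cancelled long before the deadline, so the child
        // reports Parent, not Deadline.
        assert_eq!(child.done().await, CancelReason::Parent);
    });

    parent_handler.cancel().await; // cancels every descendant transitively
    waiter.await.unwrap();
}
```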
diff --git a/common/src/context/tests.rs b/common/src/context/tests.rs
new file mode 100644
index 000000000..81ecea3be
--- /dev/null
+++ b/common/src/context/tests.rs
@@ -0,0 +1,177 @@
+use std::time::Duration;
+
+use super::*;
+
+#[tokio::test]
+async fn test_context_cancel() {
+    let (ctx, handler) = Context::new();
+
+    let handle = tokio::spawn(async move {
+        let reason = ctx.done().await;
+        assert_eq!(reason, CancelReason::Cancel);
+    });
+
+    tokio::time::timeout(Duration::from_millis(300), handler.cancel())
+        .await
+        .expect("task should be cancelled");
+    tokio::time::timeout(Duration::from_millis(300), handle)
+        .await
+        .expect("task should be cancelled")
+        .expect("panic in task");
+}
+
+#[tokio::test]
+async fn test_context_deadline() {
+    let (ctx, mut handler) = Context::with_deadline(Instant::now() + Duration::from_millis(100));
+
+    let handle = tokio::spawn(async move {
+        let reason = ctx.done().await;
+        assert_eq!(reason, CancelReason::Deadline);
+    });
+
+    tokio::time::timeout(Duration::from_millis(300), handle)
+        .await
+        .expect("task should be cancelled")
+        .expect("panic in task");
+    tokio::time::timeout(Duration::from_millis(300), handler.done())
+        .await
+        .expect("task should be cancelled");
+}
+
+#[tokio::test]
+async fn test_context_timeout() {
+    let (ctx, mut handler) = Context::with_timeout(Duration::from_millis(100));
+
+    let handle = tokio::spawn(async move {
+        let reason = ctx.done().await;
+        assert_eq!(reason, CancelReason::Deadline);
+    });
+
+    tokio::time::timeout(Duration::from_millis(300), handle)
+        .await
+        .expect("task should be cancelled")
+        .expect("panic in task");
+    tokio::time::timeout(Duration::from_millis(300), handler.done())
+        .await
+        .expect("task should be cancelled");
+}
+
+#[tokio::test]
+async fn test_context_parent() {
+    let (parent, parent_handler) = Context::new();
+    let (ctx, mut handler) = Context::with_parent(parent, None);
+
+    let handle = tokio::spawn(async move {
+        let reason = ctx.done().await;
+        assert_eq!(reason, CancelReason::Parent);
+    });
+
+    tokio::time::timeout(Duration::from_millis(300), parent_handler.cancel())
+        .await
+        .expect("task should be cancelled");
+    tokio::time::timeout(Duration::from_millis(300), handle)
+        .await
+        .expect("task should be cancelled")
+        .expect("panic in task");
+    tokio::time::timeout(Duration::from_millis(300), handler.done())
+        .await
+        .expect("task should be cancelled");
+}
+
+#[tokio::test]
+async fn test_context_parent_deadline() {
+    let (parent, mut parent_handler) = Context::new();
+    let (ctx, mut handler) =
+        Context::with_parent(parent, Some(Instant::now() + Duration::from_millis(100)));
+
+    let handle = tokio::spawn(async move {
+        let reason = ctx.done().await;
+        assert_eq!(reason, CancelReason::Deadline);
+    });
+
+    tokio::time::timeout(Duration::from_millis(300), parent_handler.done())
+        .await
+        .expect("task should be cancelled");
+    tokio::time::timeout(Duration::from_millis(300), handler.done())
+        .await
+        .expect("task should be cancelled");
+    tokio::time::timeout(Duration::from_millis(300), handle)
+        .await
+        .expect("task should be cancelled")
+        .expect("panic in task");
+}
+
+#[tokio::test]
+async fn test_context_parent_deadline_cancel() {
+    let (parent, mut parent_handler) = Context::new();
+    let (ctx, handler) =
+        Context::with_parent(parent, Some(Instant::now() + Duration::from_millis(100)));
+
+    let handle = tokio::spawn(async move {
+        let reason = ctx.done().await;
+        assert_eq!(reason, CancelReason::Cancel);
+    });
+
+    tokio::time::timeout(Duration::from_millis(300), handler.cancel())
+        .await
+        .expect("task should be cancelled");
+    tokio::time::timeout(Duration::from_millis(300), parent_handler.done())
+        .await
+        .expect("task should be cancelled");
+    tokio::time::timeout(Duration::from_millis(300), handle)
+        .await
+        .expect("task should be cancelled")
+        .expect("panic in task");
+}
+
+#[tokio::test]
+async fn test_context_parent_deadline_parent_cancel() {
+    let (parent, parent_handler) = Context::new();
+    let (ctx, mut handler) =
+        Context::with_parent(parent, Some(Instant::now() + Duration::from_millis(100)));
+
+    let handle = tokio::spawn(async move {
+        let reason = ctx.done().await;
+        assert_eq!(reason, CancelReason::Parent);
+    });
+
+    tokio::time::timeout(Duration::from_millis(300), parent_handler.cancel())
+        .await
+        .expect("task should be cancelled");
+    tokio::time::timeout(Duration::from_millis(300), handler.done())
+        .await
+        .expect("task should be cancelled");
+    tokio::time::timeout(Duration::from_millis(300), handle)
+        .await
+        .expect("task should be cancelled")
+        .expect("panic in task");
+}
+
+#[tokio::test]
+async fn test_context_cancel_cloned() {
+    let (ctx, handler) = Context::new();
+    let ctx2 = ctx.clone();
+
+    let handle = tokio::spawn(async move {
+        let reason = ctx.done().await;
+        assert_eq!(reason, CancelReason::Cancel);
+    });
+
+    tokio::time::timeout(Duration::from_millis(300), handler.cancel())
+        .await
+        .expect_err("task should block because a clone exists");
+    tokio::time::timeout(Duration::from_millis(300), handle)
+        .await
+        .expect("task should be cancelled")
+        .expect("panic in task");
+    tokio::time::timeout(Duration::from_millis(300), ctx2.done())
+        .await
+        .expect("task should be cancelled");
+}
+
+#[test]
+fn test_fmt_reason() {
+    assert_eq!(format!("{}", CancelReason::Cancel), "Cancel");
+    assert_eq!(format!("{}", CancelReason::Deadline), "Deadline");
+    assert_eq!(format!("{}", CancelReason::Parent), "Parent");
+}
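`with_timeout` is just `with_deadline` sugar, and `done()` distinguishes the three wake-up causes, which is what `test_context_timeout` exercises. A condensed usage sketch, assuming the `common` crate inside an async test or runtime:

```rust
use std::time::Duration;

use common::context::{CancelReason, Context};

// Keep `_handler` alive: dropping it would count as a cancel and the
// reported reason would be Cancel instead of Deadline.
let (ctx, _handler) = Context::with_timeout(Duration::from_millis(50));
assert_eq!(ctx.done().await, CancelReason::Deadline);
```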
diff --git a/common/src/lib.rs b/common/src/lib.rs
index 0290f4c8e..6c0efc39e 100644
--- a/common/src/lib.rs
+++ b/common/src/lib.rs
@@ -1,3 +1,7 @@
+#![forbid(unsafe_code)]
+
 pub mod config;
+pub mod context;
 pub mod logging;
+pub mod signal;
 pub mod types;

diff --git a/common/src/logging.rs b/common/src/logging.rs
index a7ea61bbe..6f6e23bd7 100644
--- a/common/src/logging.rs
+++ b/common/src/logging.rs
@@ -12,3 +12,6 @@

     Ok(())
 }
+
+#[cfg(test)]
+mod tests;

diff --git a/common/src/logging/tests.rs b/common/src/logging/tests.rs
new file mode 100644
index 000000000..3cd6b5812
--- /dev/null
+++ b/common/src/logging/tests.rs
@@ -0,0 +1,11 @@
+use super::*;
+
+#[test]
+fn test_init() {
+    init("info").unwrap();
+}
+
+#[test]
+fn test_with_bad_input() {
+    init("???").unwrap();
+}

diff --git a/common/src/signal.rs b/common/src/signal.rs
new file mode 100644
index 000000000..6a67bbd95
--- /dev/null
+++ b/common/src/signal.rs
@@ -0,0 +1,48 @@
+use tokio::{signal::unix::SignalKind, sync::mpsc};
+
+pub struct SignalHandler {
+    signal_send: mpsc::Sender<SignalKind>,
+    signal_recv: mpsc::Receiver<SignalKind>,
+}
+
+impl Default for SignalHandler {
+    fn default() -> Self {
+        let (signal_send, signal_recv) = mpsc::channel(1);
+        Self {
+            signal_send,
+            signal_recv,
+        }
+    }
+}
+
+impl SignalHandler {
+    pub fn new() -> Self {
+        Self::default()
+    }
+
+    pub fn with_signal(self, kind: SignalKind) -> Self {
+        let mut signal = tokio::signal::unix::signal(kind).expect("failed to create signal");
+
+        let send = self.signal_send.clone();
+        tokio::spawn(async move {
+            loop {
+                signal.recv().await;
+                if send.send(kind).await.is_err() {
+                    break;
+                }
+            }
+        });
+
+        self
+    }
+
+    pub async fn recv(&mut self) -> SignalKind {
+        self.signal_recv
+            .recv()
+            .await
+            .expect("failed to receive signal")
+    }
+}
+
+#[cfg(test)]
+mod tests;

diff --git a/common/src/signal/tests.rs b/common/src/signal/tests.rs
new file mode 100644
index 000000000..fb32662c5
--- /dev/null
+++ b/common/src/signal/tests.rs
@@ -0,0 +1,41 @@
+use std::time::Duration;
+
+use tokio::process::Command;
+
+use super::*;
+
+#[tokio::test]
+async fn test_signal() {
+    let mut handler = SignalHandler::new()
+        .with_signal(SignalKind::interrupt())
+        .with_signal(SignalKind::terminate());
+
+    // Send a SIGINT to the process
+    // We need to get the current pid and send the signal to it
+    let pid = std::process::id();
+
+    Command::new("kill")
+        .arg("-s")
+        .arg("SIGINT")
+        .arg(pid.to_string())
+        .status()
+        .await
+        .expect("failed to send SIGINT");
+
+    tokio::time::timeout(Duration::from_secs(1), handler.recv())
+        .await
+        .expect("failed to receive signal");
+
+    // Send a SIGTERM to the process
+    Command::new("kill")
+        .arg("-s")
+        .arg("SIGTERM")
+        .arg(pid.to_string())
+        .status()
+        .await
+        .expect("failed to send SIGTERM");
+
+    tokio::time::timeout(Duration::from_secs(1), handler.recv())
+        .await
+        .expect("failed to receive signal");
+}
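Each `with_signal` call spawns a forwarder task that pushes its signal kind onto one shared channel, so a single `recv()` observes SIGINT and SIGTERM alike — which is how `main` treats either as a shutdown request. A minimal wiring sketch, assuming the `common` crate on a Unix target:

```rust
use common::signal::SignalHandler;
use tokio::signal::unix::SignalKind;

#[tokio::main]
async fn main() {
    let mut signals = SignalHandler::new()
        .with_signal(SignalKind::interrupt())
        .with_signal(SignalKind::terminate());

    // Resolves on the first signal of either kind.
    let kind = signals.recv().await;
    println!("caught signal {:?}, beginning shutdown", kind);
}
```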
diff --git a/common/src/types/user.rs b/common/src/types/user.rs
index a75f623ec..85c0d3de4 100644
--- a/common/src/types/user.rs
+++ b/common/src/types/user.rs
@@ -1,6 +1,6 @@
 use chrono::{DateTime, Utc};

-#[derive(Debug, Clone)]
+#[derive(Debug, Clone, Default)]
 pub struct Model {
     pub id: i64, // bigint, primary key
     pub username: String, // varchar(32)

diff --git a/dev-stack/db.docker-compose.yaml b/dev-stack/db.docker-compose.yml
similarity index 95%
rename from dev-stack/db.docker-compose.yaml
rename to dev-stack/db.docker-compose.yml
index 5ed3c6b37..9746fb770 100644
--- a/dev-stack/db.docker-compose.yaml
+++ b/dev-stack/db.docker-compose.yml
@@ -1,5 +1,7 @@
 version: "3.1"

+name: "db-scuffle-dev"
+
 services:
   postgres:
     image: postgres:15.2

diff --git a/dev-stack/stack-example.docker-compose.yaml b/dev-stack/example.docker-compose.yml
similarity index 88%
rename from dev-stack/stack-example.docker-compose.yaml
rename to dev-stack/example.docker-compose.yml
index 688f41389..2716f1055 100644
--- a/dev-stack/stack-example.docker-compose.yaml
+++ b/dev-stack/example.docker-compose.yml
@@ -1,5 +1,7 @@
 version: "3.1"

+name: "stack-scuffle-dev"
+
 services:
   api:
     build:
@@ -9,10 +11,10 @@ services:
       - "8080:8080"
     environment:
       - SCUF_DATABASE_URL=postgres://postgres:postgres@postgres:5432/scuffle-dev
-  frontend:
+  website:
     build:
       context: ..
-      dockerfile: docker/frontend.Dockerfile
+      dockerfile: docker/website.Dockerfile
     ports:
       - "4000:4000"
   edge:

diff --git a/docker/api.Dockerfile b/docker/api.Dockerfile
index 560fe4b52..f48c72b5a 100644
--- a/docker/api.Dockerfile
+++ b/docker/api.Dockerfile
@@ -1,7 +1,13 @@
-FROM scratch
+FROM gcr.io/distroless/static-debian11

-COPY target/x86_64-unknown-linux-musl/release/api /app/
+LABEL org.opencontainers.image.source=https://github.com/scuffletv/scuffle
+LABEL org.opencontainers.image.description="API Container for ScuffleTV"
+LABEL org.opencontainers.image.licenses=BSD-4-Clause
+
+COPY target/x86_64-unknown-linux-gnu/release/api /app/

 STOPSIGNAL SIGINT

+USER 1000
+
 ENTRYPOINT ["/app/api"]

diff --git a/docker/build.Dockerfile b/docker/build.Dockerfile
deleted file mode 100644
index c88713157..000000000
--- a/docker/build.Dockerfile
+++ /dev/null
@@ -1,25 +0,0 @@
-# syntax = docker/dockerfile:1.4
-FROM rust:1.67.1-alpine3.17
-
-RUN <

diff --git a/frontend/player/maskfile.md b/frontend/player/maskfile.md
new file mode 100644

+## build
+
+> Build the project
+
+```bash
+set -e
+if [[ "$verbose" == "true" ]]; then
+  set -x
+fi
+
+cd $MASKFILE_DIR
+
+wasm-pack build --target web --out-name player --out-dir ./pkg --release
+```
+
+### dev
+
+> Run the project in development mode
+
+**OPTIONS**
+
+- watch
+  - flags: --watch
+  - desc: Watch for changes and rebuild
+
+```bash
+cd $MASKFILE_DIR
+
+wasm-pack build --target web --out-name player --out-dir ./pkg --dev
+
+if [ "$watch" == "true" ]; then
+  while true; do
+    cargo-watch -q --postpone -s "wasm-pack build --target web --out-name player --out-dir ./pkg --dev"
+  done
+fi
+```
+
+## clean
+
+> Clean the project
+
+```bash
+cd $MASKFILE_DIR
+
+rm -rf pkg
+```
--write \"**/*\" -u", + "wasm": "mask --maskfile $(realpath ../player/maskfile.md) build", + "wasm:dev": "yarn wasm dev", + "wasm:watch": "yarn wasm:dev --watch", + "wasm:clean": "mask --maskfile $(realpath ../player/maskfile.md) clean", + "clean": "rm -rf build .svelte-kit wasm.d.ts && yarn wasm:clean" }, "devDependencies": { "@playwright/test": "^1.28.1", @@ -37,8 +38,11 @@ "tslib": "^2.4.1", "typescript": "^4.9.3", "vite": "^4.1.1", - "vitest": "^0.25.3", - "wasm-pack": "^0.10.3" + "vitest": "^0.25.3" }, - "type": "module" + "type": "module", + "dependencies": { + "@fontsource/be-vietnam-pro": "^4.5.8", + "@fontsource/comfortaa": "^4.5.11" + } } diff --git a/frontend/website/src/app.html b/frontend/website/src/app.html index effe0d0d2..6769ed5e8 100644 --- a/frontend/website/src/app.html +++ b/frontend/website/src/app.html @@ -3,7 +3,7 @@ - + %sveltekit.head% diff --git a/frontend/website/src/routes/+layout.svelte b/frontend/website/src/routes/+layout.svelte new file mode 100644 index 000000000..1594ff7cf --- /dev/null +++ b/frontend/website/src/routes/+layout.svelte @@ -0,0 +1,6 @@ + + + diff --git a/frontend/website/src/routes/+page.svelte b/frontend/website/src/routes/+page.svelte index 16af9e524..c070b475a 100644 --- a/frontend/website/src/routes/+page.svelte +++ b/frontend/website/src/routes/+page.svelte @@ -1,12 +1,334 @@ - - -

Welcome to SvelteKit

-

Visit kit.svelte.dev to read the documentation

+ +
+
+
+ +

Scuffle.tv

+

+ We’re building a streaming platform. +
+ A community driven streaming platform, that is. +

+
+
+ +

For Fun™

+

+ Have you ever thought of making your own streaming platform? No? +
+ Well... We are making a streaming platform. An open-source + streaming platform. +
+ You can join us on our journey of world conquer... It will be fun... Probably. +

+
+
+

So... how do we conquer the world?

+
+
+

90% Rust

+

10% Magic

+
+
+
+

+ With the power of Rust.. and + Magic, of course. +

+
+
+

Rust? Like the game?

+

+ No. Not the game, we’re talking ‘bout the programming language. +
+ If you’re a programmer you must have heard of it before. +
+ Fast and safe. That’s all you need to know about Rust. +
+ Did we mention it’s fast? Blazingly Fast™. +

+
+
+ +

So... What makes it community driven?

+

+ Open source. Made by a community of people who actually care about their work. +
+ We plan on having everything be as transparent as possible. +
+ Transparent moderation, development. +
+ That’s what it’s all about. +

+
+
+

Aren’t streaming platforms super expensive?

+

+ Yes, they are... +
+ Our biggest goal and challenge is making everything cost effective and self sustainable. +

+
+
+ + +

Alright, you convinced me. Where can I join in on the fun?

+

You can join our Discord, and check out the source code on GitHub.

+
+ +
+
+ + + diff --git a/frontend/website/svelte.config.js b/frontend/website/svelte.config.js index 87081a053..1448ea2cb 100644 --- a/frontend/website/svelte.config.js +++ b/frontend/website/svelte.config.js @@ -1,5 +1,5 @@ -import adapter from "svelte-adapter-deno"; import { vitePreprocess } from "@sveltejs/kit/vite"; +import adapter from "svelte-adapter-deno"; /** @type {import('@sveltejs/kit').Config} */ const config = { diff --git a/frontend/website/tests/test.ts b/frontend/website/tests/test.ts index 5076f5217..757c1c7a8 100644 --- a/frontend/website/tests/test.ts +++ b/frontend/website/tests/test.ts @@ -2,5 +2,5 @@ import { expect, test } from "@playwright/test"; test("index page has expected h1", async ({ page }) => { await page.goto("/"); - await expect(page.getByRole("heading", { name: "Welcome to SvelteKit" })).toBeVisible(); + await expect(page.getByRole("heading", { name: "Scuffle.tv" })).toBeVisible(); }); diff --git a/maskfile.md b/maskfile.md new file mode 100644 index 000000000..59f6cb7ae --- /dev/null +++ b/maskfile.md @@ -0,0 +1,615 @@ +# Scuffle Tasks + +## build + +> Build the project + + + +**OPTIONS** + +- container + - flags: --container + - desc: Build the project in a container +- static + - flags: --static + - desc: Compile the project statically + +```bash +set -e +if [[ "$verbose" == "true" ]]; then + set -x +fi + +static=${static:-false} + +if [ "$container" == "true" ]; then + $MASK env backup + + function cleanup { + $MASK env restore + docker stop $PID >> /dev/null + } + trap cleanup EXIT + + PID=$(docker run -d --stop-signal SIGKILL --rm -v "$(pwd)":/pwd -w /pwd ghcr.io/scuffletv/build:latest mask build) + docker logs -f $PID +else + $MASK build rust --static=$static + $MASK build website +fi +``` + +### rust + +> Build all rust code + +**OPTIONS** + +- container + - flags: --container + - desc: Build the project in a container +- static + - flags: --static + - desc: Compile the project statically + +```bash +set -e +if [[ "$verbose" == "true" ]]; then + set -x +fi + +static=${static:-false} +target=$(rustup show active-toolchain | cut -d '-' -f2- | cut -d ' ' -f1) + +if [ "$container" == "true" ]; then + $MASK env backup + + function cleanup { + $MASK env restore + docker stop $PID >> /dev/null + } + trap cleanup EXIT + + PID=$(docker run -d --stop-signal SIGKILL --rm -v "$(pwd)":/pwd -w /pwd ghcr.io/scuffletv/build:latest mask build rust --static=$static) + docker logs -f $PID +else + if [ "$static" == "true" ]; then + export RUSTFLAGS="-C target-feature=+crt-static" + fi + + cargo build --release --target=$target +fi +``` + +### website + +> Build the frontend website + +**OPTIONS** + +- container + - flags: --container + - desc: Build the project in a container + +```bash +set -e +if [[ "$verbose" == "true" ]]; then + set -x +fi + +if [ "$container" == "true" ]; then + $MASK env backup + + function cleanup { + $MASK env restore + docker stop $PID >> /dev/null + } + trap cleanup EXIT + + PID=$(docker run -d --stop-signal SIGKILL --rm -v "$(pwd)":/pwd -w /pwd ghcr.io/scuffletv/build:1.67.1 yarn workspace website build) + docker logs -f $PID +else + yarn workspace website build +fi +``` + +## clean + +> Clean the project + +**OPTIONS** + +- all + + - flags: --all + - desc: Removes everything that isn't tracked by git (use with caution, this is irreversible) + +- node_modules + + - flags: --node-modules + - desc: Removes node_modules + +- env + - flags: --env + - desc: Removes .env + +```bash +set -e +if [[ "$verbose" == "true" ]]; then + set -x +fi + +if [[ 
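The `--static` option above works by exporting `RUSTFLAGS="-C target-feature=+crt-static"` before `cargo build`; a small illustrative sketch (not part of the tasks) of how a crate can observe that choice at compile time:

```rust
fn main() {
    // The crt-static target feature is set when the build exports
    // RUSTFLAGS="-C target-feature=+crt-static", as `mask build rust
    // --static=true` does; otherwise the C runtime is linked dynamically.
    if cfg!(target_feature = "crt-static") {
        println!("C runtime statically linked");
    } else {
        println!("C runtime dynamically linked");
    }
}
```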
"$all" == "true" ]]; then + git clean -xfd +fi + +cargo clean +yarn workspace website clean + +if [ "$node_modules" == "true" ]; then + rm -rf node_modules +fi + +if [ "$env" == "true" ]; then + rm -rf .env +fi +``` + +## format + +> Format the project + +**OPTIONS** + +- no_rust + - flags: --no-rust + - type: bool + - desc: Disables Rust formatting +- no_js + - flags: --no-js + - type: bool + - desc: Disables JS formatting + +```bash +set -e +if [[ "$verbose" == "true" ]]; then + set -x +fi + +if [ "$no_rust" != "true" ]; then + cargo fmt --all + cargo clippy --fix --allow-dirty --allow-staged + cargo clippy --fix --allow-dirty --allow-staged --package player --target wasm32-unknown-unknown +fi + +if [ "$no_js" != "true" ]; then + yarn format + yarn workspace website format +fi +``` + +## lint + +> Lint the project + +**OPTIONS** + +- no_rust + - flags: --no-rust + - type: bool + - desc: Disables Rust linting +- no_js + - flags: --no-js + - type: bool + - desc: Disables JS linting + +```bash +set -e +if [[ "$verbose" == "true" ]]; then + set -x +fi + +if [ "$no_rust" != "true" ]; then + cargo clippy -- -D warnings + cargo clippy --package player --target wasm32-unknown-unknown -- -D warnings + cargo fmt --all --check + cargo sqlx prepare --check --merged -- --all-targets --all-features +fi + +if [ "$no_js" != "true" ]; then + yarn lint + yarn workspace website lint +fi +``` + +## audit + +> Audit the project + +**OPTIONS** + +- no_rust + - flags: --no-rust + - type: bool + - desc: Disables Rust linting +- no_js + - flags: --no-js + - type: bool + - desc: Disables JS linting + +```bash +set -e +if [[ "$verbose" == "true" ]]; then + set -x +fi + +if [ "$no_rust" != "true" ]; then + cargo audit +fi + +if [ "$no_js" != "true" ]; then + yarn audit +fi +``` + +## test + +> Test the project + +**OPTIONS** + +- no_rust + - flags: --no-rust + - type: bool + - desc: Disables Rust testing +- no_js + - flags: --no-js + - type: bool + - desc: Disables JS testing + +```bash +set -e +if [[ "$verbose" == "true" ]]; then + set -x +fi + +if [ "$no_rust" != "true" ]; then + cargo llvm-cov clean --workspace + cargo llvm-cov nextest --lcov --output-path lcov.info --ignore-filename-regex "(main.rs|tests.rs)" +fi + +if [ "$no_js" != "true" ]; then + yarn workspace website test +fi +``` + +## db + +> Database tasks + +### migrate + +> Migrate the database + +```bash +set -e +if [[ "$verbose" == "true" ]]; then + set -x +fi + +sqlx database create +sqlx migrate run --source ./backend/migrations +``` + +#### create (name) + +> Create a database migration + +```bash +set -e +if [[ "$verbose" == "true" ]]; then + set -x +fi + +sqlx migrate add "$name" --source ./backend/migrations -r +``` + +### rollback + +> Rollback the database + +```bash +set -e +if [[ "$verbose" == "true" ]]; then + set -x +fi + +sqlx migrate revert --source ./backend/migrations +``` + +### prepare + +> Prepare the database + +**OPTIONS** + +- no_format + - flags: --no-format + - type: bool + - desc: Disables formatting + +```bash +set -e +if [[ "$verbose" == "true" ]]; then + set -x +fi + +cargo sqlx prepare --merged -- --all-targets --all-features + +if [ "$no_format" != "true" ]; then + yarn prettier --write sqlx-data.json +fi +``` + +### reset + +> Reset the database + +```bash +set -e +if [[ "$verbose" == "true" ]]; then + set -x +fi + +sqlx database reset --source ./backend/migrations +``` + +### up + +> Starts the docker compose stack + +```bash +set -e +if [[ "$verbose" == "true" ]]; then + set -x +fi + +docker compose --file 
+### up
+
+> Starts the docker compose db stack
+
+```bash
+set -e
+if [[ "$verbose" == "true" ]]; then
+  set -x
+fi
+
+docker compose --file ./dev-stack/db.docker-compose.yml up -d
+```
+
+### down
+
+> Stops the docker compose db stack
+
+```bash
+set -e
+if [[ "$verbose" == "true" ]]; then
+  set -x
+fi
+
+docker compose --file ./dev-stack/db.docker-compose.yml down
+```
+
+### status
+
+> Gets the status of the docker compose db stack
+
+```bash
+set -e
+if [[ "$verbose" == "true" ]]; then
+  set -x
+fi
+
+docker compose --file ./dev-stack/db.docker-compose.yml ps -a
+```
+
+## env
+
+> Environment tasks
+
+### generate
+
+> Generate the environment files
+
+```bash
+set -e
+if [[ "$verbose" == "true" ]]; then
+  set -x
+fi
+
+if [ ! -f .env ]; then
+  echo "DATABASE_URL=postgres://postgres:postgres@localhost:5432/scuffle-dev" > .env
+fi
+```
+
+### backup
+
+> Backup the environment files
+
+```bash
+set -e
+if [[ "$verbose" == "true" ]]; then
+  set -x
+fi
+
+if [ -f .env ]; then
+  mv .env .env.bak
+fi
+```
+
+### restore
+
+> Restore the environment files
+
+```bash
+set -e
+if [[ "$verbose" == "true" ]]; then
+  set -x
+fi
+
+if [ -f .env.bak ]; then
+  mv .env.bak .env
+fi
+```
+
+## stack
+
+> Development stack tasks
+
+### up
+
+> Starts the docker compose stack
+
+```bash
+set -e
+if [[ "$verbose" == "true" ]]; then
+  set -x
+fi
+
+docker compose --file ./dev-stack/docker-compose.yml up -d --build
+```
+
+### down
+
+> Stops the docker compose stack
+
+```bash
+set -e
+if [[ "$verbose" == "true" ]]; then
+  set -x
+fi
+
+docker compose --file ./dev-stack/docker-compose.yml down
+```
+
+### init
+
+> Initializes the development stack
+
+```bash
+set -e
+if [[ "$verbose" == "true" ]]; then
+  set -x
+fi
+
+cp ./dev-stack/example.docker-compose.yml ./dev-stack/docker-compose.yml
+```
+
+### status
+
+> Gets the status of the docker compose stack
+
+```bash
+set -e
+if [[ "$verbose" == "true" ]]; then
+  set -x
+fi
+
+docker compose --file ./dev-stack/docker-compose.yml ps -a
+```
+
+### logs (service)
+
+> Prints the logs of the given service
+> You can show logs of multiple services by passing a single string with space-separated service names
+
+**OPTIONS**
+
+- follow
+  - flags: -f, --follow
+  - type: bool
+  - desc: Follow log output
+
+```bash
+set -e
+if [[ "$verbose" == "true" ]]; then
+  set -x
+fi
+
+follow=${follow:-false}
+
+docker compose --file ./dev-stack/docker-compose.yml logs --follow=$follow $service
+```
+## bootstrap
+
+> Bootstrap the project
+
+**OPTIONS**
+
+- no_rust
+  - flags: --no-rust
+  - type: bool
+  - desc: Disables Rust bootstrapping
+- no_js
+  - flags: --no-js
+  - type: bool
+  - desc: Disables JS bootstrapping
+- no_js_tests
+  - flags: --no-js-tests
+  - type: bool
+  - desc: Disables JS tests bootstrapping
+- no_env
+  - flags: --no-env
+  - type: bool
+  - desc: Disables environment bootstrapping
+- no_docker
+  - flags: --no-docker
+  - type: bool
+  - desc: Disables docker bootstrapping
+- no_stack
+  - flags: --no-stack
+  - type: bool
+  - desc: Disables stack bootstrapping
+- no_db
+  - flags: --no-db
+  - type: bool
+  - desc: Disables database bootstrapping
+
+```bash
+set -e
+if [[ "$verbose" == "true" ]]; then
+  set -x
+fi
+
+if [ "$no_rust" != "true" ]; then
+  rustup update
+  rustup target add wasm32-unknown-unknown
+
+  rustup component add rustfmt clippy llvm-tools-preview
+
+  cargo install cargo-binstall
+  cargo binstall cargo-watch -y
+  cargo install sqlx-cli --features rustls,postgres --no-default-features
+  cargo binstall wasm-pack -y
+  cargo binstall cargo-llvm-cov -y
+  cargo binstall cargo-nextest -y
+  cargo install cargo-audit --features vendored-openssl
+fi
+
+if [ "$no_js" != "true" ]; then
+  yarn install
+
+  if [ "$no_js_tests" != "true" ]; then
+    yarn playwright install
+  fi
+fi
+
+if [ "$no_env" != "true" ]; then
+  $MASK env generate
+fi
+
+if [ "$no_docker" != "true" ]; then
+  docker network create scuffle-dev || true
+
+  if [ "$no_stack" != "true" ]; then
+    $MASK stack init
+  fi
+
+  if [ "$no_db" != "true" ]; then
+    $MASK db up
+    $MASK db migrate
+  fi
+fi
+```
diff --git a/package.json b/package.json
index 51f1eb705..443f5b644 100644
--- a/package.json
+++ b/package.json
@@ -13,7 +13,7 @@
   ],
   "scripts": {
     "prepare": "husky install",
-    "lint": "prettier --check .",
-    "format": "prettier --write ."
+    "lint": "prettier --check \"**/*\" -u",
+    "format": "prettier --write \"**/*\" -u"
   }
 }
diff --git a/sqlx-data.json b/sqlx-data.json
index 05d9d6f59..c2110f43d 100644
--- a/sqlx-data.json
+++ b/sqlx-data.json
@@ -1,3 +1,49 @@
 {
-  "db": "PostgreSQL"
+  "db": "PostgreSQL",
+  "165e3eb51d255f5b3013430059b5267269df7285cdb0736efddbf8fca9c32ed2": {
+    "describe": {
+      "columns": [],
+      "nullable": [],
+      "parameters": {
+        "Left": []
+      }
+    },
+    "query": "INSERT INTO users (id, username, password_hash, email, email_verified, created_at, last_login_at) VALUES\n    (1, 'admin', 'abc', 'xyz@gmail.com', true, '2021-01-01 00:00:00', '2021-01-01 00:00:00'),\n    (2, 'user', 'abc2', 'xyz2@gmail.com', true, '2021-01-01 00:00:00', '2021-01-01 00:00:00'),\n    (3, 'user1', 'abc3', 'xyz3@gmail.com', true, '2021-01-01 00:00:00', '2021-01-01 00:00:00'),\n    (4, 'user2', 'abc4', 'xyz4@gmail.com', true, '2021-01-01 00:00:00', '2021-01-01 00:00:00'),\n    (5, 'user3', 'abc5', 'xyz5@gmail.com', true, '2021-01-01 00:00:00', '2021-01-01 00:00:00'),\n    (6, 'user4', 'abc6', 'xyz6@gmail.com', true, '2021-01-01 00:00:00', '2021-01-01 00:00:00'),\n    (7, 'user5', 'abc7', 'xyz7@gmail.com', true, '2021-01-01 00:00:00', '2021-01-01 00:00:00'),\n    (8, 'user6', 'abc8', 'xyz8@gmail.com', true, '2021-01-01 00:00:00', '2021-01-01 00:00:00'),\n    (9, 'user7', 'abc9', 'xyz9@gmail.com', true, '2021-01-01 00:00:00', '2021-01-01 00:00:00'),\n    (10, 'user8', 'abc10', 'xyz10@gmail.com', true, '2021-01-01 00:00:00', '2021-01-01 00:00:00'),\n    (11, 'user9', 'abc11', 'xyz11@gmail.com', true, '2021-01-01 00:00:00', '2021-01-01 00:00:00');"
+  },
+  "1c9b66121e84a7657c44ebd2c19957f71c02ef364f4c1727c68d530a3b68c31a": {
+    "describe": {
+      "columns": [
+        {
+          "name": "id",
+          "ordinal": 0,
+          "type_info": "Int8"
+        },
+        {
+          "name": "username",
+          "ordinal": 1,
+          "type_info": "Varchar"
+        },
+        {
+          "name": "created_at",
+          "ordinal": 2,
+          "type_info": "Timestamptz"
+        }
+      ],
+      "nullable": [false, false, false],
+      "parameters": {
+        "Left": ["Int8Array"]
+      }
+    },
+    "query": "\n        SELECT id, username, created_at\n        FROM users\n        WHERE id = ANY($1)\n        "
+  },
+  "f4f8f8c2668ec23ba1f4a315d74087521496603e8b1bc10475a864001e795593": {
+    "describe": {
+      "columns": [],
+      "nullable": [],
+      "parameters": {
+        "Left": []
+      }
+    },
+    "query": "DELETE FROM users"
+  }
 }
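For reference, a sketch of the kind of compile-time-checked query that produces the `SELECT id, username, created_at` entry above; the function and row struct here are hypothetical, and this assumes sqlx with the `postgres` and `chrono` features plus `SQLX_OFFLINE=true`, under which sqlx validates queries against `sqlx-data.json` (regenerated by `mask db prepare`) instead of a live database:

```rust
use chrono::{DateTime, Utc};

#[derive(Debug)]
struct UserRow {
    id: i64,
    username: String,
    created_at: DateTime<Utc>,
}

// Hypothetical call site: the macro checks the SQL and the UserRow field
// types at compile time, using sqlx-data.json when building offline.
async fn users_by_ids(pool: &sqlx::PgPool, ids: &[i64]) -> sqlx::Result<Vec<UserRow>> {
    sqlx::query_as!(
        UserRow,
        "SELECT id, username, created_at FROM users WHERE id = ANY($1)",
        ids
    )
    .fetch_all(pool)
    .await
}
```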
"https://registry.yarnpkg.com/@fontsource/comfortaa/-/comfortaa-4.5.11.tgz#f3dc5e07637fc26b87e80897c611d60229d5e24c" + integrity sha512-KKC2C6KbF9BD6m9+wMf5hK0wFjIi3p3J/6C4JZW6OF9G6K4qZJFp2dBZzsEBepKh4s9/Q5G1SWsUUZY3ZeZNDA== + "@humanwhocodes/config-array@^0.11.8": version "0.11.8" resolved "https://registry.yarnpkg.com/@humanwhocodes/config-array/-/config-array-0.11.8.tgz#03595ac2075a4dc0f191cc2131de14fbd7d410b9" @@ -898,7 +908,7 @@ check-error@^1.0.2: resolved "https://registry.yarnpkg.com/check-error/-/check-error-1.0.2.tgz#574d312edd88bb5dd8912e9286dd6c0aed4aac82" integrity sha512-BrgHpW9NURQgzoNyjfq0Wu6VFO6D7IZEmJNdtgNqpzGG8RuNFHt2jQxWlAs4HMe119chBnv+34syEZtc6IhLtA== -chokidar@^3.4.1: +"chokidar@>=3.0.0 <4.0.0", chokidar@^3.4.1: version "3.5.3" resolved "https://registry.yarnpkg.com/chokidar/-/chokidar-3.5.3.tgz#1cf37c8707b932bd1af1ae22c0432e2acd1903bd" integrity sha512-Dr3sfKRP6oTcjf2JmUmFJfeVMvXBdegxB0iVQ5eb2V10uFJUCAS8OByZdVAyVb8xXNz3GjjTgj9kLWsZTqE6kw== @@ -1687,6 +1697,11 @@ ignore@^5.2.0: resolved "https://registry.yarnpkg.com/ignore/-/ignore-5.2.4.tgz#a291c0c6178ff1b960befe47fcdec301674a6324" integrity sha512-MAb38BcSbH0eHNBxn7ql2NH/kX33OkB3lZ1BNdh7ENeRChHTYsTvWrMubiIAMNS2llXEEgZ1MUOBtXChP3kaFQ== +immutable@^4.0.0: + version "4.2.4" + resolved "https://registry.yarnpkg.com/immutable/-/immutable-4.2.4.tgz#83260d50889526b4b531a5e293709a77f7c55a2a" + integrity sha512-WDxL3Hheb1JkRN3sQkyujNlL/xRjAo3rJtaU5xeufUauG66JdMr32bLj4gF+vWl84DIA3Zxw7tiAjneYzRRw+w== + import-fresh@^3.0.0, import-fresh@^3.2.1: version "3.3.0" resolved "https://registry.yarnpkg.com/import-fresh/-/import-fresh-3.3.0.tgz#37162c25fcb9ebaa2e6e53d5b4d88ce17d9e0c2b" @@ -2601,6 +2616,15 @@ sander@^0.5.0: mkdirp "^0.5.1" rimraf "^2.5.2" +sass@^1.58.3: + version "1.58.3" + resolved "https://registry.yarnpkg.com/sass/-/sass-1.58.3.tgz#2348cc052061ba4f00243a208b09c40e031f270d" + integrity sha512-Q7RaEtYf6BflYrQ+buPudKR26/lH+10EmO9bBqbmPh/KeLqv8bjpTNqxe71ocONqXq+jYiCbpPUmQMS+JJPk4A== + dependencies: + chokidar ">=3.0.0 <4.0.0" + immutable "^4.0.0" + source-map-js ">=0.6.2 <2.0.0" + "semver@2 || 3 || 4 || 5": version "5.7.1" resolved "https://registry.yarnpkg.com/semver/-/semver-5.7.1.tgz#a954f931aeba508d307bbf069eff0c01c96116f7" @@ -2690,7 +2714,7 @@ sorcery@^0.11.0: minimist "^1.2.0" sander "^0.5.0" -source-map-js@^1.0.2: +"source-map-js@>=0.6.2 <2.0.0", source-map-js@^1.0.2: version "1.0.2" resolved "https://registry.yarnpkg.com/source-map-js/-/source-map-js-1.0.2.tgz#adbc361d9c62df380125e7f161f71c826f1e490c" integrity sha512-R0XvVJ9WusLiqTCEiGCmICCMplcCkIwwR11mOSD9CR5u+IXYdiseeEuXCVAjS54zqwkLcPNnmU4OeJ6tUrWhDw==