diff --git a/.all-contributorsrc b/.all-contributorsrc deleted file mode 100644 index 3a416f917e..0000000000 --- a/.all-contributorsrc +++ /dev/null @@ -1,194 +0,0 @@ -{ - "files": [ - "README.md" - ], - "imageSize": 100, - "commit": false, - "contributors": [ - { - "login": "shogunpurple", - "name": "Martin McKeaveney", - "avatar_url": "https://avatars1.githubusercontent.com/u/11256663?v=4", - "profile": "http://martinmck.com", - "contributions": [ - "code", - "doc", - "test", - "infra" - ] - }, - { - "login": "mike12345567", - "name": "Michael Drury", - "avatar_url": "https://avatars2.githubusercontent.com/u/4407001?v=4", - "profile": "http://www.michaeldrury.co.uk/", - "contributions": [ - "doc", - "code", - "test", - "infra" - ] - }, - { - "login": "aptkingston", - "name": "Andrew Kingston", - "avatar_url": "https://avatars3.githubusercontent.com/u/9075550?v=4", - "profile": "https://github.com/aptkingston", - "contributions": [ - "doc", - "code", - "test", - "design" - ] - }, - { - "login": "mjashanks", - "name": "Michael Shanks", - "avatar_url": "https://avatars3.githubusercontent.com/u/3524181?v=4", - "profile": "https://budibase.com/", - "contributions": [ - "doc", - "code", - "test" - ] - }, - { - "login": "kevmodrome", - "name": "Kevin Åberg Kultalahti", - "avatar_url": "https://avatars3.githubusercontent.com/u/534488?v=4", - "profile": "https://github.com/kevmodrome", - "contributions": [ - "doc", - "code", - "test" - ] - }, - { - "login": "joebudi", - "name": "Joe", - "avatar_url": "https://avatars2.githubusercontent.com/u/49767913?v=4", - "profile": "https://www.budibase.com/", - "contributions": [ - "doc", - "code", - "content", - "design" - ] - }, - { - "login": "Rory-Powell", - "name": "Rory Powell", - "avatar_url": "https://avatars.githubusercontent.com/u/8755148?v=4", - "profile": "https://github.com/Rory-Powell", - "contributions": [ - "code", - "doc", - "test" - ] - }, - { - "login": "PClmnt", - "name": "Peter Clement", - "avatar_url": "https://avatars.githubusercontent.com/u/5665926?v=4", - "profile": "https://github.com/PClmnt", - "contributions": [ - "code", - "doc", - "test" - ] - }, - { - "login": "Conor-Mack", - "name": "Conor_Mack", - "avatar_url": "https://avatars1.githubusercontent.com/u/36074859?v=4", - "profile": "https://github.com/Conor-Mack", - "contributions": [ - "code", - "test" - ] - }, - { - "login": "pngwn", - "name": "pngwn", - "avatar_url": "https://avatars1.githubusercontent.com/u/12937446?v=4", - "profile": "https://github.com/pngwn", - "contributions": [ - "code", - "test" - ] - }, - { - "login": "HugoLd", - "name": "HugoLd", - "avatar_url": "https://avatars0.githubusercontent.com/u/26521848?v=4", - "profile": "https://github.com/HugoLd", - "contributions": [ - "code" - ] - }, - { - "login": "victoriasloan", - "name": "victoriasloan", - "avatar_url": "https://avatars.githubusercontent.com/u/9913651?v=4", - "profile": "https://github.com/victoriasloan", - "contributions": [ - "code" - ] - }, - { - "login": "yashank09", - "name": "yashank09", - "avatar_url": "https://avatars.githubusercontent.com/u/37672190?v=4", - "profile": "https://github.com/yashank09", - "contributions": [ - "code" - ] - }, - { - "login": "SOVLOOKUP", - "name": "SOVLOOKUP", - "avatar_url": "https://avatars.githubusercontent.com/u/53158137?v=4", - "profile": "https://github.com/SOVLOOKUP", - "contributions": [ - "code" - ] - }, - { - "login": "seoulaja", - "name": "seoulaja", - "avatar_url": "https://avatars.githubusercontent.com/u/15101654?v=4", - "profile": 
"https://github.com/seoulaja", - "contributions": [ - "translation" - ] - }, - { - "login": "mslourens", - "name": "Maurits Lourens", - "avatar_url": "https://avatars.githubusercontent.com/u/1907152?v=4", - "profile": "https://github.com/mslourens", - "contributions": [ - "test", - "code" - ] - }, - { - "login": "Rory-Powell", - "name": "Rory Powell", - "avatar_url": "https://avatars.githubusercontent.com/u/8755148?v=4", - "profile": "https://github.com/Rory-Powell", - "contributions": [ - "infra", - "test", - "code" - ] - } - ], - "contributorsPerLine": 7, - "projectName": "budibase", - "projectOwner": "Budibase", - "repoType": "github", - "repoHost": "https://github.com", - "skipCi": true, - "commitConvention": "none" -} diff --git a/.eslintignore b/.eslintignore index 021fe8e367..94984a446f 100644 --- a/.eslintignore +++ b/.eslintignore @@ -6,5 +6,11 @@ packages/server/coverage packages/worker/coverage packages/backend-core/coverage packages/server/client +packages/server/coverage packages/builder/.routify packages/sdk/sdk +packages/account-portal/packages/server/build +packages/account-portal/packages/ui/.routify +packages/account-portal/packages/ui/build +**/*.ivm.bundle.js +packages/server/build/oldClientVersions/**/** diff --git a/.eslintrc.json b/.eslintrc.json index 75584b8163..2a40c6cc29 100644 --- a/.eslintrc.json +++ b/.eslintrc.json @@ -19,6 +19,7 @@ "bundle.js" ], "extends": ["eslint:recommended"], + "plugins": ["import", "eslint-plugin-local-rules"], "overrides": [ { "files": ["**/*.svelte"], @@ -30,25 +31,73 @@ "sourceType": "module", "allowImportExportEverywhere": true } - }, { "files": ["**/*.ts"], + "excludedFiles": ["qa-core/**"], "parser": "@typescript-eslint/parser", + "plugins": ["@typescript-eslint"], "extends": ["eslint:recommended"], + "globals": { + "NodeJS": true + }, "rules": { "no-unused-vars": "off", - "no-inner-declarations": "off", - "no-case-declarations": "off", - "no-useless-escape": "off", - "no-undef": "off", - "no-prototype-builtins": "off" + "@typescript-eslint/no-unused-vars": "error", + "local-rules/no-budibase-imports": "error" + } + }, + { + "files": ["**/*.spec.ts"], + "excludedFiles": ["qa-core/**"], + "parser": "@typescript-eslint/parser", + "plugins": ["jest", "@typescript-eslint"], + "extends": ["eslint:recommended", "plugin:jest/recommended"], + "env": { + "jest/globals": true + }, + "globals": { + "NodeJS": true + }, + "rules": { + "no-unused-vars": "off", + "@typescript-eslint/no-unused-vars": "error", + "local-rules/no-test-com": "error", + "local-rules/email-domain-example-com": "error", + "no-console": "warn", + // We have a lot of tests that don't have assertions, they use our test + // API client that does the assertions for them + "jest/expect-expect": "off", + // We do this in some tests where the behaviour of internal tables + // differs to external, but the API is broadly the same + "jest/no-conditional-expect": "off" + } + }, + { + "files": [ + "packages/builder/**/*", + "packages/client/**/*", + "packages/frontend-core/**/*" + ], + "rules": { + "no-console": ["error", { "allow": ["warn", "error", "debug"] }] } } ], "rules": { "no-self-assign": "off", - "no-unused-vars": ["error", { "varsIgnorePattern": "^_", "argsIgnorePattern": "^_", "destructuredArrayIgnorePattern": "^_" }] + "no-unused-vars": [ + "error", + { + "varsIgnorePattern": "^_", + "argsIgnorePattern": "^_", + "destructuredArrayIgnorePattern": "^_" + } + ], + "import/no-relative-packages": "error", + "import/export": "error", + "import/no-duplicates": "error", + 
"import/newline-after-import": "error" }, "globals": { "GeolocationPositionError": true diff --git a/.github/workflows/README.md b/.github/workflows/README.md index 9b75a2e73a..029dd5af42 100644 --- a/.github/workflows/README.md +++ b/.github/workflows/README.md @@ -1,139 +1,45 @@ # Budibase CI Pipelines -Welcome to the budibase CI pipelines directory. This document details what each of the CI pipelines are for, and come common combinations. +Welcome to the Budibase CI pipelines directory. This document details what each of the CI pipelines are for, and come common combinations. ## All CI Pipelines -### Note - -- When running workflow dispatch jobs, ensure you always run them off the `master` branch. It defaults to `develop`, so double check before running any jobs. The exception to this case is the `deploy-release` job which requires the develop branch. - ### Standard CI Build Job (budibase_ci.yml) Triggers: -- PR or push to develop - PR or push to master -The standard CI Build job is what runs when you raise a PR to develop or master. +The standard CI Build job is what runs when you raise a PR to master. - Installs all dependencies, - builds the project - run the unit tests - Generate test coverage metrics with codecov - Run the integration tests +- Check that the pro and account portal submodules are pointing to the lastest master head -### Release Develop Job (release-develop.yml) +### Release Job (tag-release.yml) Triggers: -- Push to develop +- Manually triggered -The job responsible for building, tagging and pushing docker images out to the test and release environments. +This job is responsible for building and pushing all the production services, packages and images. This is done via [budibase-deploys](https://github.com/Budibase/budibase-deploys/actions/workflows/release.yml). -- Installs all dependencies -- builds the project -- run the unit tests -- publish the budibase JS packages under a prerelease tag to NPM -- build, tag and push docker images under the `develop` tag to docker hub +An input is required, indicating if the new version will be a `patch`, `minor` or `major` bump. -These images will then be pulled by the test and release environments, updating the latest automatically. Discord notifications are sent to the #infra channel when this occurs. - -### Release Job (release.yml) - -Triggers: - -- Push to master - -This job is responsible for building and pushing the latest code to NPM and docker hub, so that it can be deployed. - -- Installs all dependencies -- builds the project -- run the unit tests -- publish the budibase JS packages under a release tag to NPM (always incremented by patch versions) -- build, tag and push docker images under the `v.x.x.x` (the tag of the NPM release) tag to docker hub - -### Release Selfhost Job (release-selfhost.yml) - -Triggers: - -- Manual Workflow Dispatch Trigger - -This job is responsible for delivering the latest version of budibase to those that are self-hosting. - -This job relies on the release job to have run first, so the latest image is pushed to dockerhub. This job then will pull the latest version from `lerna.json` and try to find an image in dockerhub corresponding to that version. For example, if the version in `lerna.json` is `1.0.0`: - -- Pull the images for all budibase services tagged `v1.0.0` from dockerhub -- Tag these images as `latest` -- Push them back to dockerhub. This now means anyone who pulls `latest` (self hosters using docker-compose) will get the latest version. 
-- Build and release the budibase helm chart for kubernetes users -- Perform a github release with the latest version. You can see previous releases here (https://github.com/Budibase/budibase/releases) - -### Deploy Release (deploy-release.yml) - -Triggers: - -- Manual Workflow Dispatch Trigger - -This job is responsible for deploying to our release, cloud kubernetes environment. You must run the release job first, to ensure that the latest images have been built and pushed to docker hub. After kicking off this job, the following will occur: - -- Checks out the release branch -- Pulls the latest `values.yaml` from budibase infra, a private repo containing budibases infrastructure configuration -- Gets the latest budibase version from `lerna.json`, if it hasn't been specified in the workflow when you kicked it off -- Configures AWS Credentials -- Deploys the helm chart in the budibase repo to our preproduction EKS cluster, injecting the `values.yaml` we pulled from budibase-infra -- Fires off a discord webhook in the #infra channel to show that the deployment completely successfully. - -### Deploy Preprod (deploy-preprod.yml) - -Triggers: - -- Manual Workflow Dispatch Trigger - -This job is responsible for deploying to our preprod, cloud kubernetes environment. You must run the release job first, to ensure that the latest images have been built and pushed to docker hub. After kicking off this job, the following will occur: - -- Checks out the master branch -- Pulls the latest `values.yaml` from budibase infra, a private repo containing budibases infrastructure configuration -- Gets the latest budibase version from `lerna.json`, if it hasn't been specified in the workflow when you kicked it off -- Configures AWS Credentials -- Deploys the helm chart in the budibase repo to our preprod EKS cluster, injecting the `values.yaml` we pulled from budibase-infra -- Fires off a discord webhook in the #infra channel to show that the deployment completely successfully. - -### Deploy Production (deploy-cloud.yml) - -Triggers: - -- Manual Workflow Dispatch Trigger - -This job is responsible for deploying to our production, cloud kubernetes environment. You must run the release job first, to ensure that the latest images have been built and pushed to docker hub. You can also manually enter a version number for this job, so you can perform rollbacks or upgrade to a specific version. After kicking off this job, the following will occur: - -- Checks out the master branch -- Pulls the latest `values.yaml` from budibase infra, a private repo containing budibases infrastructure configuration -- Gets the latest budibase version from `lerna.json`, if it hasn't been specified in the workflow when you kicked it off -- Configures AWS Credentials -- Deploys the helm chart in the budibase repo to our production EKS cluster, injecting the `values.yaml` we pulled from budibase-infra -- Fires off a discord webhook in the #infra channel to show that the deployment completely successfully. 
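To make the manual Release Job trigger above concrete: a release can also be kicked off from the command line rather than the Actions UI. A minimal sketch using the GitHub CLI, assuming `gh` is installed and authenticated — the `versioning` input name is taken from `tag-release.yml` later in this changeset:

```bash
# Kick off a release from master with a patch bump.
# The "versioning" input accepts patch, minor or major (see tag-release.yml).
gh workflow run tag-release.yml --ref master -f versioning=patch

# Follow the run that was just started.
gh run watch
```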
+More documentation can be found here: https://budibase.atlassian.net/wiki/spaces/DEVOPS/pages/347930625/Production+release ## Common Workflows ### Deploy Changes to Production (Release) -- Merge `develop` into `master` -- Wait for budibase CI job and release job to run -- Run cloud deploy job -- Run release selfhost job - -### Deploy Changes to Production (Hotfix) - -- Branch off `master` -- Perform your hotfix -- Merge back into `master` -- Wait for budibase CI job and release job to run -- Run cloud deploy job -- Run release selfhost job +- Merge your changes into `master` +- Run `tag-release.yml` +- Check the progress in [budibase-deploys](https://github.com/Budibase/budibase-deploys/actions/workflows/release.yml) ### Rollback A Bad Cloud Deployment -- Kick off cloud deploy job -- Ensure you are running off master -- Enter the version number of the last known good version of budibase. For example `1.0.0` +Rollback documentation can be found here: +https://budibase.atlassian.net/wiki/spaces/DEVOPS/pages/347930625/Production+release#Rollback diff --git a/.github/workflows/budibase_ci.yml b/.github/workflows/budibase_ci.yml index 77867c8617..030ad6578e 100644 --- a/.github/workflows/budibase_ci.yml +++ b/.github/workflows/budibase_ci.yml @@ -12,6 +12,13 @@ on: - master pull_request: workflow_dispatch: + workflow_call: + inputs: + run_as_oss: + type: boolean + required: false + description: Force running checks as if it were an OSS contributor + default: false env: BRANCH: ${{ github.event.pull_request.head.ref }} @@ -19,56 +26,48 @@ env: PERSONAL_ACCESS_TOKEN: ${{ secrets.PERSONAL_ACCESS_TOKEN }} NX_BASE_BRANCH: origin/${{ github.base_ref }} USE_NX_AFFECTED: ${{ github.event_name == 'pull_request' }} + IS_OSS_CONTRIBUTOR: ${{ inputs.run_as_oss == true || (github.event_name == 'pull_request' && github.event.pull_request.head.repo.full_name != 'Budibase/budibase') }} jobs: lint: runs-on: ubuntu-latest steps: - - name: Checkout repo and submodules - uses: actions/checkout@v3 - if: github.event_name != 'pull_request' || github.event.pull_request.head.repo.full_name == 'Budibase/budibase' + - name: Checkout repo + uses: actions/checkout@v4 with: - submodules: true + submodules: ${{ env.IS_OSS_CONTRIBUTOR == 'false' }} token: ${{ secrets.PERSONAL_ACCESS_TOKEN || github.token }} - - name: Checkout repo only - uses: actions/checkout@v3 - if: github.event_name == 'pull_request' && github.event.pull_request.head.repo.full_name != 'Budibase/budibase' - - name: Use Node.js 18.x - uses: actions/setup-node@v3 + - name: Use Node.js 20.x + uses: actions/setup-node@v4 with: - node-version: 18.x - cache: "yarn" + node-version: 20.x + cache: yarn - run: yarn --frozen-lockfile - run: yarn lint build: runs-on: ubuntu-latest steps: - - name: Checkout repo and submodules - uses: actions/checkout@v3 - if: github.event_name != 'pull_request' || github.event.pull_request.head.repo.full_name == 'Budibase/budibase' + - name: Checkout repo + uses: actions/checkout@v4 with: - submodules: true + submodules: ${{ env.IS_OSS_CONTRIBUTOR == 'false' }} token: ${{ secrets.PERSONAL_ACCESS_TOKEN || github.token }} fetch-depth: 0 - - name: Checkout repo only - uses: actions/checkout@v3 - if: github.event_name == 'pull_request' && github.event.pull_request.head.repo.full_name != 'Budibase/budibase' - with: - fetch-depth: 0 - - name: Use Node.js 18.x - uses: actions/setup-node@v3 + - name: Use Node.js 20.x + uses: actions/setup-node@v4 with: - node-version: 18.x - cache: "yarn" + node-version: 20.x + cache: yarn - run: yarn 
--frozen-lockfile # Build all the projects - name: Build run: | - yarn build + yarn build:oss + yarn build:account-portal # Check the types of the projects built via esbuild - name: Check types run: | @@ -78,62 +77,57 @@ jobs: yarn check:types fi - test-libraries: + helm-lint: runs-on: ubuntu-latest steps: - - name: Checkout repo and submodules - uses: actions/checkout@v3 - if: github.event_name != 'pull_request' || github.event.pull_request.head.repo.full_name == 'Budibase/budibase' - with: - submodules: true - token: ${{ secrets.PERSONAL_ACCESS_TOKEN || github.token }} - fetch-depth: 0 - - name: Checkout repo only - uses: actions/checkout@v3 - if: github.event_name == 'pull_request' && github.event.pull_request.head.repo.full_name != 'Budibase/budibase' + - name: Checkout repo + uses: actions/checkout@v4 with: fetch-depth: 0 - - name: Use Node.js 18.x - uses: actions/setup-node@v3 + - name: Setup Helm + uses: azure/setup-helm@v3 + - run: cd charts/budibase && helm lint . + + test-libraries: + runs-on: ubuntu-latest + steps: + - name: Checkout repo + uses: actions/checkout@v4 with: - node-version: 18.x - cache: "yarn" + submodules: ${{ env.IS_OSS_CONTRIBUTOR == 'false' }} + token: ${{ secrets.PERSONAL_ACCESS_TOKEN || github.token }} + fetch-depth: 0 + + - name: Use Node.js 20.x + uses: actions/setup-node@v4 + with: + node-version: 20.x + cache: yarn - run: yarn --frozen-lockfile - name: Test run: | if ${{ env.USE_NX_AFFECTED }}; then - yarn test --ignore=@budibase/worker --ignore=@budibase/server --ignore=@budibase/pro --since=${{ env.NX_BASE_BRANCH }} + yarn test --ignore=@budibase/worker --ignore=@budibase/server --since=${{ env.NX_BASE_BRANCH }} else - yarn test --ignore=@budibase/worker --ignore=@budibase/server --ignore=@budibase/pro + yarn test --ignore=@budibase/worker --ignore=@budibase/server fi - - uses: codecov/codecov-action@v3 - with: - token: ${{ secrets.CODECOV_TOKEN }} # not required for public repos - name: codecov-umbrella - verbose: true test-worker: runs-on: ubuntu-latest steps: - - name: Checkout repo and submodules - uses: actions/checkout@v3 - if: github.event_name != 'pull_request' || github.event.pull_request.head.repo.full_name == 'Budibase/budibase' + - name: Checkout repo + uses: actions/checkout@v4 with: - submodules: true + submodules: ${{ env.IS_OSS_CONTRIBUTOR == 'false' }} token: ${{ secrets.PERSONAL_ACCESS_TOKEN || github.token }} fetch-depth: 0 - - name: Checkout repo only - uses: actions/checkout@v3 - if: github.event_name == 'pull_request' && github.event.pull_request.head.repo.full_name != 'Budibase/budibase' - with: - fetch-depth: 0 - - name: Use Node.js 18.x - uses: actions/setup-node@v3 + - name: Use Node.js 20.x + uses: actions/setup-node@v4 with: - node-version: 18.x - cache: "yarn" + node-version: 20.x + cache: yarn - run: yarn --frozen-lockfile - name: Test worker run: | @@ -143,34 +137,36 @@ jobs: yarn test --scope=@budibase/worker fi - - uses: codecov/codecov-action@v3 - with: - token: ${{ secrets.CODECOV_TOKEN || github.token }} # not required for public repos - name: codecov-umbrella - verbose: true - test-server: runs-on: ubuntu-latest + env: + DEBUG: testcontainers,testcontainers:exec,testcontainers:build,testcontainers:pull steps: - - name: Checkout repo and submodules - uses: actions/checkout@v3 - if: github.event_name != 'pull_request' || github.event.pull_request.head.repo.full_name == 'Budibase/budibase' + - name: Checkout repo + uses: actions/checkout@v4 with: - submodules: true + submodules: ${{ env.IS_OSS_CONTRIBUTOR == 
'false' }} token: ${{ secrets.PERSONAL_ACCESS_TOKEN || github.token }} fetch-depth: 0 - - name: Checkout repo only - uses: actions/checkout@v3 - if: github.event_name == 'pull_request' && github.event.pull_request.head.repo.full_name != 'Budibase/budibase' - with: - fetch-depth: 0 - - name: Use Node.js 18.x - uses: actions/setup-node@v3 + - name: Use Node.js 20.x + uses: actions/setup-node@v4 with: - node-version: 18.x - cache: "yarn" + node-version: 20.x + cache: yarn + + - name: Pull testcontainers images + run: | + docker pull mcr.microsoft.com/mssql/server:2022-latest + docker pull mysql:8.3 + docker pull postgres:16.1-bullseye + docker pull mongo:7.0-jammy + docker pull mariadb:lts + docker pull testcontainers/ryuk:0.5.1 + docker pull budibase/couchdb + - run: yarn --frozen-lockfile + - name: Test server run: | if ${{ env.USE_NX_AFFECTED }}; then @@ -179,58 +175,26 @@ jobs: yarn test --scope=@budibase/server fi - - uses: codecov/codecov-action@v3 - with: - token: ${{ secrets.CODECOV_TOKEN || github.token }} # not required for public repos - name: codecov-umbrella - verbose: true - - test-pro: - runs-on: ubuntu-latest - if: github.event_name != 'pull_request' || github.event.pull_request.head.repo.full_name == 'Budibase/budibase' - steps: - - name: Checkout repo and submodules - uses: actions/checkout@v3 - with: - submodules: true - token: ${{ secrets.PERSONAL_ACCESS_TOKEN || github.token }} - fetch-depth: 0 - - - name: Use Node.js 18.x - uses: actions/setup-node@v3 - with: - node-version: 18.x - cache: "yarn" - - run: yarn --frozen-lockfile - - name: Test - run: | - if ${{ env.USE_NX_AFFECTED }}; then - yarn test --scope=@budibase/pro --since=${{ env.NX_BASE_BRANCH }} - else - yarn test --scope=@budibase/pro - fi - integration-test: runs-on: ubuntu-latest steps: - - name: Checkout repo and submodules - uses: actions/checkout@v3 - if: github.event_name != 'pull_request' || github.event.pull_request.head.repo.full_name == 'Budibase/budibase' + - name: Checkout repo + uses: actions/checkout@v4 with: - submodules: true + submodules: ${{ env.IS_OSS_CONTRIBUTOR == 'false' }} token: ${{ secrets.PERSONAL_ACCESS_TOKEN || github.token }} - - name: Checkout repo only - uses: actions/checkout@v3 - if: github.event_name == 'pull_request' && github.event.pull_request.head.repo.full_name != 'Budibase/budibase' - - name: Use Node.js 18.x - uses: actions/setup-node@v3 + - name: Use Node.js 20.x + uses: actions/setup-node@v4 with: - node-version: 18.x - cache: "yarn" + node-version: 20.x + cache: yarn - run: yarn --frozen-lockfile - name: Build packages run: yarn build --scope @budibase/server --scope @budibase/worker + - name: Build backend-core for OSS contributor (required for pro) + if: ${{ env.IS_OSS_CONTRIBUTOR == 'true' }} + run: yarn build --scope @budibase/backend-core - name: Run tests run: | cd qa-core @@ -242,10 +206,10 @@ jobs: check-pro-submodule: runs-on: ubuntu-latest - if: github.event_name != 'pull_request' || github.event.pull_request.head.repo.full_name == 'Budibase/budibase' + if: inputs.run_as_oss != true && (github.event_name != 'pull_request' || github.event.pull_request.head.repo.full_name == 'Budibase/budibase') steps: - name: Checkout repo and submodules - uses: actions/checkout@v3 + uses: actions/checkout@v4 with: submodules: true token: ${{ secrets.PERSONAL_ACCESS_TOKEN || github.token }} @@ -268,23 +232,88 @@ jobs: echo "pro_commit=$pro_commit" echo "pro_commit=$pro_commit" >> "$GITHUB_OUTPUT" echo "base_commit=$base_commit" + + base_commit_excluding_merges=$(git log 
--no-merges -n 1 --format=format:%H $base_commit) + echo "base_commit_excluding_merges=$base_commit_excluding_merges" + echo "base_commit_excluding_merges=$base_commit_excluding_merges" >> "$GITHUB_OUTPUT" + else + echo "Nothing to do - branch to branch merge." + fi + + - name: Check submodule merged and latest on base branch + if: ${{ steps.get_pro_commits.outputs.base_commit_excluding_merges != '' }} + run: | + cd packages/pro + base_commit_excluding_merges='${{ steps.get_pro_commits.outputs.base_commit_excluding_merges }}' + pro_commit='${{ steps.get_pro_commits.outputs.pro_commit }}' + + any_commit=$(git log --no-merges $base_commit_excluding_merges...$pro_commit) + + if [ -n "$any_commit" ]; then + echo $any_commit + + echo "An error occurred: " + echo 'Submodule commit does not match the latest commit on the "${{ steps.get_pro_commits.outputs.target_branch }}" branch.' + echo 'Refer to the pro repo to merge your changes: https://github.com/Budibase/budibase-pro/blob/master/docs/getting_started.md' + + exit 1 + else + echo 'All good, the submodule had been merged and setup correctly!' + fi + + check-accountportal-submodule: + runs-on: ubuntu-latest + if: inputs.run_as_oss != true && (github.event_name != 'pull_request' || github.event.pull_request.head.repo.full_name == 'Budibase/budibase') + steps: + - name: Checkout repo and submodules + uses: actions/checkout@v4 + with: + submodules: true + token: ${{ secrets.PERSONAL_ACCESS_TOKEN || github.token }} + fetch-depth: 0 + + - uses: dorny/paths-filter@v3 + id: changes + with: + filters: | + src: + - packages/account-portal/** + + - if: steps.changes.outputs.src == 'true' + name: Check account portal commit + id: get_accountportal_commits + run: | + cd packages/account-portal + accountportal_commit=$(git rev-parse HEAD) + + branch="${{ github.base_ref || github.ref_name }}" + echo "Running on branch '$branch' (base_ref=${{ github.base_ref }}, ref_name=${{ github.head_ref }})" + + base_commit=$(git rev-parse origin/master) + + if [[ ! -z $base_commit ]]; then + echo "target_branch=$branch" + echo "target_branch=$branch" >> "$GITHUB_OUTPUT" + echo "accountportal_commit=$accountportal_commit" + echo "accountportal_commit=$accountportal_commit" >> "$GITHUB_OUTPUT" + echo "base_commit=$base_commit" echo "base_commit=$base_commit" >> "$GITHUB_OUTPUT" else echo "Nothing to do - branch to branch merge." 
fi - name: Check submodule merged to base branch - if: ${{ steps.get_pro_commits.outputs.base_commit != '' }} - uses: actions/github-script@v4 + if: ${{ steps.get_accountportal_commits.outputs.base_commit != '' }} + uses: actions/github-script@v7 with: github-token: ${{ secrets.GITHUB_TOKEN }} script: | - const submoduleCommit = '${{ steps.get_pro_commits.outputs.pro_commit }}'; - const baseCommit = '${{ steps.get_pro_commits.outputs.base_commit }}'; + const submoduleCommit = '${{ steps.get_accountportal_commits.outputs.accountportal_commit }}'; + const baseCommit = '${{ steps.get_accountportal_commits.outputs.base_commit }}'; if (submoduleCommit !== baseCommit) { - console.error('Submodule commit does not match the latest commit on the "${{ steps.get_pro_commits.outputs.target_branch }}" branch.'); - console.error('Refer to the pro repo to merge your changes: https://github.com/Budibase/budibase-pro/blob/develop/docs/getting_started.md') + console.error('Submodule commit does not match the latest commit on the "${{ steps.get_accountportal_commits.outputs.target_branch }}" branch.'); + console.error('Refer to the account portal repo to merge your changes: https://github.com/Budibase/account-portal/blob/master/docs/index.md') process.exit(1); } else { console.log('All good, the submodule had been merged and setup correctly!') diff --git a/.github/workflows/check-oss-contributor.yml b/.github/workflows/check-oss-contributor.yml new file mode 100644 index 0000000000..398f07a130 --- /dev/null +++ b/.github/workflows/check-oss-contributor.yml @@ -0,0 +1,35 @@ +name: OSS contributor checks +on: + workflow_dispatch: + schedule: + - cron: "0 8,16 * * 1-5" # on weekdays at 8am and 4pm + +jobs: + run-checks: + name: Run CI checks as an OSS contributor + uses: ./.github/workflows/budibase_ci.yml + with: + run_as_oss: true + secrets: inherit + + notify-error: + needs: ["run-checks"] + if: ${{ failure() }} + name: Notify error + runs-on: ubuntu-latest + steps: + - name: Checkout repository + uses: actions/checkout@v4 + + - name: Set commit SHA + id: set_sha + run: echo "sha=$(git rev-parse --short ${{ github.sha }})" >> "$GITHUB_OUTPUT" + + - name: Notify error + uses: tsickert/discord-webhook@v5.3.0 + with: + webhook-url: ${{ secrets.OSS_CHECKS_WEBHOOK_URL }} + embed-title: 🚨 OSS checks failed in master + embed-url: https://github.com/${{ github.repository }}/actions/runs/${{ github.run_id }} + embed-description: | Git sha: `${{ steps.set_sha.outputs.sha }}` diff --git a/.github/workflows/close-featurebranch.yml b/.github/workflows/close-featurebranch.yml index 46cb781730..0439aec443 100644 --- a/.github/workflows/close-featurebranch.yml +++ b/.github/workflows/close-featurebranch.yml @@ -2,9 +2,7 @@ name: close-featurebranch on: pull_request: - types: [closed] - branches: - - master + types: [closed, unlabeled] workflow_dispatch: inputs: BRANCH: @@ -14,9 +12,12 @@ on: jobs: release: + if: | + (github.event.action == 'closed' && contains(github.event.pull_request.labels.*.name, 'feature-branch')) || + github.event.label.name == 'feature-branch' runs-on: ubuntu-latest steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 - uses: passeidireto/trigger-external-workflow-action@main env: PAYLOAD_BRANCH: ${{ github.event_name == 'workflow_dispatch' && github.event.inputs.BRANCH || github.head_ref }} diff --git a/.github/workflows/deploy-cloud.yaml b/.github/workflows/deploy-cloud.yaml deleted file mode 100644 index 389b10f7d3..0000000000 --- a/.github/workflows/deploy-cloud.yaml +++ 
/dev/null @@ -1,48 +0,0 @@ -name: Budibase Deploy Production - -on: - workflow_dispatch: - inputs: - version: - description: Budibase release version. For example - 1.0.0 - required: false - -jobs: - release: - runs-on: ubuntu-latest - - steps: - - name: Fail if not a tag - run: | - if [[ $GITHUB_REF != refs/tags/* ]]; then - echo "Workflow Dispatch can only be run on tags" - exit 1 - fi - - uses: actions/checkout@v2 - with: - fetch-depth: 0 - - - name: Fail if tag is not in master - run: | - if ! git merge-base --is-ancestor ${{ github.sha }} origin/master; then - echo "Tag is not in master. This pipeline can only execute tags that are present on the master branch" - exit 1 - fi - - - name: Get the latest budibase release version - id: version - run: | - if [ -z "${{ github.event.inputs.version }}" ]; then - release_version=$(cat lerna.json | jq -r '.version') - else - release_version=${{ github.event.inputs.version }} - fi - echo "RELEASE_VERSION=$release_version" >> $GITHUB_ENV - - - uses: passeidireto/trigger-external-workflow-action@main - env: - PAYLOAD_VERSION: ${{ env.RELEASE_VERSION }} - with: - repository: budibase/budibase-deploys - event: budicloud-prod-deploy - github_pat: ${{ secrets.GH_ACCESS_TOKEN }} diff --git a/.github/workflows/deploy-featurebranch.yml b/.github/workflows/deploy-featurebranch.yml index f1fb12c087..eccc783dfb 100644 --- a/.github/workflows/deploy-featurebranch.yml +++ b/.github/workflows/deploy-featurebranch.yml @@ -2,14 +2,22 @@ name: deploy-featurebranch on: pull_request: - branches: - - master + types: [ + labeled, + # default types below (https://docs.github.com/en/actions/using-workflows/events-that-trigger-workflows#pull_request) + opened, + synchronize, + reopened, + ] jobs: release: + if: | + (github.event_name != 'pull_request' || github.event.pull_request.head.repo.full_name == 'Budibase/budibase') && + contains(github.event.pull_request.labels.*.name, 'feature-branch') runs-on: ubuntu-latest steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 - uses: passeidireto/trigger-external-workflow-action@main env: PAYLOAD_BRANCH: ${{ github.head_ref }} diff --git a/.github/workflows/deploy-qa.yml b/.github/workflows/deploy-qa.yml index d850d289ff..1339ad2eb9 100644 --- a/.github/workflows/deploy-qa.yml +++ b/.github/workflows/deploy-qa.yml @@ -11,10 +11,12 @@ jobs: runs-on: ubuntu-latest steps: - uses: peter-evans/repository-dispatch@v2 - env: - PAYLOAD_VERSION: ${{ github.sha }} - REF_NAME: ${{ github.ref_name}} with: repository: budibase/budibase-deploys event-type: budicloud-qa-deploy token: ${{ secrets.GH_ACCESS_TOKEN }} + client-payload: |- + { + "VERSION": "${{ github.sha }}", + "REF_NAME": "${{ github.ref_name}}" + } diff --git a/.github/workflows/force-release.yml b/.github/workflows/force-release.yml new file mode 100644 index 0000000000..8a9d444f51 --- /dev/null +++ b/.github/workflows/force-release.yml @@ -0,0 +1,46 @@ +name: Forced release +concurrency: + group: tag-release + cancel-in-progress: false + +on: + workflow_dispatch: + +jobs: + ensure-is-master-tag: + name: Ensure is a master tag + runs-on: qa-arc-runner-set + steps: + - name: Checkout monorepo + uses: actions/checkout@v4 + with: + token: ${{ secrets.PERSONAL_ACCESS_TOKEN || github.token }} + fetch-tags: true + fetch-depth: 0 + + - name: Fail if ref is not a tag + run: | + if ! git show-ref -q --verify "refs/tags/${{ github.ref_name }}" 2>/dev/null; then + echo "'${{ github.ref_name }}' is not a valid tag." 
+ exit 1 + fi + - name: Fail if tag is not in master + run: | + if ! git merge-base --is-ancestor ${{ github.ref_name }} origin/master; then + echo "Tag is not in master. Release can only execute tags that are present on the master branch" + exit 1 + fi + + trigger-release: + needs: [ensure-is-master-tag] + runs-on: ubuntu-latest + steps: + - uses: peter-evans/repository-dispatch@v2 + with: + repository: budibase/budibase-deploys + event-type: release-prod + token: ${{ secrets.GH_ACCESS_TOKEN }} + client-payload: |- + { + "TAG": "${{ github.ref_name }}" + } diff --git a/.github/workflows/release-master.yml b/.github/workflows/release-master.yml deleted file mode 100644 index df25182cd6..0000000000 --- a/.github/workflows/release-master.yml +++ /dev/null @@ -1,181 +0,0 @@ -name: Budibase Release -concurrency: - group: release - cancel-in-progress: false - -on: - push: - tags: - - "[0-9]+.[0-9]+.[0-9]+" - # Exclude all pre-releases - - "!*[0-9]+.[0-9]+.[0-9]+-*" - -env: - # Posthog token used by ui at build time - POSTHOG_TOKEN: phc_bIjZL7oh2GEUd2vqvTBH8WvrX0fWTFQMs6H5KQxiUxU - INTERCOM_TOKEN: ${{ secrets.INTERCOM_TOKEN }} - PERSONAL_ACCESS_TOKEN: ${{ secrets.PERSONAL_ACCESS_TOKEN }} - -jobs: - release-images: - runs-on: ubuntu-latest - steps: - - uses: actions/checkout@v2 - with: - submodules: true - token: ${{ secrets.PERSONAL_ACCESS_TOKEN }} - fetch-depth: 0 - - - name: Fail if tag is not in master - run: | - if ! git merge-base --is-ancestor ${{ github.sha }} origin/master; then - echo "Tag is not in master. This pipeline can only execute tags that are present on the master branch" - exit 1 - fi - - - uses: actions/setup-node@v1 - with: - node-version: 18.x - cache: yarn - - - run: yarn install --frozen-lockfile - - name: Update versions - run: ./scripts/updateVersions.sh - - run: yarn lint - - run: yarn build - - run: yarn build:sdk - - - name: Publish budibase packages to NPM - env: - NPM_TOKEN: ${{ secrets.NPM_TOKEN }} - run: | - # setup the username and email. I tend to use 'GitHub Actions Bot' with no email by default - git config --global user.name "Budibase Release Bot" - git config --global user.email "<>" - git submodule foreach git commit -a -m 'Release process' - git commit -a -m 'Release process' - echo //registry.npmjs.org/:_authToken=${NPM_TOKEN} >> .npmrc - yarn release - - - name: "Get Current tag" - id: currenttag - run: | - version=$(./scripts/getCurrentVersion.sh) - echo "Using tag $version" - echo "version=$version" >> "$GITHUB_OUTPUT" - - - name: Setup Docker Buildx - id: buildx - uses: docker/setup-buildx-action@v1 - - - name: Docker login - run: | - docker login -u $DOCKER_USER -p $DOCKER_PASSWORD - env: - DOCKER_USER: ${{ secrets.DOCKER_USERNAME }} - DOCKER_PASSWORD: ${{ secrets.DOCKER_API_KEY }} - - - name: Build worker docker - uses: docker/build-push-action@v5 - with: - context: . - push: true - platforms: linux/amd64,linux/arm64 - build-args: | - BUDIBASE_VERSION=${{ env.BUDIBASE_VERSION }} - tags: ${{ env.IMAGE_NAME }}:${{ env.IMAGE_TAG }} - file: ./packages/worker/Dockerfile.v2 - cache-from: type=registry,ref=${{ env.IMAGE_NAME }}:latest - cache-to: type=inline - env: - IMAGE_NAME: budibase/worker - IMAGE_TAG: ${{ steps.currenttag.outputs.version }} - BUDIBASE_VERSION: ${{ steps.currenttag.outputs.version }} - - - name: Build server docker - uses: docker/build-push-action@v5 - with: - context: . 
- push: true - platforms: linux/amd64,linux/arm64 - build-args: | - BUDIBASE_VERSION=${{ env.BUDIBASE_VERSION }} - tags: ${{ env.IMAGE_NAME }}:${{ env.IMAGE_TAG }} - file: ./packages/server/Dockerfile.v2 - cache-from: type=registry,ref=${{ env.IMAGE_NAME }}:latest - cache-to: type=inline - env: - IMAGE_NAME: budibase/apps - IMAGE_TAG: ${{ steps.currenttag.outputs.version }} - BUDIBASE_VERSION: ${{ steps.currenttag.outputs.version }} - - - name: Build proxy docker - uses: docker/build-push-action@v5 - with: - context: ./hosting/proxy - push: true - platforms: linux/amd64,linux/arm64 - tags: ${{ env.IMAGE_NAME }}:${{ env.IMAGE_TAG }} - file: ./hosting/proxy/Dockerfile - cache-from: type=registry,ref=${{ env.IMAGE_NAME }}:latest - cache-to: type=inline - env: - IMAGE_NAME: budibase/proxy - IMAGE_TAG: ${{ steps.currenttag.outputs.version }} - - release-helm-chart: - needs: [release-images] - runs-on: ubuntu-latest - steps: - - uses: actions/checkout@v2 - - name: Setup Helm - uses: azure/setup-helm@v1 - id: helm-install - - - name: Get the latest budibase release version - id: version - run: | - release_version=$(cat lerna.json | jq -r '.version') - echo "RELEASE_VERSION=$release_version" >> $GITHUB_ENV - - # due to helm repo index issue: https://github.com/helm/helm/issues/7363 - # we need to create new package in a different dir, merge the index and move the package back - - name: Build and release helm chart - run: | - git config user.name "Budibase Helm Bot" - git config user.email "<>" - git reset --hard - git fetch - mkdir sync - echo "Packaging chart to sync dir" - helm package charts/budibase --version 0.0.0-master --app-version "$RELEASE_VERSION" --destination sync - echo "Packaging successful" - git checkout gh-pages - echo "Indexing helm repo" - helm repo index --merge docs/index.yaml sync - mv -f sync/* docs - rm -rf sync - echo "Pushing new helm release" - git add -A - git commit -m "Helm Release: ${{ env.RELEASE_VERSION }}" - git push - - trigger-deploy-to-qa-env: - needs: [release-helm-chart] - runs-on: ubuntu-latest - steps: - - uses: actions/checkout@v2 - - name: Get the current budibase release version - id: version - run: | - release_version=$(cat lerna.json | jq -r '.version') - echo "RELEASE_VERSION=$release_version" >> $GITHUB_ENV - - - uses: passeidireto/trigger-external-workflow-action@main - env: - PAYLOAD_VERSION: ${{ env.RELEASE_VERSION }} - REF_NAME: ${{ github.ref_name}} - with: - repository: budibase/budibase-deploys - event: budicloud-qa-deploy - github_pat: ${{ secrets.GH_ACCESS_TOKEN }} diff --git a/.github/workflows/release-selfhost.yml b/.github/workflows/release-selfhost.yml deleted file mode 100644 index d2689a0ea0..0000000000 --- a/.github/workflows/release-selfhost.yml +++ /dev/null @@ -1,125 +0,0 @@ -name: Budibase Release Selfhost - -on: - workflow_dispatch: - -jobs: - release: - runs-on: ubuntu-latest - - steps: - - name: Fail if not a tag - run: | - if [[ $GITHUB_REF != refs/tags/* ]]; then - echo "Workflow Dispatch can only be run on tags" - exit 1 - fi - - - uses: actions/checkout@v2 - with: - submodules: true - token: ${{ secrets.PERSONAL_ACCESS_TOKEN }} - fetch-depth: 0 - - - name: Fail if tag is not in master - run: | - if ! git merge-base --is-ancestor ${{ github.sha }} origin/master; then - echo "Tag is not in master. 
This pipeline can only execute tags that are present on the master branch" - exit 1 - fi - - - name: Use Node.js 18.x - uses: actions/setup-node@v1 - with: - node-version: 18.x - - - name: Get the latest budibase release version - id: version - run: | - release_version=$(cat lerna.json | jq -r '.version') - echo "RELEASE_VERSION=$release_version" >> $GITHUB_ENV - - - name: Tag and release Docker images (Self Host) - run: | - docker login -u $DOCKER_USER -p $DOCKER_PASSWORD - - release_tag=${{ env.RELEASE_VERSION }} - - # Pull apps and worker images - docker pull budibase/apps:$release_tag - docker pull budibase/worker:$release_tag - docker pull budibase/proxy:$release_tag - - # Tag apps and worker images - docker tag budibase/apps:$release_tag budibase/apps:$SELFHOST_TAG - docker tag budibase/worker:$release_tag budibase/worker:$SELFHOST_TAG - docker tag budibase/proxy:$release_tag budibase/proxy:$SELFHOST_TAG - - # Push images - docker push budibase/apps:$SELFHOST_TAG - docker push budibase/worker:$SELFHOST_TAG - docker push budibase/proxy:$SELFHOST_TAG - env: - DOCKER_USER: ${{ secrets.DOCKER_USERNAME }} - DOCKER_PASSWORD: ${{ secrets.DOCKER_API_KEY }} - SELFHOST_TAG: latest - - - name: Bootstrap and build (CLI) - run: | - yarn - yarn build - - - name: Build OpenAPI spec - run: | - pushd packages/server - yarn - yarn specs - popd - - - name: Setup Helm - uses: azure/setup-helm@v1 - id: helm-install - - # due to helm repo index issue: https://github.com/helm/helm/issues/7363 - # we need to create new package in a different dir, merge the index and move the package back - - name: Build and release helm chart - run: | - git config user.name "Budibase Helm Bot" - git config user.email "<>" - git reset --hard - git fetch - mkdir sync - echo "Packaging chart to sync dir" - helm package charts/budibase --version "$RELEASE_VERSION" --app-version "$RELEASE_VERSION" --destination sync - echo "Packaging successful" - git checkout gh-pages - echo "Indexing helm repo" - helm repo index --merge docs/index.yaml sync - mv -f sync/* docs - rm -rf sync - echo "Pushing new helm release" - git add -A - git commit -m "Helm Release: ${{ env.RELEASE_VERSION }}" - git push - env: - GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} - - - name: Perform Github Release - uses: softprops/action-gh-release@v1 - with: - name: ${{ env.RELEASE_VERSION }} - tag_name: ${{ env.RELEASE_VERSION }} - generate_release_notes: true - files: | - packages/cli/build/cli-win.exe - packages/cli/build/cli-linux - packages/cli/build/cli-macos - packages/server/specs/openapi.yaml - packages/server/specs/openapi.json - - - name: Discord Webhook Action - uses: tsickert/discord-webhook@v4.0.0 - with: - webhook-url: ${{ secrets.PROD_DEPLOY_WEBHOOK_URL }} - content: "Self Host Deployment Complete: ${{ env.RELEASE_VERSION }} deployed to Self Host." 
- embed-title: ${{ env.RELEASE_VERSION }} diff --git a/.github/workflows/release-singleimage.yml b/.github/workflows/release-singleimage.yml deleted file mode 100644 index a3444d5e7a..0000000000 --- a/.github/workflows/release-singleimage.yml +++ /dev/null @@ -1,86 +0,0 @@ -name: Deploy Budibase Single Container Image to DockerHub - -on: - workflow_dispatch: - -env: - CI: true - PERSONAL_ACCESS_TOKEN: ${{ secrets.PERSONAL_ACCESS_TOKEN }} - REGISTRY_URL: registry.hub.docker.com -jobs: - build: - name: "build" - runs-on: ubuntu-latest - strategy: - matrix: - node-version: [18.x] - steps: - - name: Maximize build space - uses: easimon/maximize-build-space@master - with: - root-reserve-mb: 30000 - swap-size-mb: 1024 - remove-android: "true" - remove-dotnet: "true" - - name: Fail if not a tag - run: | - if [[ $GITHUB_REF != refs/tags/* ]]; then - echo "Workflow Dispatch can only be run on tags" - exit 1 - fi - - name: "Checkout" - uses: actions/checkout@v2 - with: - submodules: true - token: ${{ secrets.PERSONAL_ACCESS_TOKEN }} - - - name: Use Node.js ${{ matrix.node-version }} - uses: actions/setup-node@v1 - with: - node-version: ${{ matrix.node-version }} - - name: Setup QEMU - uses: docker/setup-qemu-action@v1 - - name: Setup Docker Buildx - id: buildx - uses: docker/setup-buildx-action@v1 - - name: Run Yarn - run: yarn - - name: Update versions - run: ./scripts/updateVersions.sh - - name: Run Yarn Build - run: yarn build - - name: Login to Docker Hub - uses: docker/login-action@v2 - with: - username: ${{ secrets.DOCKER_USERNAME }} - password: ${{ secrets.DOCKER_API_KEY }} - - name: Get the latest release version - id: version - run: | - release_version=$(cat lerna.json | jq -r '.version') - echo $release_version - echo "RELEASE_VERSION=$release_version" >> $GITHUB_ENV - - name: Tag and release Budibase service docker image - uses: docker/build-push-action@v2 - with: - context: . - push: true - platforms: linux/amd64,linux/arm64 - build-args: BUDIBASE_VERSION=$BUDIBASE_VERSION - tags: budibase/budibase,budibase/budibase:${{ env.RELEASE_VERSION }} - file: ./hosting/single/Dockerfile.v2 - env: - BUDIBASE_VERSION: ${{ env.RELEASE_VERSION }} - - name: Tag and release Budibase Azure App Service docker image - uses: docker/build-push-action@v2 - with: - context: . 
- push: true - platforms: linux/amd64 - build-args: | - TARGETBUILD=aas - BUDIBASE_VERSION=$BUDIBASE_VERSION - tags: budibase/budibase-aas,budibase/budibase-aas:${{ env.RELEASE_VERSION }} - file: ./hosting/single/Dockerfile.v2 - env: - BUDIBASE_VERSION: ${{ env.RELEASE_VERSION }} diff --git a/.github/workflows/stale_bot.yml b/.github/workflows/stale_bot.yml index 49e3473e63..411a70a463 100644 --- a/.github/workflows/stale_bot.yml +++ b/.github/workflows/stale_bot.yml @@ -10,13 +10,14 @@ jobs: steps: - uses: actions/stale@v8 with: + days-before-stale: 330 operations-per-run: 1 # stale rules for PRs days-before-pr-stale: 7 stale-issue-label: stale exempt-pr-labels: pinned,security,roadmap - days-before-pr-close: 7 + days-before-issue-close: 30 - uses: actions/stale@v8 with: @@ -25,6 +26,7 @@ jobs: days-before-stale: 30 only-issue-labels: bug,High priority stale-issue-label: warn + days-before-close: 30 - uses: actions/stale@v8 with: @@ -33,6 +35,7 @@ jobs: days-before-stale: 90 only-issue-labels: bug,Medium priority stale-issue-label: warn + days-before-close: 30 - uses: actions/stale@v8 with: @@ -42,5 +45,4 @@ jobs: stale-issue-label: stale only-issue-labels: bug stale-issue-message: "This issue has been automatically marked as stale because it has not had any activity for six months." - days-before-close: 30 diff --git a/.github/workflows/tag-release.yml b/.github/workflows/tag-release.yml index eaf71ae61a..483e895e98 100644 --- a/.github/workflows/tag-release.yml +++ b/.github/workflows/tag-release.yml @@ -1,4 +1,4 @@ -name: Tag release +name: Release concurrency: group: tag-release cancel-in-progress: false @@ -19,6 +19,8 @@ on: jobs: tag-release: runs-on: ubuntu-latest + outputs: + version: ${{ steps.tag-release.outputs.version }} steps: - name: Fail if branch is not master @@ -26,13 +28,14 @@ jobs: run: | echo "Ref is not master, you must run this job from master." exit 1 - - uses: actions/checkout@v2 + - uses: actions/checkout@v4 with: submodules: true token: ${{ secrets.PERSONAL_ACCESS_TOKEN }} - run: cd scripts && yarn - name: Tag release + id: tag-release run: | cd scripts # setup the username and email. @@ -41,3 +44,23 @@ jobs: BUMP_TYPE_INPUT=${{ github.event.inputs.versioning }} BUMP_TYPE=${BUMP_TYPE_INPUT:-"patch"} ./versionCommit.sh $BUMP_TYPE + + cd .. 
+ new_version=$(./scripts/getCurrentVersion.sh) + echo "version=$new_version" >> $GITHUB_OUTPUT + + trigger-release: + needs: [tag-release] + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v4 + + - uses: peter-evans/repository-dispatch@v2 + with: + repository: budibase/budibase-deploys + event-type: release-prod + token: ${{ secrets.GH_ACCESS_TOKEN }} + client-payload: |- + { + "TAG": "${{ needs.tag-release.outputs.version }}" + } diff --git a/.gitignore b/.gitignore index 02e0ca300d..661c60e95e 100644 --- a/.gitignore +++ b/.gitignore @@ -1,4 +1,3 @@ -builder/* .data/ .temp/ packages/server/runtime_apps/ @@ -6,6 +5,9 @@ packages/server/runtime_apps/ bb-airgapped.tar.gz *.iml +packages/server/build/oldClientVersions/**/* +packages/builder/src/components/deploy/clientVersions.json + # Logs logs *.log @@ -41,8 +43,11 @@ bower_components build/Release # Dependency directories -/node_modules/ jspm_packages/ +*.min.js +*.map +node_modules/ +dist/ # TypeScript v1 declaration files typings/ @@ -105,3 +110,4 @@ budibase-component budibase-datasource *.iml +.nx \ No newline at end of file diff --git a/.gitmodules b/.gitmodules index 2dd6ea53f2..cb6d1c5dc8 100644 --- a/.gitmodules +++ b/.gitmodules @@ -1,3 +1,6 @@ [submodule "packages/pro"] path = packages/pro url = git@github.com:Budibase/budibase-pro.git +[submodule "packages/account-portal"] + path = packages/account-portal + url = git@github.com:Budibase/account-portal.git diff --git a/.nvmrc b/.nvmrc index 7950a44576..790e1105f2 100644 --- a/.nvmrc +++ b/.nvmrc @@ -1 +1 @@ -v18.17.0 +v20.10.0 diff --git a/.prettierignore b/.prettierignore index 64607d74ab..87f0191a94 100644 --- a/.prettierignore +++ b/.prettierignore @@ -1,13 +1,15 @@ node_modules dist -*.spec.js -packages/builder/src/components/design/AppPreview/CurrentItemPreview.svelte packages/server/builder packages/server/coverage -packages/worker/coverage -packages/backend-core/coverage packages/server/client packages/server/src/definitions/openapi.ts +packages/worker/coverage +packages/backend-core/coverage packages/builder/.routify packages/sdk/sdk -packages/pro/coverage \ No newline at end of file +packages/pro/coverage +packages/account-portal/packages/ui/build +packages/account-portal/packages/ui/.routify +packages/account-portal/packages/server/build +**/*.ivm.bundle.js \ No newline at end of file diff --git a/.tool-versions b/.tool-versions index a909d60941..946d5198ce 100644 --- a/.tool-versions +++ b/.tool-versions @@ -1,3 +1,3 @@ -nodejs 18.17.0 +nodejs 20.10.0 python 3.10.0 yarn 1.22.19 diff --git a/.vscode/launch.json b/.vscode/launch.json index cfd8d7b155..2fda61345b 100644 --- a/.vscode/launch.json +++ b/.vscode/launch.json @@ -1,4 +1,3 @@ - { // Use IntelliSense to learn about possible attributes. // Hover to view descriptions of existing attributes. 
@@ -20,6 +19,13 @@ "runtimeArgs": ["--nolazy", "-r", "ts-node/register/transpile-only"], "args": ["${workspaceFolder}/packages/worker/src/index.ts"], "cwd": "${workspaceFolder}/packages/worker" + }, + { + "type": "chrome", + "request": "launch", + "name": "Launch Chrome against localhost", + "url": "http://localhost:10000", + "webRoot": "${workspaceFolder}" } ], "compounds": [ diff --git a/.vscode/settings.json b/.vscode/settings.json index ece537efac..e22d5a8866 100644 --- a/.vscode/settings.json +++ b/.vscode/settings.json @@ -1,7 +1,7 @@ { "editor.formatOnSave": true, "editor.codeActionsOnSave": { - "source.fixAll": true + "source.fixAll": "explicit" }, "editor.defaultFormatter": "esbenp.prettier-vscode", "[json]": { diff --git a/CODEOWNERS b/CODEOWNERS new file mode 100644 index 0000000000..69d69ab7d0 --- /dev/null +++ b/CODEOWNERS @@ -0,0 +1,3 @@ +/packages/server @Budibase/backend +/packages/worker @Budibase/backend +/packages/backend-core @Budibase/backend diff --git a/LICENSE b/LICENSE index a017209adf..cbb55109f4 100644 --- a/LICENSE +++ b/LICENSE @@ -1,7 +1,9 @@ -Copyright 2019-2021, Budibase Ltd. +Copyright 2019-2023, Budibase Ltd. Each Budibase package has its own license, please check the license file in each package. You can consider Budibase to be GPLv3 licensed overall. The apps that you build with Budibase do not package any GPLv3 licensed code, thus do not fall under those restrictions. + +Budibase ships with Structured Query Server, by The Neighbourhoodie Software GmbH. The license for this can be found at ./SQS_LICENSE diff --git a/README.md b/README.md index 7827d4e48a..4979f0ee8e 100644 --- a/README.md +++ b/README.md @@ -1,6 +1,6 @@ 

- Budibase + Budibase

@@ -11,7 +11,7 @@ The low code platform you'll enjoy using

- Budibase is an open source low-code platform, and the easiest way to build internal apps that improve productivity. + Budibase is an open-source low-code platform that saves engineers hundreds of hours building forms, portals, and approval apps securely.

@@ -20,7 +20,7 @@

- Budibase design ui + Budibase design ui

@@ -57,7 +57,7 @@ ## ✨ Features ### Build and ship real software -Unlike other platforms, with Budibase you build and ship single page applications. Budibase applications have performance baked in and can be designed responsively, providing your users with a great experience. +Unlike other platforms, with Budibase you build and ship single page applications. Budibase applications have performance baked in and can be designed responsively, providing users with a great experience.

### Open source and extensible @@ -65,40 +65,36 @@ Budibase is open-source - licensed as GPL v3. This should fill you with confiden

### Load data or start from scratch -Budibase pulls in data from multiple sources, including MongoDB, CouchDB, PostgreSQL, MySQL, Airtable, S3, DynamoDB, or a REST API. And unlike other platforms, with Budibase you can start from scratch and create business apps with no datasources. [Request new datasources](https://github.com/Budibase/budibase/discussions?discussions_q=category%3AIdeas). +Budibase pulls data from multiple sources, including MongoDB, CouchDB, PostgreSQL, MySQL, Airtable, S3, DynamoDB, or a REST API. And unlike other platforms, with Budibase you can start from scratch and create business apps with no data sources. [Request new datasources](https://github.com/Budibase/budibase/discussions?discussions_q=category%3AIdeas).

- Budibase data + Budibase data



### Design and build apps with powerful pre-made components -Budibase comes out of the box with beautifully designed, powerful components which you can use like building blocks to build your UI. We also expose a lot of your favourite CSS styling options so you can go that extra creative mile. [Request new component](https://github.com/Budibase/budibase/discussions?discussions_q=category%3AIdeas). +Budibase comes out of the box with beautifully designed, powerful components which you can use like building blocks to build your UI. We also expose many of your favourite CSS styling options so you can go that extra creative mile. [Request new component](https://github.com/Budibase/budibase/discussions?discussions_q=category%3AIdeas).

- Budibase design + Budibase design



-### Automate processes, integrate with other tools, and connect to webhooks -Save time by automating manual processes and workflows. From connecting to webhooks, to automating emails, simply tell Budibase what to do and let it work for you. You can easily [create new automations for Budibase here](https://github.com/Budibase/automations) or [Request new automation](https://github.com/Budibase/budibase/discussions?discussions_q=category%3AIdeas). - -

- Budibase automations -

+### Automate processes, integrate with other tools and connect to webhooks +Save time by automating manual processes and workflows. From connecting to webhooks to automating emails, simply tell Budibase what to do and let it work for you. You can easily [create new automations for Budibase here](https://github.com/Budibase/automations) or [Request new automation](https://github.com/Budibase/budibase/discussions?discussions_q=category%3AIdeas).

### Integrate with your favorite tools Budibase integrates with a number of popular tools allowing you to build apps that perfectly fit your stack.

- Budibase integrations + Budibase integrations



-### Admin paradise -Budibase is made to scale. With Budibase, you can self-host on your own infrastructure and globally manage users, onboarding, SMTP, apps, groups, theming and more. You can also provide users/groups with an app portal and disseminate user-management to the group manager. +### Deploy with confidence and security +Budibase is made to scale. With Budibase, you can self-host on your own infrastructure and globally manage users, onboarding, SMTP, apps, groups, theming and more. You can also provide users/groups with an app portal and disseminate user management to the group manager. - Checkout the promo video: https://youtu.be/xoljVpty_Kw @@ -119,17 +115,14 @@ As with anything that we build in Budibase, our new public API is simple to use, #### Docs You can learn more about the Budibase API at the following places: -- [General documentation](https://docs.budibase.com/docs/public-api) : Learn how to get your API key, how to use spec, and how to use with Postman +- [General documentation](https://docs.budibase.com/docs/public-api): Learn how to get your API key, how to use the spec, and how to use Postman - [Interactive API documentation](https://docs.budibase.com/reference/post_applications) : Learn how to interact with the API -#### Guides - -- [Build an app with Budibase and Next.js](https://budibase.com/blog/building-a-crud-app-with-budibase-and-next.js/) +
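For readers who want to try the public API straight away, a first call is sketched below. The `/api/public/v1` base path, the `x-budibase-api-key` header, and the search body follow the pattern in the docs linked above, but all three are assumptions to verify against your Budibase version; `$BUDIBASE_URL` and `$API_KEY` are placeholders:

```bash
# Search applications via the public API (assumed endpoint and header name;
# check the interactive API docs for the exact contract on your version).
curl -s -X POST "$BUDIBASE_URL/api/public/v1/applications/search" \
  -H "x-budibase-api-key: $API_KEY" \
  -H "Content-Type: application/json" \
  -d '{"name": ""}'
```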

## 🏁 Get started -Deploy Budibase self-hosted in your existing infrastructure, using Docker, Kubernetes, and Digital Ocean. -Or use Budibase Cloud if you don't need to self-host, and would like to get started quickly. +Deploy Budibase on your existing infrastructure using Docker, Kubernetes, or DigitalOcean. Or use Budibase Cloud if you don't need to self-host and would like to get started quickly. ### [Get started with self-hosting Budibase](https://docs.budibase.com/docs/hosting-methods) @@ -162,7 +155,7 @@ If you have a question or would like to talk with other Budibase users and join ## ❗ Code of conduct -Budibase is dedicated to providing a welcoming, diverse, and harrassment-free experience for everyone. We expect everyone in the Budibase community to abide by our [**Code of Conduct**](https://github.com/Budibase/budibase/blob/HEAD/docs/CODE_OF_CONDUCT.md). Please read it. +Budibase is dedicated to providing everyone with a welcoming, diverse, and harassment-free experience. We expect everyone in the Budibase community to abide by our [**Code of Conduct**](https://github.com/Budibase/budibase/blob/HEAD/docs/CODE_OF_CONDUCT.md). Please read it.
@@ -171,16 +164,16 @@ Budibase is dedicated to providing a welcoming, diverse, and harrassment-free ex ## 🙌 Contributing to Budibase -From opening a bug report to creating a pull request: every contribution is appreciated and welcomed. If you're planning to implement a new feature or change the API please create an issue first. This way we can ensure your work is not in vain. -Environment setup instructions are available for [Debian](https://github.com/Budibase/budibase/tree/HEAD/docs/DEV-SETUP-DEBIAN.md) and [MacOSX](https://github.com/Budibase/budibase/tree/HEAD/docs/DEV-SETUP-MACOSX.md) +From opening a bug report to creating a pull request, every contribution is appreciated and welcomed. If you're planning to implement a new feature or change the API, please create an issue first. This way, we can ensure your work is not in vain. +Environment setup instructions are available [here](https://github.com/Budibase/budibase/tree/HEAD/docs/CONTRIBUTING.md). ### Not Sure Where to Start? -A good place to start contributing, is the [First time issues project](https://github.com/Budibase/budibase/projects/22). +A good place to start contributing is the [First time issues project](https://github.com/Budibase/budibase/projects/22). ### How the repository is organized Budibase is a monorepo managed by Lerna. Lerna manages the building and publishing of the Budibase packages. At a high level, here are the packages that make up Budibase. -- [packages/builder](https://github.com/Budibase/budibase/tree/HEAD/packages/builder) - contains code for the budibase builder client side svelte application. +- [packages/builder](https://github.com/Budibase/budibase/tree/HEAD/packages/builder) - contains code for the Budibase builder client-side Svelte application. - [packages/client](https://github.com/Budibase/budibase/tree/HEAD/packages/client) - A module that runs in the browser responsible for reading JSON definition and creating living, breathing web apps from it. @@ -193,7 +186,7 @@ For more information, see [CONTRIBUTING.md](https://github.com/Budibase/budibase ## 📝 License -Budibase is open-source, licensed as [GPL v3](https://www.gnu.org/licenses/gpl-3.0.en.html). The client and component libraries are licensed as [MPL](https://directory.fsf.org/wiki/License:MPL-2.0) - so the apps that you build can be licensed however you like. +Budibase is open-source, licensed as [GPL v3](https://www.gnu.org/licenses/gpl-3.0.en.html). The client and component libraries are licensed as [MPL](https://directory.fsf.org/wiki/License:MPL-2.0) - so the apps you build can be licensed however you like.

diff --git a/SQS_LICENSE b/SQS_LICENSE new file mode 100644 index 0000000000..0315ee9527 --- /dev/null +++ b/SQS_LICENSE @@ -0,0 +1,31 @@ +FORM OF CUSTOMER LICENCE + +Budibase hereby grants the Customer a worldwide, royalty-free, non-exclusive, +perpetual (for the lifetime of the intellectual property rights contained in the Product) +right and title to utilise the binary code of The Neighbourhoodie Software GmbH +Structured Query Server software product (Product) for its own internal business +purposes (the Purpose) only (the Licence). The Product has the function of bringing a +CouchDB database (NoSQL database) into an SQL database form (SQLite) and thereby +making it usable for complex queries - which originally could only be displayed in an +SQL database. By indexing in SQLite and a server that is tailored to it, the Product +enables the use of CouchDB with SQL queries. +The Licence shall not permit sub-licensing, resale or transfer of the Product to third +parties, other than sub-licensing to the Customer’s direct contractors for the purposes +of utilizing the Product as contemplated above. +The Licence shall not permit the adaptation, modification, decompilation, reverse +engineering or similar activities with respect to the Product. +This licence is granted to the Customer only, although Customer and its Affiliates’ +employees, servants and agents shall be entitled to utilize the Product within the scope +of the Licence for the Customer’s Purpose only. +Reproduction is not permitted to users, except for reproductions that are necessary for +the use of the product under the licence described above. These conditions apply to the +product regardless of the form in which we make the product available and on which +devices it is installed and/or with which devices it is ultimately used. Depending on the +product variant or intended use, certain technical requirements in the IT infrastructure +must be satisfied as a prerequisite for use. +The law of Northern Ireland applies exclusively to this licence, and the courts of +Northern Ireland shall have exclusive jurisdiction, save that we reserve a right to sue +you in the jurisdiction in which you are based. The application of the UN Sales +Convention (CISG) is excluded. +The invalidity of any part of this licence does not affect the validity of the remaining +regulations.
diff --git a/charts/budibase/Chart.lock b/charts/budibase/Chart.lock index 75b9de07b5..3ee752a362 100644 --- a/charts/budibase/Chart.lock +++ b/charts/budibase/Chart.lock @@ -1,9 +1,6 @@ dependencies: - name: couchdb repository: https://apache.github.io/couchdb-helm - version: 3.3.4 -- name: ingress-nginx - repository: https://kubernetes.github.io/ingress-nginx - version: 4.0.13 -digest: sha256:20892705c2d8e64c98257d181063a514ac55013e2b43399a6e54868a97f97845 -generated: "2021-12-30T18:55:30.878411Z" + version: 4.3.0 +digest: sha256:94449a7f195b186f5af33ec5aa66d58b36bede240fae710f021ca87837b30606 +generated: "2023-11-20T17:43:02.777596Z" diff --git a/charts/budibase/Chart.yaml b/charts/budibase/Chart.yaml index 05b3f24dbd..e2c9378f2c 100644 --- a/charts/budibase/Chart.yaml +++ b/charts/budibase/Chart.yaml @@ -17,10 +17,6 @@ version: 0.0.0 appVersion: 0.0.0 dependencies: - name: couchdb - version: 3.3.4 + version: 4.3.0 repository: https://apache.github.io/couchdb-helm condition: services.couchdb.enabled - - name: ingress-nginx - version: 4.0.13 - repository: https://kubernetes.github.io/ingress-nginx - condition: ingress.nginx diff --git a/charts/budibase/README.md b/charts/budibase/README.md index efa78ba75c..dea7d1dbae 100644 --- a/charts/budibase/README.md +++ b/charts/budibase/README.md @@ -1,39 +1,228 @@ -# Budibase +# budibase -[Budibase](https://budibase.com/) Budibase is an open source low-code platform, helping thousands of teams build apps for their workplace in minutes. - -## TL;DR; -```console -$ cd chart -$ helm install budibase . -``` - -## Introduction - -This chart bootstraps a [Budibase](https://budibase.com/) deployment on a [Kubernetes](http://kubernetes.io) cluster using the [Helm](https://helm.sh) package manager. +Budibase is an open source low-code platform, helping thousands of teams build apps for their workplace in minutes. ## Prerequisites -- helm v3 or above +- `helm` v3 or above - Kubernetes 1.4+ -- PV provisioner support in the underlying infrastructure (with persistence storage enabled) +- A storage controller (if you want to use persistent storage) +- An ingress controller (if you want to define an `Ingress` resource) +- `metrics-server` (if you want to make use of horizontal pod autoscaling) -## Installing the Chart +## Chart dependencies -To install the chart with the release name `budi-release`: +This chart depends on the official Apache CouchDB chart. You can see its +documentation here: +. + +## Upgrading + +### `2.x` to `3.0.0` + +We made a number of breaking changes in this release to make the chart more +idiomatic and easier to use. + +1. We no longer bundle `ingress-nginx`. If you were relying on this to supply + an ingress controller to your cluster, you will now need to deploy that + separately. You'll find guidance for that here: + . +2. We've upgraded the version of the [CouchDB chart](https://github.com/apache/couchdb-helm) + we use from `3.3.4` to `4.3.0`. The primary motivation for this was to align + the CouchDB chart used with the CouchDB version used, which has also updated + from 3.1.1 to 3.2.1. Additionally, we're moving away from the official CouchDB + to one we're building ourselves. +3. We've separated out the supplied AWS ALB ingress resource for those deploying + into EKS. Where previously you enabled this by setting `ingress.enabled: false` + and `ingress.aws: true`, you now set `awsAlbIngress.enabled: true` and all + configuration for it is under `awsAlbIngress`. +4. 
The `HorizontalPodAutoscaler` that was configured at `hpa.enabled: true` has + been split into 3 separate HPAs, one for each of `apps`, `worker`, and `proxy`. + They are configured at `services.{apps,worker,proxy}.autoscaling`. + +## Installing + +To install the chart from our repository: ```console -$ helm install budi-release . +$ helm repo add budibase https://budibase.github.io/budibase/ +$ helm repo update +$ helm install --create-namespace --namespace budibase budibase budibase/budibase ``` -The command deploys Budibase on the Kubernetes cluster in the default configuration. The [configuration](#configuration) section lists the parameters that can be configured during installation. - -> **Tip**: List all releases using `helm list` - -## Uninstalling the Chart - -To uninstall/delete the `my-release` deployment: +To install the chart from this repo: ```console -$ helm delete my-release +$ git clone git@github.com:budibase/budibase.git +$ cd budibase/charts/budibase +$ helm install --create-namespace --namespace budibase budibase . ``` + +## Example minimal configuration + +Here's an example `values.yaml` that would get a Budibase instance running in a home +cluster using an nginx ingress controller and NFS as cluster storage (basically one of our +staff's homelabs). + +
+ +```yaml +ingress: + enabled: true + className: "nginx" + hosts: + - host: budibase.local # set this to whatever DNS name you'd use + paths: + - backend: + service: + name: proxy-service + port: + number: 10000 + path: / + pathType: Prefix + +couchdb: + persistentVolume: + enabled: true + storageClass: "nfs-client" + adminPassword: admin + +services: + objectStore: + storageClass: "nfs-client" + redis: + storageClass: "nfs-client" +``` + +If you wanted to use this when bringing up Budibase in your own cluster, you could save it +to your hard disk and run the following: + +```console +$ helm install --create-namespace --namespace budibase budibase . -f values.yaml +``` + +
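Once the release is up, a quick sanity check might look like the following (a minimal sketch assuming the `budibase` namespace used in the install commands above; `proxy-service` and port `10000` come from the example configuration):

```console
$ # Watch the Budibase pods come up
$ kubectl --namespace budibase get pods
$ # Forward the proxy to localhost for a quick smoke test
$ kubectl --namespace budibase port-forward svc/proxy-service 10000:10000
```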
+ +## Configuring + +| Key | Type | Default | Description | +|-----|------|---------|-------------| +| affinity | object | `{}` | Sets the affinity for all pods created by this chart. Should not ordinarily need to be changed. See for more information on affinity. | +| awsAlbIngress.certificateArn | string | `""` | If you want to use HTTPS, you'll need to create an ACM certificate and specify the ARN here. | +| awsAlbIngress.enabled | bool | `false` | Whether to create an ALB Ingress resource pointing to the Budibase proxy. Requires the AWS ALB Ingress Controller. | +| couchdb.clusterSize | int | `1` | The number of replicas to run in the CouchDB cluster. We set this to 1 by default to make things simpler, but you can set it to 3 if you need a high-availability CouchDB cluster. | +| couchdb.couchdbConfig.couchdb.uuid | string | `"budibase-couchdb"` | Unique identifier for this CouchDB server instance. You shouldn't need to change this. | +| couchdb.image | object | `{}` | We use a custom CouchDB image for running Budibase and we don't support using any other CouchDB image. You shouldn't change this, and if you do we can't guarantee that Budibase will work. | +| globals.apiEncryptionKey | string | `""` | Used for encrypting API keys and environment variables when stored in the database. You don't need to set this if `createSecrets` is true. | +| globals.appVersion | string | `""` | The version of Budibase to deploy. Defaults to what's specified by {{ .Chart.AppVersion }}. Ends up being used as the image version tag for the apps, proxy, and worker images. | +| globals.automationMaxIterations | string | `"200"` | The maximum number of iterations allowed for an automation loop step. You can read more about looping here: . | +| globals.budibaseEnv | string | `"PRODUCTION"` | Sets the environment variable BUDIBASE_ENVIRONMENT for the apps and worker pods. Should not ordinarily need to be changed. | +| globals.cookieDomain | string | `""` | Sets the domain attribute of the cookie that Budibase uses to store session information. See for details on why you might want to set this. | +| globals.createSecrets | bool | `true` | Create an internal API key, JWT secret, object store access key and secret, and store them in a Kubernetes `Secret`. | +| globals.enableAnalytics | string | `"1"` | Whether to enable analytics or not. You can read more about our analytics here: . | +| globals.google | object | `{"clientId":"","secret":""}` | Google OAuth settings. These can also be set in the Budibase UI, see for details. | +| globals.google.clientId | string | `""` | Client ID of your Google OAuth app. | +| globals.google.secret | string | `""` | Client secret of your Google OAuth app. | +| globals.httpMigrations | string | `"0"` | Whether or not to enable data migrations over the HTTP API. If this is set to "0", migrations are run on startup. You shouldn't ordinarily need to change this. | +| globals.internalApiKey | string | `""` | API key used for internal Budibase API calls. You don't need to set this if `createSecrets` is true. | +| globals.internalApiKeyFallback | string | `""` | A fallback value for `internalApiKey`. If you're rotating your internal API key, you can set this to the old value for the duration of the rotation. | +| globals.jwtSecret | string | `""` | Secret used for signing JWTs. You don't need to set this if `createSecrets` is true. | +| globals.jwtSecretFallback | string | `""` | A fallback value for `jwtSecret`.
If you're rotating your JWT secret, you can set this to the old value for the duration of the rotation. | +| globals.platformUrl | string | `""` | Set the `platformUrl` binding. You can also do this in Settings > Organisation if you are self-hosting. | +| globals.smtp.enabled | bool | `false` | Whether to enable SMTP or not. | +| globals.smtp.from | string | `""` | The email address to use in the "From:" field of emails sent by Budibase. | +| globals.smtp.host | string | `""` | The hostname of your SMTP server. | +| globals.smtp.password | string | `""` | The password to use when authenticating with your SMTP server. | +| globals.smtp.port | string | `"587"` | The port of your SMTP server. | +| globals.smtp.user | string | `""` | The username to use when authenticating with your SMTP server. | +| globals.tenantFeatureFlags | string | `"*:LICENSING,*:USER_GROUPS,*:ONBOARDING_TOUR"` | Sets what feature flags are enabled and for which tenants. Should not ordinarily need to be changed. | +| imagePullSecrets | list | `[]` | Passed to all pods created by this chart. Should not ordinarily need to be changed. | +| ingress.className | string | `""` | What ingress class to use. | +| ingress.enabled | bool | `true` | Whether to create an Ingress resource pointing to the Budibase proxy. | +| ingress.hosts | list | `[]` | Standard hosts block for the Ingress resource. Defaults to pointing to the Budibase proxy. | +| nameOverride | string | `""` | Override the name of the deployment. Defaults to {{ .Chart.Name }}. | +| service.port | int | `10000` | Port to expose on the service. | +| service.type | string | `"ClusterIP"` | Service type for the service that points to the main Budibase proxy pod. | +| serviceAccount.annotations | object | `{}` | Annotations to add to the service account | +| serviceAccount.create | bool | `true` | Specifies whether a service account should be created | +| serviceAccount.name | string | `""` | The name of the service account to use. If not set and create is true, a name is generated using the fullname template | +| services.apps.autoscaling.enabled | bool | `false` | Whether to enable horizontal pod autoscaling for the apps service. | +| services.apps.autoscaling.maxReplicas | int | `10` | | +| services.apps.autoscaling.minReplicas | int | `1` | | +| services.apps.autoscaling.targetCPUUtilizationPercentage | int | `80` | Target CPU utilization percentage for the apps service. Note that for autoscaling to work, you will need to have metrics-server configured, and resources set for the apps pods. | +| services.apps.httpLogging | int | `1` | Whether or not to log HTTP requests to the apps service. | +| services.apps.livenessProbe | object | HTTP health checks. | Liveness probe configuration for apps pods. You shouldn't need to change this, but if you want to you can find more information here: | +| services.apps.logLevel | string | `"info"` | The log level for the apps service. | +| services.apps.readinessProbe | object | HTTP health checks. | Readiness probe configuration for apps pods. You shouldn't need to change this, but if you want to you can find more information here: | +| services.apps.replicaCount | int | `1` | The number of apps replicas to run. | +| services.apps.resources | object | `{}` | The resources to use for apps pods. See for more information on how to set these. | +| services.apps.startupProbe | object | HTTP health checks. | Startup probe configuration for apps pods. 
You shouldn't need to change this, but if you want to you can find more information here: | +| services.automationWorkers.autoscaling.enabled | bool | `false` | Whether to enable horizontal pod autoscaling for the automation worker service. | +| services.automationWorkers.autoscaling.maxReplicas | int | `10` | | +| services.automationWorkers.autoscaling.minReplicas | int | `1` | | +| services.automationWorkers.autoscaling.targetCPUUtilizationPercentage | int | `80` | Target CPU utilization percentage for the automation worker service. Note that for autoscaling to work, you will need to have metrics-server configured, and resources set for the automation worker pods. | +| services.automationWorkers.enabled | bool | `true` | Whether or not to enable the automation worker service. If you disable this, automations will be processed by the apps service. | +| services.automationWorkers.livenessProbe | object | HTTP health checks. | Liveness probe configuration for automation worker pods. You shouldn't need to change this, but if you want to you can find more information here: | +| services.automationWorkers.logLevel | string | `"info"` | The log level for the automation worker service. | +| services.automationWorkers.readinessProbe | object | HTTP health checks. | Readiness probe configuration for automation worker pods. You shouldn't need to change this, but if you want to you can find more information here: | +| services.automationWorkers.replicaCount | int | `1` | The number of automation worker replicas to run. | +| services.automationWorkers.resources | object | `{}` | The resources to use for automation worker pods. See for more information on how to set these. | +| services.automationWorkers.startupProbe | object | HTTP health checks. | Startup probe configuration for automation worker pods. You shouldn't need to change this, but if you want to you can find more information here: | +| services.couchdb.backup.enabled | bool | `false` | Whether or not to enable periodic CouchDB backups. This works by replicating to another CouchDB instance. | +| services.couchdb.backup.interval | string | `""` | Backup interval in seconds. | +| services.couchdb.backup.resources | object | `{}` | The resources to use for CouchDB backup pods. See for more information on how to set these. | +| services.couchdb.backup.target | string | `""` | Target CouchDB instance to back up to, either a hostname or an IP address. | +| services.couchdb.enabled | bool | `true` | Whether or not to spin up a CouchDB instance in your cluster. True by default, and the configuration for the CouchDB instance is under the `couchdb` key at the root of this file. You can see what options are available to you by looking at the official CouchDB Helm chart: . | +| services.couchdb.port | int | `5984` | | +| services.dns | string | `"cluster.local"` | The DNS suffix to use for service discovery. You only need to change this if you've configured your cluster to use a different DNS suffix. | +| services.objectStore.accessKey | string | `""` | AWS_ACCESS_KEY if using S3 | +| services.objectStore.browser | bool | `true` | Whether to enable the Minio web console or not. If you're exposing Minio to the Internet (via a custom Ingress record, for example), you should set this to false. If you're only exposing Minio to your cluster, you can leave this as true. | +| services.objectStore.cloudfront.cdn | string | `""` | Set the URL of a distribution to enable CloudFront.
| +| services.objectStore.cloudfront.privateKey64 | string | `""` | Base64-encoded private key for the above public key. | +| services.objectStore.cloudfront.publicKeyId | string | `""` | ID of the public key stored in CloudFront. | +| services.objectStore.minio | bool | `true` | Set to false if using another object store, such as S3. You will need to set `services.objectStore.url` to point to your bucket if you do this. | +| services.objectStore.region | string | `""` | AWS_REGION if using S3 | +| services.objectStore.resources | object | `{}` | The resources to use for Minio pods. See for more information on how to set these. | +| services.objectStore.secretKey | string | `""` | AWS_SECRET_ACCESS_KEY if using S3 | +| services.objectStore.storage | string | `"100Mi"` | How much storage to give Minio in its PersistentVolumeClaim. | +| services.objectStore.storageClass | string | `""` | If defined, sets storageClassName to the given class. If set to "-", sets storageClassName to "", which disables dynamic provisioning. If undefined (the default) or set to null, no storageClassName spec is set and the default provisioner is chosen. | +| services.objectStore.url | string | `"http://minio-service:9000"` | URL to use for object storage. Only change this if you're using an external object store, such as S3. Remember to set `minio: false` if you do this. | +| services.proxy.autoscaling.enabled | bool | `false` | Whether to enable horizontal pod autoscaling for the proxy service. | +| services.proxy.autoscaling.maxReplicas | int | `10` | | +| services.proxy.autoscaling.minReplicas | int | `1` | | +| services.proxy.autoscaling.targetCPUUtilizationPercentage | int | `80` | Target CPU utilization percentage for the proxy service. Note that for autoscaling to work, you will need to have metrics-server configured, and resources set for the proxy pods. | +| services.proxy.livenessProbe | object | HTTP health checks. | Liveness probe configuration for proxy pods. You shouldn't need to change this, but if you want to you can find more information here: | +| services.proxy.readinessProbe | object | HTTP health checks. | Readiness probe configuration for proxy pods. You shouldn't need to change this, but if you want to you can find more information here: | +| services.proxy.replicaCount | int | `1` | The number of proxy replicas to run. | +| services.proxy.resources | object | `{}` | The resources to use for proxy pods. See for more information on how to set these. | +| services.proxy.startupProbe | object | HTTP health checks. | Startup probe configuration for proxy pods. You shouldn't need to change this, but if you want to you can find more information here: | +| services.redis.enabled | bool | `true` | Whether or not to deploy a Redis pod into your cluster. | +| services.redis.password | string | `"budibase"` | The password to use when connecting to Redis. It's recommended that you change this from the default if you're running Redis in-cluster. | +| services.redis.port | int | `6379` | Port to expose Redis on. | +| services.redis.resources | object | `{}` | The resources to use for Redis pods. See for more information on how to set these. | +| services.redis.storage | string | `"100Mi"` | How much persistent storage to allocate to Redis. | +| services.redis.storageClass | string | `""` | If defined, sets storageClassName to the given class. If set to "-", sets storageClassName to "", which disables dynamic provisioning. If undefined (the default) or set to null, no storageClassName spec is set and the default provisioner is chosen.
| +| services.redis.url | string | `""` | If you choose to run Redis externally to this chart, you can specify the connection details here. | +| services.worker.autoscaling.enabled | bool | `false` | Whether to enable horizontal pod autoscaling for the worker service. | +| services.worker.autoscaling.maxReplicas | int | `10` | | +| services.worker.autoscaling.minReplicas | int | `1` | | +| services.worker.autoscaling.targetCPUUtilizationPercentage | int | `80` | Target CPU utilization percentage for the worker service. Note that for autoscaling to work, you will need to have metrics-server configured, and resources set for the worker pods. | +| services.worker.httpLogging | int | `1` | Whether or not to log HTTP requests to the worker service. | +| services.worker.livenessProbe | object | HTTP health checks. | Liveness probe configuration for worker pods. You shouldn't need to change this, but if you want to you can find more information here: | +| services.worker.logLevel | string | `"info"` | The log level for the worker service. | +| services.worker.readinessProbe | object | HTTP health checks. | Readiness probe configuration for worker pods. You shouldn't need to change this, but if you want to you can find more information here: | +| services.worker.replicaCount | int | `1` | The number of worker replicas to run. | +| services.worker.resources | object | `{}` | The resources to use for worker pods. See for more information on how to set these. | +| services.worker.startupProbe | object | HTTP health checks. | Startup probe configuration for worker pods. You shouldn't need to change this, but if you want to you can find more information here: | +| tolerations | list | `[]` | Sets the tolerations for all pods created by this chart. Should not ordinarily need to be changed. See for more information on tolerations. | + +## Uninstalling + +To uninstall the chart, assuming you named the release `budibase` (both commands in the installation section do so): + +```console +$ helm uninstall --namespace budibase budibase +``` + +---------------------------------------------- +Autogenerated from chart metadata using [helm-docs v1.11.3](https://github.com/norwoodj/helm-docs/releases/v1.11.3) diff --git a/charts/budibase/README.md.gotmpl b/charts/budibase/README.md.gotmpl new file mode 100644 index 0000000000..e37c323837 --- /dev/null +++ b/charts/budibase/README.md.gotmpl @@ -0,0 +1,117 @@ +{{ template "chart.header" . }} +{{ template "chart.description" . }} + +## Prerequisites + +- `helm` v3 or above +- Kubernetes 1.4+ +- A storage controller (if you want to use persistent storage) +- An ingress controller (if you want to define an `Ingress` resource) +- `metrics-server` (if you want to make use of horizontal pod autoscaling) + +## Chart dependencies + +This chart depends on the official Apache CouchDB chart. You can see its +documentation here: +. + +## Upgrading + +### `2.x` to `3.0.0` + +We made a number of breaking changes in this release to make the chart more +idiomatic and easier to use. + +1. We no longer bundle `ingress-nginx`. If you were relying on this to supply + an ingress controller to your cluster, you will now need to deploy that + separately. You'll find guidance for that here: + . +2. We've upgraded the version of the [CouchDB chart](https://github.com/apache/couchdb-helm) + we use from `3.3.4` to `4.3.0`. The primary motivation for this was to align + the CouchDB chart used with the CouchDB version used, which has also updated + from 3.1.1 to 3.2.1. 
Additionally, we're moving away from the official CouchDB + to one we're building ourselves. +3. We've separated out the supplied AWS ALB ingress resource for those deploying + into EKS. Where previously you enabled this by setting `ingress.enabled: false` + and `ingress.aws: true`, you now set `awsAlbIngress.enabled: true` and all + configuration for it is under `awsAlbIngress`. +4. The `HorizontalPodAutoscaler` that was configured at `hpa.enabled: true` has + been split into 3 separate HPAs, one for each of `apps`, `worker`, and `proxy`. + They are configured at `services.{apps,worker,proxy}.autoscaling`. + +## Installing + +To install the chart from our repository: + +```console +$ helm repo add budibase https://budibase.github.io/budibase/ +$ helm repo update +$ helm install --create-namespace --namespace budibase budibase budibase/budibase +``` + +To install the chart from this repo: + +```console +$ git clone git@github.com:budibase/budibase.git +$ cd budibase/charts/budibase +$ helm install --create-namespace --namespace budibase budibase . +``` + +## Example minimal configuration + +Here's an example `values.yaml` that would get a Budibase instance running in a home +cluster using an nginx ingress controller and NFS as cluster storage (basically one of our +staff's homelabs). + +
+ +```yaml +ingress: + enabled: true + className: "nginx" + hosts: + - host: budibase.local # set this to whatever DNS name you'd use + paths: + - backend: + service: + name: proxy-service + port: + number: 10000 + path: / + pathType: Prefix + +couchdb: + persistentVolume: + enabled: true + storageClass: "nfs-client" + adminPassword: admin + +services: + objectStore: + storageClass: "nfs-client" + redis: + storageClass: "nfs-client" +``` + +If you wanted to use this when bringing up Budibase in your own cluster, you could save it +to your hard disk and run the following: + +```console +$ helm install --create-namespace --namespace budibase budibase . -f values.yaml +``` + +
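Once the release is up, a quick sanity check might look like the following (a minimal sketch assuming the `budibase` namespace used in the install commands above; `proxy-service` and port `10000` come from the example configuration):

```console
$ # Watch the Budibase pods come up
$ kubectl --namespace budibase get pods
$ # Forward the proxy to localhost for a quick smoke test
$ kubectl --namespace budibase port-forward svc/proxy-service 10000:10000
```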
+ +## Configuring + +{{ template "chart.valuesTable" . }} + +## Uninstalling + +To uninstall the chart, assuming you named the release `budibase` (both commands in the installation section do so): + +```console +$ helm uninstall --namespace budibase budibase +``` + +{{ template "helm-docs.versionFooter" . }} diff --git a/charts/budibase/charts/couchdb-3.3.4.tgz b/charts/budibase/charts/couchdb-3.3.4.tgz deleted file mode 100644 index f7ebfd3e96..0000000000 Binary files a/charts/budibase/charts/couchdb-3.3.4.tgz and /dev/null differ diff --git a/charts/budibase/charts/couchdb-4.3.0.tgz b/charts/budibase/charts/couchdb-4.3.0.tgz new file mode 100644 index 0000000000..d3cce28ee6 Binary files /dev/null and b/charts/budibase/charts/couchdb-4.3.0.tgz differ diff --git a/charts/budibase/charts/ingress-nginx-4.0.13.tgz b/charts/budibase/charts/ingress-nginx-4.0.13.tgz deleted file mode 100644 index 1e34215c5f..0000000000 Binary files a/charts/budibase/charts/ingress-nginx-4.0.13.tgz and /dev/null differ diff --git a/charts/budibase/templates/alb-ingress.yaml b/charts/budibase/templates/alb-ingress.yaml index 6cd1cf2cba..89b4e9e2cb 100644 --- a/charts/budibase/templates/alb-ingress.yaml +++ b/charts/budibase/templates/alb-ingress.yaml @@ -1,4 +1,4 @@ -{{- if .Values.ingress.aws }} +{{- if .Values.awsAlbIngress.enabled }} apiVersion: networking.k8s.io/v1 kind: Ingress metadata: @@ -7,24 +7,24 @@ metadata: kubernetes.io/ingress.class: alb alb.ingress.kubernetes.io/scheme: internet-facing alb.ingress.kubernetes.io/target-type: ip - alb.ingress.kubernetes.io/success-codes: 200,301 - alb.ingress.kubernetes.io/healthcheck-path: / - {{- if .Values.ingress.certificateArn }} + alb.ingress.kubernetes.io/success-codes: '200' + alb.ingress.kubernetes.io/healthcheck-path: '/health' + {{- if .Values.awsAlbIngress.certificateArn }} alb.ingress.kubernetes.io/actions.ssl-redirect: '{"Type": "redirect", "RedirectConfig": { "Protocol": "HTTPS", "Port": "443", "StatusCode": "HTTP_301"}}' alb.ingress.kubernetes.io/listen-ports: '[{"HTTP": 80}, {"HTTPS":443}]' - alb.ingress.kubernetes.io/certificate-arn: {{ .Values.ingress.certificateArn }} + alb.ingress.kubernetes.io/certificate-arn: {{ .Values.awsAlbIngress.certificateArn }} {{- end }} - {{- if .Values.ingress.sslPolicy }} - alb.ingress.kubernetes.io/actions.ssl-policy: {{ .Values.ingress.sslPolicy }} + {{- if .Values.awsAlbIngress.sslPolicy }} + alb.ingress.kubernetes.io/actions.ssl-policy: {{ .Values.awsAlbIngress.sslPolicy }} {{- end }} - {{- if .Values.ingress.securityGroups }} - alb.ingress.kubernetes.io/security-groups: {{ .Values.ingress.securityGroups }} + {{- if .Values.awsAlbIngress.securityGroups }} + alb.ingress.kubernetes.io/security-groups: {{ .Values.awsAlbIngress.securityGroups }} {{- end }} spec: rules: - http: paths: - {{- if .Values.ingress.certificateArn }} + {{- if .Values.awsAlbIngress.certificateArn }} - path: / pathType: Prefix backend: diff --git a/charts/budibase/templates/app-service-deployment.yaml b/charts/budibase/templates/app-service-deployment.yaml index 73c6d990d2..c7c4481122 100644 --- a/charts/budibase/templates/app-service-deployment.yaml +++ b/charts/budibase/templates/app-service-deployment.yaml @@ -2,12 +2,9 @@ apiVersion: apps/v1 kind: Deployment metadata: annotations: - kompose.cmd: kompose convert - kompose.version: 1.21.0 (992df58d8) {{ if .Values.services.apps.deploymentAnnotations }} {{- toYaml .Values.services.apps.deploymentAnnotations | indent 4 -}} {{ end }} - creationTimestamp: null labels: io.kompose.service: 
app-service {{ if .Values.services.apps.deploymentLabels }} @@ -24,12 +21,9 @@ spec: template: metadata: annotations: - kompose.cmd: kompose convert - kompose.version: 1.21.0 (992df58d8) {{ if .Values.services.apps.templateAnnotations }} {{- toYaml .Values.services.apps.templateAnnotations | indent 8 -}} {{ end }} - creationTimestamp: null labels: io.kompose.service: app-service {{ if .Values.services.apps.templateLabels }} @@ -198,7 +192,14 @@ spec: - name: NODE_TLS_REJECT_UNAUTHORIZED value: {{ .Values.services.tlsRejectUnauthorized }} {{ end }} - + {{- if .Values.services.automationWorkers.enabled }} + - name: APP_FEATURES + value: "api" + {{- end }} + {{- range .Values.services.apps.extraEnv }} + - name: {{ .name }} + value: {{ .value | quote }} + {{- end }} image: budibase/apps:{{ .Values.globals.appVersion | default .Chart.AppVersion }} imagePullPolicy: Always {{- if .Values.services.apps.startupProbe }} @@ -226,6 +227,14 @@ spec: resources: {{- toYaml . | nindent 10 }} {{ end }} + {{ if .Values.services.apps.command }} + command: + {{- toYaml .Values.services.apps.command | nindent 10 }} + {{ end }} + {{ if .Values.services.apps.args }} + args: + {{- toYaml .Values.services.apps.args | nindent 10 }} + {{ end }} {{- with .Values.affinity }} affinity: {{- toYaml . | nindent 8 }} @@ -243,4 +252,10 @@ spec: {{ end }} restartPolicy: Always serviceAccountName: "" + {{ if .Values.services.apps.ndots }} + dnsConfig: + options: + - name: ndots + value: {{ .Values.services.apps.ndots | quote }} + {{ end }} status: {} diff --git a/charts/budibase/templates/app-service-hpa.yaml b/charts/budibase/templates/app-service-hpa.yaml new file mode 100644 index 0000000000..e819ecb9e3 --- /dev/null +++ b/charts/budibase/templates/app-service-hpa.yaml @@ -0,0 +1,32 @@ +{{- if .Values.services.apps.autoscaling.enabled }} +apiVersion: {{ ternary "autoscaling/v2" "autoscaling/v2beta2" (.Capabilities.APIVersions.Has "autoscaling/v2") }} +kind: HorizontalPodAutoscaler +metadata: + name: {{ include "budibase.fullname" . }}-apps + labels: + {{- include "budibase.labels" . 
| nindent 4 }} +spec: + scaleTargetRef: + apiVersion: apps/v1 + kind: Deployment + name: app-service + minReplicas: {{ .Values.services.apps.autoscaling.minReplicas }} + maxReplicas: {{ .Values.services.apps.autoscaling.maxReplicas }} + metrics: + {{- if .Values.services.apps.autoscaling.targetCPUUtilizationPercentage }} + - type: Resource + resource: + name: cpu + target: + type: Utilization + averageUtilization: {{ .Values.services.apps.autoscaling.targetCPUUtilizationPercentage }} + {{- end }} + {{- if .Values.services.apps.autoscaling.targetMemoryUtilizationPercentage }} + - type: Resource + resource: + name: memory + target: + type: Utilization + averageUtilization: {{ .Values.services.apps.autoscaling.targetMemoryUtilizationPercentage }} + {{- end }} +{{- end }} diff --git a/charts/budibase/templates/app-service-service.yaml b/charts/budibase/templates/app-service-service.yaml index 5247b4de09..6d19590d45 100644 --- a/charts/budibase/templates/app-service-service.yaml +++ b/charts/budibase/templates/app-service-service.yaml @@ -1,10 +1,6 @@ apiVersion: v1 kind: Service metadata: - annotations: - kompose.cmd: kompose convert - kompose.version: 1.21.0 (992df58d8) - creationTimestamp: null labels: io.kompose.service: app-service name: app-service diff --git a/charts/budibase/templates/automation-worker-service-deployment.yaml b/charts/budibase/templates/automation-worker-service-deployment.yaml new file mode 100644 index 0000000000..36c3a8ffbf --- /dev/null +++ b/charts/budibase/templates/automation-worker-service-deployment.yaml @@ -0,0 +1,262 @@ +{{- if .Values.services.automationWorkers.enabled }} +apiVersion: apps/v1 +kind: Deployment +metadata: + annotations: +{{ if .Values.services.automationWorkers.deploymentAnnotations }} +{{- toYaml .Values.services.automationWorkers.deploymentAnnotations | indent 4 -}} +{{ end }} + labels: + io.kompose.service: automation-worker-service +{{ if .Values.services.automationWorkers.deploymentLabels }} +{{- toYaml .Values.services.automationWorkers.deploymentLabels | indent 4 -}} +{{ end }} + name: automation-worker-service +spec: + replicas: {{ .Values.services.automationWorkers.replicaCount }} + selector: + matchLabels: + io.kompose.service: automation-worker-service + strategy: + type: RollingUpdate + template: + metadata: + annotations: +{{ if .Values.services.automationWorkers.templateAnnotations }} +{{- toYaml .Values.services.automationWorkers.templateAnnotations | indent 8 -}} +{{ end }} + labels: + io.kompose.service: automation-worker-service +{{ if .Values.services.automationWorkers.templateLabels }} +{{- toYaml .Values.services.automationWorkers.templateLabels | indent 8 -}} +{{ end }} + spec: + containers: + - env: + - name: BUDIBASE_ENVIRONMENT + value: {{ .Values.globals.budibaseEnv }} + - name: DEPLOYMENT_ENVIRONMENT + value: "kubernetes" + - name: COUCH_DB_URL + {{ if .Values.services.couchdb.url }} + value: {{ .Values.services.couchdb.url }} + {{ else }} + value: http://{{ .Release.Name }}-svc-couchdb:{{ .Values.services.couchdb.port }} + {{ end }} + {{ if .Values.services.couchdb.enabled }} + - name: COUCH_DB_USER + valueFrom: + secretKeyRef: + name: {{ template "couchdb.fullname" . }} + key: adminUsername + - name: COUCH_DB_PASSWORD + valueFrom: + secretKeyRef: + name: {{ template "couchdb.fullname" . 
}} + key: adminPassword + {{ end }} + - name: ENABLE_ANALYTICS + value: {{ .Values.globals.enableAnalytics | quote }} + - name: API_ENCRYPTION_KEY + value: {{ .Values.globals.apiEncryptionKey | quote }} + - name: HTTP_LOGGING + value: {{ .Values.services.automationWorkers.httpLogging | quote }} + - name: INTERNAL_API_KEY + valueFrom: + secretKeyRef: + name: {{ template "budibase.fullname" . }} + key: internalApiKey + - name: INTERNAL_API_KEY_FALLBACK + value: {{ .Values.globals.internalApiKeyFallback | quote }} + - name: JWT_SECRET + valueFrom: + secretKeyRef: + name: {{ template "budibase.fullname" . }} + key: jwtSecret + - name: JWT_SECRET_FALLBACK + value: {{ .Values.globals.jwtSecretFallback | quote }} + {{ if .Values.services.objectStore.region }} + - name: AWS_REGION + value: {{ .Values.services.objectStore.region }} + {{ end }} + - name: MINIO_ENABLED + value: {{ .Values.services.objectStore.minio | quote }} + - name: MINIO_ACCESS_KEY + valueFrom: + secretKeyRef: + name: {{ template "budibase.fullname" . }} + key: objectStoreAccess + - name: MINIO_SECRET_KEY + valueFrom: + secretKeyRef: + name: {{ template "budibase.fullname" . }} + key: objectStoreSecret + - name: CLOUDFRONT_CDN + value: {{ .Values.services.objectStore.cloudfront.cdn | quote }} + - name: CLOUDFRONT_PUBLIC_KEY_ID + value: {{ .Values.services.objectStore.cloudfront.publicKeyId | quote }} + - name: CLOUDFRONT_PRIVATE_KEY_64 + value: {{ .Values.services.objectStore.cloudfront.privateKey64 | quote }} + - name: MINIO_URL + value: {{ .Values.services.objectStore.url }} + - name: PLUGIN_BUCKET_NAME + value: {{ .Values.services.objectStore.pluginBucketName | quote }} + - name: APPS_BUCKET_NAME + value: {{ .Values.services.objectStore.appsBucketName | quote }} + - name: GLOBAL_BUCKET_NAME + value: {{ .Values.services.objectStore.globalBucketName | quote }} + - name: BACKUPS_BUCKET_NAME + value: {{ .Values.services.objectStore.backupsBucketName | quote }} + - name: PORT + value: {{ .Values.services.automationWorkers.port | quote }} + {{ if .Values.services.worker.publicApiRateLimitPerSecond }} + - name: API_REQ_LIMIT_PER_SEC + value: {{ .Values.globals.automationWorkers.publicApiRateLimitPerSecond | quote }} + {{ end }} + - name: MULTI_TENANCY + value: {{ .Values.globals.multiTenancy | quote }} + - name: OFFLINE_MODE + value: {{ .Values.globals.offlineMode | quote }} + - name: LOG_LEVEL + value: {{ .Values.services.automationWorkers.logLevel | quote }} + - name: REDIS_PASSWORD + value: {{ .Values.services.redis.password }} + - name: REDIS_URL + {{ if .Values.services.redis.url }} + value: {{ .Values.services.redis.url }} + {{ else }} + value: redis-service:{{ .Values.services.redis.port }} + {{ end }} + - name: SELF_HOSTED + value: {{ .Values.globals.selfHosted | quote }} + - name: POSTHOG_TOKEN + value: {{ .Values.globals.posthogToken | quote }} + - name: WORKER_URL + value: http://worker-service:{{ .Values.services.worker.port }} + - name: PLATFORM_URL + value: {{ .Values.globals.platformUrl | quote }} + - name: ACCOUNT_PORTAL_URL + value: {{ .Values.globals.accountPortalUrl | quote }} + - name: ACCOUNT_PORTAL_API_KEY + value: {{ .Values.globals.accountPortalApiKey | quote }} + - name: COOKIE_DOMAIN + value: {{ .Values.globals.cookieDomain | quote }} + - name: HTTP_MIGRATIONS + value: {{ .Values.globals.httpMigrations | quote }} + - name: GOOGLE_CLIENT_ID + value: {{ .Values.globals.google.clientId | quote }} + - name: GOOGLE_CLIENT_SECRET + value: {{ .Values.globals.google.secret | quote }} + - name: 
AUTOMATION_MAX_ITERATIONS + value: {{ .Values.globals.automationMaxIterations | quote }} + - name: TENANT_FEATURE_FLAGS + value: {{ .Values.globals.tenantFeatureFlags | quote }} + - name: ENCRYPTION_KEY + value: {{ .Values.globals.bbEncryptionKey | quote }} + {{ if .Values.globals.bbAdminUserEmail }} + - name: BB_ADMIN_USER_EMAIL + value: {{ .Values.globals.bbAdminUserEmail | quote }} + {{ end }} + {{ if .Values.globals.bbAdminUserPassword }} + - name: BB_ADMIN_USER_PASSWORD + value: {{ .Values.globals.bbAdminUserPassword | quote }} + {{ end }} + {{ if .Values.globals.pluginsDir }} + - name: PLUGINS_DIR + value: {{ .Values.globals.pluginsDir | quote }} + {{ end }} + {{ if .Values.services.automationWorkers.nodeDebug }} + - name: NODE_DEBUG + value: {{ .Values.services.automationWorkers.nodeDebug | quote }} + {{ end }} + {{ if .Values.globals.datadogApmEnabled }} + - name: DD_LOGS_INJECTION + value: {{ .Values.globals.datadogApmEnabled | quote }} + - name: DD_APM_ENABLED + value: {{ .Values.globals.datadogApmEnabled | quote }} + - name: DD_APM_DD_URL + value: https://trace.agent.datadoghq.eu + {{ end }} + {{ if .Values.globals.globalAgentHttpProxy }} + - name: GLOBAL_AGENT_HTTP_PROXY + value: {{ .Values.globals.globalAgentHttpProxy | quote }} + {{ end }} + {{ if .Values.globals.globalAgentHttpsProxy }} + - name: GLOBAL_AGENT_HTTPS_PROXY + value: {{ .Values.globals.globalAgentHttpsProxy | quote }} + {{ end }} + {{ if .Values.globals.globalAgentNoProxy }} + - name: GLOBAL_AGENT_NO_PROXY + value: {{ .Values.globals.globalAgentNoProxy | quote }} + {{ end }} + {{ if .Values.services.tlsRejectUnauthorized }} + - name: NODE_TLS_REJECT_UNAUTHORIZED + value: {{ .Values.services.tlsRejectUnauthorized }} + {{ end }} + - name: APP_FEATURES + value: "automations" + {{- range .Values.services.automationWorkers.extraEnv }} + - name: {{ .name }} + value: {{ .value | quote }} + {{- end }} + + image: budibase/apps:{{ .Values.globals.appVersion | default .Chart.AppVersion }} + imagePullPolicy: Always + {{- if .Values.services.automationWorkers.startupProbe }} + {{- with .Values.services.automationWorkers.startupProbe }} + startupProbe: + {{- toYaml . | nindent 10 }} + {{- end }} + {{- end }} + {{- if .Values.services.automationWorkers.livenessProbe }} + {{- with .Values.services.automationWorkers.livenessProbe }} + livenessProbe: + {{- toYaml . | nindent 10 }} + {{- end }} + {{- end }} + {{- if .Values.services.automationWorkers.readinessProbe }} + {{- with .Values.services.automationWorkers.readinessProbe }} + readinessProbe: + {{- toYaml . | nindent 10 }} + {{- end }} + {{- end }} + name: bbautomationworker + ports: + - containerPort: {{ .Values.services.automationWorkers.port }} + {{ with .Values.services.automationWorkers.resources }} + resources: + {{- toYaml . | nindent 10 }} + {{ end }} + {{ if .Values.services.automationWorkers.command }} + command: + {{- toYaml .Values.services.automationWorkers.command | nindent 10 }} + {{ end }} + {{ if .Values.services.automationWorkers.args }} + args: + {{- toYaml .Values.services.automationWorkers.args | nindent 10 }} + {{ end }} + {{- with .Values.affinity }} + affinity: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.tolerations }} + tolerations: + {{- toYaml . 
| nindent 8 }} + {{- end }} + {{ if .Values.schedulerName }} + schedulerName: {{ .Values.schedulerName | quote }} + {{ end }} + {{ if .Values.imagePullSecrets }} + imagePullSecrets: + {{- toYaml .Values.imagePullSecrets | nindent 6 }} + {{ end }} + restartPolicy: Always + serviceAccountName: "" + {{ if .Values.services.automationWorkers.ndots }} + dnsConfig: + options: + - name: ndots + value: {{ .Values.services.automationWorkers.ndots | quote }} + {{ end }} +status: {} +{{- end }} \ No newline at end of file diff --git a/charts/budibase/templates/automation-worker-service-hpa.yaml b/charts/budibase/templates/automation-worker-service-hpa.yaml new file mode 100644 index 0000000000..f29223b61b --- /dev/null +++ b/charts/budibase/templates/automation-worker-service-hpa.yaml @@ -0,0 +1,32 @@ +{{- if .Values.services.automationWorkers.autoscaling.enabled }} +apiVersion: {{ ternary "autoscaling/v2" "autoscaling/v2beta2" (.Capabilities.APIVersions.Has "autoscaling/v2") }} +kind: HorizontalPodAutoscaler +metadata: + name: {{ include "budibase.fullname" . }}-apps + labels: + {{- include "budibase.labels" . | nindent 4 }} +spec: + scaleTargetRef: + apiVersion: apps/v1 + kind: Deployment + name: automation-worker-service + minReplicas: {{ .Values.services.automationWorkers.autoscaling.minReplicas }} + maxReplicas: {{ .Values.services.automationWorkers.autoscaling.maxReplicas }} + metrics: + {{- if .Values.services.automationWorkers.autoscaling.targetCPUUtilizationPercentage }} + - type: Resource + resource: + name: cpu + target: + type: Utilization + averageUtilization: {{ .Values.services.automationWorkers.autoscaling.targetCPUUtilizationPercentage }} + {{- end }} + {{- if .Values.services.automationWorkers.autoscaling.targetMemoryUtilizationPercentage }} + - type: Resource + resource: + name: memory + target: + type: Utilization + averageUtilization: {{ .Values.services.automationWorkers.autoscaling.targetMemoryUtilizationPercentage }} + {{- end }} +{{- end }} diff --git a/charts/budibase/templates/couchdb-backup.yaml b/charts/budibase/templates/couchdb-backup.yaml index 7396f97476..6f842537a7 100644 --- a/charts/budibase/templates/couchdb-backup.yaml +++ b/charts/budibase/templates/couchdb-backup.yaml @@ -2,10 +2,6 @@ apiVersion: apps/v1 kind: Deployment metadata: - annotations: - kompose.cmd: kompose convert - kompose.version: 1.21.0 (992df58d8) - creationTimestamp: null labels: app.kubernetes.io/name: couchdb-backup name: couchdb-backup @@ -18,10 +14,6 @@ spec: type: Recreate template: metadata: - annotations: - kompose.cmd: kompose convert - kompose.version: 1.21.0 (992df58d8) - creationTimestamp: null labels: app.kubernetes.io/name: couchdb-backup spec: diff --git a/charts/budibase/templates/hpa.yaml b/charts/budibase/templates/hpa.yaml deleted file mode 100644 index 2f901b4664..0000000000 --- a/charts/budibase/templates/hpa.yaml +++ /dev/null @@ -1,28 +0,0 @@ -{{- if .Values.autoscaling.enabled }} -apiVersion: autoscaling/v2beta1 -kind: HorizontalPodAutoscaler -metadata: - name: {{ include "budibase.fullname" . }} - labels: - {{- include "budibase.labels" . | nindent 4 }} -spec: - scaleTargetRef: - apiVersion: apps/v1 - kind: Deployment - name: {{ include "budibase.fullname" . 
}} - minReplicas: {{ .Values.autoscaling.minReplicas }} - maxReplicas: {{ .Values.autoscaling.maxReplicas }} - metrics: - {{- if .Values.autoscaling.targetCPUUtilizationPercentage }} - - type: Resource - resource: - name: cpu - targetAverageUtilization: {{ .Values.autoscaling.targetCPUUtilizationPercentage }} - {{- end }} - {{- if .Values.autoscaling.targetMemoryUtilizationPercentage }} - - type: Resource - resource: - name: memory - targetAverageUtilization: {{ .Values.autoscaling.targetMemoryUtilizationPercentage }} - {{- end }} -{{- end }} diff --git a/charts/budibase/templates/minio-data-persistentvolumeclaim.yaml b/charts/budibase/templates/minio-data-persistentvolumeclaim.yaml index abcf341bc5..c17001a436 100644 --- a/charts/budibase/templates/minio-data-persistentvolumeclaim.yaml +++ b/charts/budibase/templates/minio-data-persistentvolumeclaim.yaml @@ -2,7 +2,6 @@ apiVersion: v1 kind: PersistentVolumeClaim metadata: - creationTimestamp: null labels: io.kompose.service: minio-data name: minio-data diff --git a/charts/budibase/templates/minio-service-deployment.yaml b/charts/budibase/templates/minio-service-deployment.yaml index 41af2624bf..28e8eb9991 100644 --- a/charts/budibase/templates/minio-service-deployment.yaml +++ b/charts/budibase/templates/minio-service-deployment.yaml @@ -2,10 +2,6 @@ apiVersion: apps/v1 kind: Deployment metadata: - annotations: - kompose.cmd: kompose convert - kompose.version: 1.21.0 (992df58d8) - creationTimestamp: null labels: io.kompose.service: minio-service name: minio-service @@ -18,10 +14,6 @@ spec: type: Recreate template: metadata: - annotations: - kompose.cmd: kompose convert - kompose.version: 1.21.0 (992df58d8) - creationTimestamp: null labels: io.kompose.service: minio-service spec: @@ -46,11 +38,9 @@ spec: image: minio/minio imagePullPolicy: "" livenessProbe: - exec: - command: - - curl - - -f - - http://localhost:9000/minio/health/live + httpGet: + path: /minio/health/live + port: 9000 failureThreshold: 3 periodSeconds: 30 timeoutSeconds: 20 diff --git a/charts/budibase/templates/minio-service-service.yaml b/charts/budibase/templates/minio-service-service.yaml index cfdb22002b..ce89f1347c 100644 --- a/charts/budibase/templates/minio-service-service.yaml +++ b/charts/budibase/templates/minio-service-service.yaml @@ -2,10 +2,6 @@ apiVersion: v1 kind: Service metadata: - annotations: - kompose.cmd: kompose convert - kompose.version: 1.21.0 (992df58d8) - creationTimestamp: null labels: io.kompose.service: minio-service name: minio-service diff --git a/charts/budibase/templates/proxy-service-deployment.yaml b/charts/budibase/templates/proxy-service-deployment.yaml index 53bba6232d..233028cafe 100644 --- a/charts/budibase/templates/proxy-service-deployment.yaml +++ b/charts/budibase/templates/proxy-service-deployment.yaml @@ -2,12 +2,9 @@ apiVersion: apps/v1 kind: Deployment metadata: annotations: - kompose.cmd: kompose convert - kompose.version: 1.21.0 (992df58d8) {{ if .Values.services.proxy.deploymentAnnotations }} {{- toYaml .Values.services.proxy.deploymentAnnotations | indent 4 -}} {{ end }} - creationTimestamp: null labels: app.kubernetes.io/name: budibase-proxy {{ if .Values.services.proxy.deploymentLabels }} @@ -19,17 +16,15 @@ spec: selector: matchLabels: app.kubernetes.io/name: budibase-proxy + minReadySeconds: 10 strategy: type: RollingUpdate template: metadata: annotations: - kompose.cmd: kompose convert - kompose.version: 1.21.0 (992df58d8) {{ if .Values.services.proxy.templateAnnotations }} {{- toYaml 
.Values.services.proxy.templateAnnotations | indent 8 -}} {{ end }} - creationTimestamp: null labels: app.kubernetes.io/name: budibase-proxy {{ if .Values.services.proxy.templateLabels }} @@ -105,5 +100,19 @@ spec: {{ end }} restartPolicy: Always serviceAccountName: "" + {{ if .Values.services.proxy.command }} + command: + {{- toYaml .Values.services.proxy.command | nindent 8 }} + {{ end }} + {{ if .Values.services.proxy.args }} + args: + {{- toYaml .Values.services.proxy.args | nindent 8 }} + {{ end }} volumes: + {{ if .Values.services.proxy.ndots }} + dnsConfig: + options: + - name: ndots + value: {{ .Values.services.proxy.ndots | quote }} + {{ end }} status: {} diff --git a/charts/budibase/templates/proxy-service-hpa.yaml b/charts/budibase/templates/proxy-service-hpa.yaml new file mode 100644 index 0000000000..b6c6022008 --- /dev/null +++ b/charts/budibase/templates/proxy-service-hpa.yaml @@ -0,0 +1,32 @@ +{{- if .Values.services.proxy.autoscaling.enabled }} +apiVersion: {{ ternary "autoscaling/v2" "autoscaling/v2beta2" (.Capabilities.APIVersions.Has "autoscaling/v2") }} +kind: HorizontalPodAutoscaler +metadata: + name: {{ include "budibase.fullname" . }}-proxy + labels: + {{- include "budibase.labels" . | nindent 4 }} +spec: + scaleTargetRef: + apiVersion: apps/v1 + kind: Deployment + name: proxy-service + minReplicas: {{ .Values.services.proxy.autoscaling.minReplicas }} + maxReplicas: {{ .Values.services.proxy.autoscaling.maxReplicas }} + metrics: + {{- if .Values.services.proxy.autoscaling.targetCPUUtilizationPercentage }} + - type: Resource + resource: + name: cpu + target: + type: Utilization + averageUtilization: {{ .Values.services.proxy.autoscaling.targetCPUUtilizationPercentage }} + {{- end }} + {{- if .Values.services.proxy.autoscaling.targetMemoryUtilizationPercentage }} + - type: Resource + resource: + name: memory + target: + type: Utilization + averageUtilization: {{ .Values.services.proxy.autoscaling.targetMemoryUtilizationPercentage }} + {{- end }} +{{- end }} diff --git a/charts/budibase/templates/proxy-service-service.yaml b/charts/budibase/templates/proxy-service-service.yaml index bf2b199ee5..988c540599 100644 --- a/charts/budibase/templates/proxy-service-service.yaml +++ b/charts/budibase/templates/proxy-service-service.yaml @@ -1,10 +1,6 @@ apiVersion: v1 kind: Service metadata: - annotations: - kompose.cmd: kompose convert - kompose.version: 1.21.0 (992df58d8) - creationTimestamp: null labels: app.kubernetes.io/name: budibase-proxy name: proxy-service @@ -16,4 +12,4 @@ spec: selector: app.kubernetes.io/name: budibase-proxy status: - loadBalancer: {} \ No newline at end of file + loadBalancer: {} diff --git a/charts/budibase/templates/redis-data-persistentvolumeclaim.yaml b/charts/budibase/templates/redis-data-persistentvolumeclaim.yaml index 6f11492b58..e605a98376 100644 --- a/charts/budibase/templates/redis-data-persistentvolumeclaim.yaml +++ b/charts/budibase/templates/redis-data-persistentvolumeclaim.yaml @@ -2,7 +2,6 @@ apiVersion: v1 kind: PersistentVolumeClaim metadata: - creationTimestamp: null labels: io.kompose.service: redis-data name: redis-data diff --git a/charts/budibase/templates/redis-service-deployment.yaml b/charts/budibase/templates/redis-service-deployment.yaml index 9b39d14291..bca40d2237 100644 --- a/charts/budibase/templates/redis-service-deployment.yaml +++ b/charts/budibase/templates/redis-service-deployment.yaml @@ -2,10 +2,6 @@ apiVersion: apps/v1 kind: Deployment metadata: - annotations: - kompose.cmd: kompose convert - kompose.version: 
1.21.0 (992df58d8) - creationTimestamp: null labels: io.kompose.service: redis-service name: redis-service @@ -18,10 +14,6 @@ spec: type: Recreate template: metadata: - annotations: - kompose.cmd: kompose convert - kompose.version: 1.21.0 (992df58d8) - creationTimestamp: null labels: io.kompose.service: redis-service spec: diff --git a/charts/budibase/templates/redis-service-service.yaml b/charts/budibase/templates/redis-service-service.yaml index 55ca40ed88..b8f998925a 100644 --- a/charts/budibase/templates/redis-service-service.yaml +++ b/charts/budibase/templates/redis-service-service.yaml @@ -2,10 +2,6 @@ apiVersion: v1 kind: Service metadata: - annotations: - kompose.cmd: kompose convert - kompose.version: 1.21.0 (992df58d8) - creationTimestamp: null labels: io.kompose.service: redis-service name: redis-service diff --git a/charts/budibase/templates/worker-service-deployment.yaml b/charts/budibase/templates/worker-service-deployment.yaml index 5e0addb9dd..2f97508ae3 100644 --- a/charts/budibase/templates/worker-service-deployment.yaml +++ b/charts/budibase/templates/worker-service-deployment.yaml @@ -2,12 +2,9 @@ apiVersion: apps/v1 kind: Deployment metadata: annotations: - kompose.cmd: kompose convert - kompose.version: 1.21.0 (992df58d8) {{ if .Values.services.worker.deploymentAnnotations }} {{- toYaml .Values.services.worker.deploymentAnnotations | indent 4 -}} {{ end }} - creationTimestamp: null labels: io.kompose.service: worker-service {{ if .Values.services.worker.deploymentLabels }} @@ -24,12 +21,9 @@ spec: template: metadata: annotations: - kompose.cmd: kompose convert - kompose.version: 1.21.0 (992df58d8) {{ if .Values.services.worker.templateAnnotations }} {{- toYaml .Values.services.worker.templateAnnotations | indent 8 -}} {{ end }} - creationTimestamp: null labels: io.kompose.service: worker-service {{ if .Values.services.worker.templateLabels }} @@ -188,6 +182,10 @@ spec: - name: NODE_TLS_REJECT_UNAUTHORIZED value: {{ .Values.services.tlsRejectUnauthorized }} {{ end }} + {{- range .Values.services.worker.extraEnv }} + - name: {{ .name }} + value: {{ .value | quote }} + {{- end }} image: budibase/worker:{{ .Values.globals.appVersion | default .Chart.AppVersion }} imagePullPolicy: Always {{- if .Values.services.worker.startupProbe }} @@ -215,6 +213,14 @@ spec: resources: {{- toYaml . | nindent 10 }} {{ end }} + {{ if .Values.services.worker.command }} + command: + {{- toYaml .Values.services.worker.command | nindent 10 }} + {{ end }} + {{ if .Values.services.worker.args }} + args: + {{- toYaml .Values.services.worker.args | nindent 10 }} + {{ end }} {{- with .Values.affinity }} affinity: {{- toYaml . | nindent 8 }} @@ -232,4 +238,10 @@ spec: {{ end }} restartPolicy: Always serviceAccountName: "" + {{ if .Values.services.worker.ndots }} + dnsConfig: + options: + - name: ndots + value: {{ .Values.services.worker.ndots | quote }} + {{ end }} status: {} diff --git a/charts/budibase/templates/worker-service-hpa.yaml b/charts/budibase/templates/worker-service-hpa.yaml new file mode 100644 index 0000000000..a04cc259a0 --- /dev/null +++ b/charts/budibase/templates/worker-service-hpa.yaml @@ -0,0 +1,32 @@ +{{- if .Values.services.worker.autoscaling.enabled }} +apiVersion: {{ ternary "autoscaling/v2" "autoscaling/v2beta2" (.Capabilities.APIVersions.Has "autoscaling/v2") }} +kind: HorizontalPodAutoscaler +metadata: + name: {{ include "budibase.fullname" . }}-worker + labels: + {{- include "budibase.labels" . 
| nindent 4 }} +spec: + scaleTargetRef: + apiVersion: apps/v1 + kind: Deployment + name: worker-service + minReplicas: {{ .Values.services.worker.autoscaling.minReplicas }} + maxReplicas: {{ .Values.services.worker.autoscaling.maxReplicas }} + metrics: + {{- if .Values.services.worker.autoscaling.targetCPUUtilizationPercentage }} + - type: Resource + resource: + name: cpu + target: + type: Utilization + averageUtilization: {{ .Values.services.worker.autoscaling.targetCPUUtilizationPercentage }} + {{- end }} + {{- if .Values.services.worker.autoscaling.targetMemoryUtilizationPercentage }} + - type: Resource + resource: + name: memory + target: + type: Utilization + averageUtilization: {{ .Values.services.worker.autoscaling.targetMemoryUtilizationPercentage }} + {{- end }} +{{- end }} diff --git a/charts/budibase/templates/worker-service-service.yaml b/charts/budibase/templates/worker-service-service.yaml index a79ba1e04b..c5f56ba205 100644 --- a/charts/budibase/templates/worker-service-service.yaml +++ b/charts/budibase/templates/worker-service-service.yaml @@ -1,10 +1,6 @@ apiVersion: v1 kind: Service metadata: - annotations: - kompose.cmd: kompose convert - kompose.version: 1.21.0 (992df58d8) - creationTimestamp: null labels: io.kompose.service: worker-service name: worker-service diff --git a/charts/budibase/values.yaml b/charts/budibase/values.yaml index 857067d0f1..19b6c22d6c 100644 --- a/charts/budibase/values.yaml +++ b/charts/budibase/values.yaml @@ -1,56 +1,32 @@ -# Default values for budibase. -# This is a YAML-formatted file. -# Declare variables to be passed into your templates. - -image: - pullPolicy: IfNotPresent - # Overrides the image tag whose default is the chart appVersion. - tag: "" - +# -- Passed to all pods created by this chart. Should not ordinarily need to be changed. imagePullSecrets: [] +# -- Override the name of the deployment. Defaults to {{ .Chart.Name }}. nameOverride: "" -# fullnameOverride: "" serviceAccount: - # Specifies whether a service account should be created + # -- Specifies whether a service account should be created create: true - # Annotations to add to the service account + # -- Annotations to add to the service account annotations: {} - # The name of the service account to use. + # -- The name of the service account to use. # If not set and create is true, a name is generated using the fullname template name: "" -podAnnotations: {} - -podSecurityContext: - {} - # fsGroup: 2000 - -securityContext: - {} - # capabilities: - # drop: - # - ALL - # readOnlyRootFilesystem: true - # runAsNonRoot: true - # runAsUser: 1000 - service: + # -- Service type for the service that points to the main Budibase proxy pod. type: ClusterIP + # -- Port to expose on the service. port: 10000 ingress: + # -- Whether to create an Ingress resource pointing to the Budibase proxy. enabled: true - aws: false - nginx: true - certificateArn: "" + # -- What ingress class to use. className: "" - annotations: - kubernetes.io/ingress.class: nginx - nginx.ingress.kubernetes.io/client-max-body-size: 150M - nginx.ingress.kubernetes.io/proxy-body-size: 50m + # -- Standard hosts block for the Ingress resource. Defaults to pointing to the Budibase proxy. hosts: - - host: # change if using custom domain + # @ignore + - host: paths: - path: / pathType: Prefix @@ -60,361 +36,504 @@ ingress: port: number: 10000 -autoscaling: +awsAlbIngress: + # -- Whether to create an ALB Ingress resource pointing to the Budibase proxy. Requires the AWS ALB Ingress Controller. 
enabled: false - minReplicas: 1 - maxReplicas: 100 - targetCPUUtilizationPercentage: 80 - # targetMemoryUtilizationPercentage: 80 - -nodeSelector: {} + # -- If you want to use HTTPS, you'll need to create an ACM certificate and specify the ARN here. + certificateArn: "" +# -- Sets the tolerations for all pods created by this chart. Should not ordinarily need to be changed. +# See for more information +# on tolerations. tolerations: [] +# -- Sets the affinity for all pods created by this chart. Should not ordinarily +# need to be changed. See +# +# for more information on affinity. affinity: {} globals: - appVersion: "" # Use as an override to .Chart.AppVersion + # -- The version of Budibase to deploy. Defaults to what's specified by {{ .Chart.AppVersion }}. + # Ends up being used as the image version tag for the apps, proxy, and worker images. + appVersion: "" + # -- Sets the environment variable BUDIBASE_ENVIRONMENT for the apps and worker pods. Should not + # ordinarily need to be changed. budibaseEnv: PRODUCTION + # -- Sets what feature flags are enabled and for which tenants. Should not ordinarily need to be + # changed. tenantFeatureFlags: "*:LICENSING,*:USER_GROUPS,*:ONBOARDING_TOUR" + # -- Whether to enable analytics or not. You can read more about our analytics here: + # . enableAnalytics: "1" + # @ignore (only used if enableAnalytics is set to 1) posthogToken: "phc_bIjZL7oh2GEUd2vqvTBH8WvrX0fWTFQMs6H5KQxiUxU" - selfHosted: "1" # set to 0 for budibase cloud environment, set to 1 for self-hosted setup - multiTenancy: "0" # set to 0 to disable multiple orgs, set to 1 to enable multiple orgs - offlineMode: "0" # set to 1 to enable offline mode + # @ignore (should not normally need to be changed, we only set this to "0" + # when deploying to our Cloud environment) + selfHosted: "1" + # @ignore (doesn't work out of the box for self-hosted users, only meant for Budicloud) + multiTenancy: "0" + # @ignore (only currently used to determine whether to fetch licenses offline or not, should + # not normally need to be changed, and only applies to Enterprise customers) + offlineMode: "0" + # @ignore (only needs to be set in our cloud environment) accountPortalUrl: "" + # @ignore (only needs to be set in our cloud environment) accountPortalApiKey: "" + # -- Sets the domain attribute of the cookie that Budibase uses to store session information. + # See + # for details on why you might want to set this. cookieDomain: "" + # -- Set the `platformUrl` binding. You can also do this in Settings > Organisation if you are + # self-hosting. platformUrl: "" + # -- Whether or not to enable doing data migrations over the HTTP API. If this is set to "0", + # migrations are run on startup. You shouldn't ordinarily need to change this. httpMigrations: "0" + # -- Google OAuth settings. These can also be set in the Budibase UI, see + # for details. google: + # -- Client ID of your Google OAuth app. clientId: "" + # -- Client secret of your Google OAuth app. secret: "" + # -- The maximum number of iterations allowed for an automation loop step. You can read more about + # looping here: . automationMaxIterations: "200" - createSecrets: true # creates an internal API key, JWT secrets and redis password for you + # -- Create an internal API key, JWT secret, object store access key and + # secret, and store them in a Kubernetes `Secret`.
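# For illustration, a values override that serves Budibase through an AWS ALB
# instead of the standard Ingress might look like the sketch below. The ARN is
# a placeholder, and disabling the plain Ingress alongside it is an assumption,
# not a documented requirement:
#
#   ingress:
#     enabled: false
#   awsAlbIngress:
#     enabled: true
#     certificateArn: "arn:aws:acm:eu-west-1:111111111111:certificate/example"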
+ createSecrets: true - # if createSecrets is set to false, you can hard-code your secrets here + # -- Used for encrypting API keys and environment variables when stored in the database. + # You don't need to set this if `createSecrets` is true. apiEncryptionKey: "" + # -- API key used for internal Budibase API calls. You don't need to set this + # if `createSecrets` is true. internalApiKey: "" + # -- Secret used for signing JWTs. You don't need to set this if `createSecrets` is true. jwtSecret: "" - cdnUrl: "" - # fallback values used during live rotation + + # -- A fallback value for `internalApiKey`. If you're rotating your internal API key, you can + # set this to the old value for the duration of the rotation. internalApiKeyFallback: "" + # -- A fallback value for `jwtSecret`. If you're rotating your JWT secret, you can set this + # to the old value for the duration of the rotation. jwtSecretFallback: "" smtp: + # -- Whether to enable SMTP or not. enabled: false - -# globalAgentHttpProxy: -# globalAgentHttpsProxy: -# globalAgentNoProxy: + # -- The hostname of your SMTP server. + host: "" + # -- The port of your SMTP server. + port: "587" + # -- The email address to use in the "From:" field of emails sent by Budibase. + from: "" + # -- The username to use when authenticating with your SMTP server. + user: "" + # -- The password to use when authenticating with your SMTP server. + password: "" services: - budibaseVersion: latest + # -- The DNS suffix to use for service discovery. You only need to change this + # if you've configured your cluster to use a different DNS suffix. dns: cluster.local - # tlsRejectUnauthorized: 0 proxy: + # @ignore (you shouldn't need to change this) port: 10000 + # -- The number of proxy replicas to run. replicaCount: 1 + # @ignore (you should never need to change this) upstreams: apps: "http://app-service.{{ .Release.Namespace }}.svc.{{ .Values.services.dns }}:{{ .Values.services.apps.port }}" worker: "http://worker-service.{{ .Release.Namespace }}.svc.{{ .Values.services.dns }}:{{ .Values.services.worker.port }}" minio: "http://minio-service.{{ .Release.Namespace }}.svc.{{ .Values.services.dns }}:{{ .Values.services.objectStore.port }}" couchdb: "http://{{ .Release.Name }}-svc-couchdb:{{ .Values.services.couchdb.port }}" + # -- The resources to use for proxy pods. See + # + # for more information on how to set these. resources: {} + # -- Startup probe configuration for proxy pods. You shouldn't need to + # change this, but if you want to you can find more information here: + # + # @default -- HTTP health checks. startupProbe: + # @ignore httpGet: path: /health port: 10000 scheme: HTTP + # @ignore failureThreshold: 30 + # @ignore periodSeconds: 3 + # -- Readiness probe configuration for proxy pods. You shouldn't need to + # change this, but if you want to you can find more information here: + # + # @default -- HTTP health checks. readinessProbe: + # @ignore httpGet: path: /health port: 10000 scheme: HTTP + # @ignore periodSeconds: 3 + # @ignore failureThreshold: 1 + # -- Liveness probe configuration for proxy pods. You shouldn't need to + # change this, but if you want to you can find more information here: + # + # @default -- HTTP health checks.
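# To make the fallback values above concrete: a JWT secret rotation with
# `createSecrets: false` might look like the sketch below, with
# `jwtSecretFallback` cleared again once every service has picked up the new
# secret (both values are placeholders):
#
#   globals:
#     createSecrets: false
#     jwtSecret: "new-secret"
#     jwtSecretFallback: "old-secret"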
livenessProbe: + # @ignore httpGet: path: /health port: 10000 scheme: HTTP + # @ignore failureThreshold: 3 + # @ignore periodSeconds: 5 - # annotations: - # co.elastic.logs/module: nginx - # co.elastic.logs/fileset.stdout: access - # co.elastic.logs/fileset.stderr: error + autoscaling: + # -- Whether to enable horizontal pod autoscaling for the proxy service. + enabled: false + minReplicas: 1 + maxReplicas: 10 + # -- Target CPU utilization percentage for the proxy service. Note that + # for autoscaling to work, you will need to have metrics-server + # configured, and resources set for the proxy pods. + targetCPUUtilizationPercentage: 80 apps: + # @ignore (you shouldn't need to change this) port: 4002 + # -- The number of apps replicas to run. replicaCount: 1 + # -- The log level for the apps service. logLevel: info + # -- Whether or not to log HTTP requests to the apps service. httpLogging: 1 + # -- The resources to use for apps pods. See + # + # for more information on how to set these. resources: {} + # -- Extra environment variables to set for apps pods. Takes a list of + # name=value pairs. + extraEnv: [] + # -- Startup probe configuration for apps pods. You shouldn't need to + # change this, but if you want to you can find more information here: + # + # @default -- HTTP health checks. startupProbe: + # @ignore httpGet: path: /health port: 4002 scheme: HTTP + # @ignore failureThreshold: 30 + # @ignore periodSeconds: 3 + # -- Readiness probe configuration for apps pods. You shouldn't need to + # change this, but if you want to you can find more information here: + # + # @default -- HTTP health checks. readinessProbe: + # @ignore httpGet: path: /health port: 4002 scheme: HTTP + # @ignore periodSeconds: 3 + # @ignore failureThreshold: 1 + # -- Liveness probe configuration for apps pods. You shouldn't need to + # change this, but if you want to you can find more information here: + # + # @default -- HTTP health checks. livenessProbe: + # @ignore httpGet: path: /health port: 4002 scheme: HTTP + # @ignore failureThreshold: 3 + # @ignore periodSeconds: 5 - # nodeDebug: "" # set the value of NODE_DEBUG - # annotations: - # co.elastic.logs/multiline.type: pattern - # co.elastic.logs/multiline.pattern: '^[[:space:]]' - # co.elastic.logs/multiline.negate: false - # co.elastic.logs/multiline.match: after + autoscaling: + # -- Whether to enable horizontal pod autoscaling for the apps service. + enabled: false + minReplicas: 1 + maxReplicas: 10 + # -- Target CPU utilization percentage for the apps service. Note that for + # autoscaling to work, you will need to have metrics-server configured, + # and resources set for the apps pods. + targetCPUUtilizationPercentage: 80 + + automationWorkers: + # -- Whether or not to enable the automation worker service. If you disable this, + # automations will be processed by the apps service. + enabled: true + # @ignore (you shouldn't need to change this) + port: 4002 + # -- The number of automation worker replicas to run. + replicaCount: 1 + # -- The log level for the automation worker service. + logLevel: info + # -- The resources to use for automation worker pods. See + # + # for more information on how to set these. + resources: {} + # -- Extra environment variables to set for automation worker pods. Takes a list of + # name=value pairs. + extraEnv: [] + # -- Startup probe configuration for automation worker pods. You shouldn't + # need to change this, but if you want to you can find more information + # here: + # + # @default -- HTTP health checks. 
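# Putting the autoscaling blocks above together: a minimal override that turns
# on the new proxy HPA might look like the sketch below. It assumes
# metrics-server is installed and that resource requests are set, as the
# comments above note; the request figures are placeholders:
#
#   services:
#     proxy:
#       resources:
#         requests:
#           cpu: 250m
#           memory: 256Mi
#       autoscaling:
#         enabled: true
#         minReplicas: 2
#         maxReplicas: 10
#         targetCPUUtilizationPercentage: 80
#
# The same override pattern applies to the new `command`, `args`, and `ndots`
# values that the proxy and worker deployment templates now support.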
+ startupProbe: + # @ignore + httpGet: + path: /health + port: 4002 + scheme: HTTP + # @ignore + failureThreshold: 30 + # @ignore + periodSeconds: 3 + # -- Readiness probe configuration for automation worker pods. You shouldn't + # need to change this, but if you want to you can find more information + # here: + # + # @default -- HTTP health checks. + readinessProbe: + # @ignore + httpGet: + path: /health + port: 4002 + scheme: HTTP + # @ignore + periodSeconds: 3 + # @ignore + failureThreshold: 1 + # -- Liveness probe configuration for automation worker pods. You shouldn't + # need to change this, but if you want to you can find more information + # here: + # + # @default -- HTTP health checks. + livenessProbe: + # @ignore + httpGet: + path: /health + port: 4002 + scheme: HTTP + # @ignore + failureThreshold: 3 + # @ignore + periodSeconds: 30 + autoscaling: + # -- Whether to enable horizontal pod autoscaling for the automation worker service. + enabled: false + minReplicas: 1 + maxReplicas: 10 + # -- Target CPU utilization percentage for the automation worker service. + # Note that for autoscaling to work, you will need to have metrics-server + # configured, and resources set for the automation worker pods. + targetCPUUtilizationPercentage: 80 + worker: + # @ignore (you shouldn't need to change this) port: 4003 + # -- The number of worker replicas to run. replicaCount: 1 + # -- The log level for the worker service. logLevel: info + # -- Whether or not to log HTTP requests to the worker service. httpLogging: 1 + # -- The resources to use for worker pods. See + # + # for more information on how to set these. resources: {} + # -- Extra environment variables to set for worker pods. Takes a list of + # name=value pairs. + extraEnv: [] + # -- Startup probe configuration for worker pods. You shouldn't need to + # change this, but if you want to you can find more information here: + # + # @default -- HTTP health checks. startupProbe: + # @ignore httpGet: path: /health port: 4003 scheme: HTTP + # @ignore failureThreshold: 30 + # @ignore periodSeconds: 3 + # -- Readiness probe configuration for worker pods. You shouldn't need to + # change this, but if you want to you can find more information here: + # + # @default -- HTTP health checks. readinessProbe: + # @ignore httpGet: path: /health port: 4003 scheme: HTTP + # @ignore periodSeconds: 3 + # @ignore failureThreshold: 1 + # -- Liveness probe configuration for worker pods. You shouldn't need to + # change this, but if you want to you can find more information here: + # + # @default -- HTTP health checks.
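# The `extraEnv` lists above take Kubernetes-style name/value pairs, matching
# the `range` block added to the deployment templates. A sketch, with a
# placeholder variable rather than a documented setting:
#
#   services:
#     worker:
#       extraEnv:
#         - name: MY_CUSTOM_FLAG
#           value: "1"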
You can see what options are + # available to you by looking at the official CouchDB Helm chart: + # . enabled: true # url: "" # only change if pointing to existing couch server # user: "" # only change if pointing to existing couch server # password: "" # only change if pointing to existing couch server port: 5984 backup: + # -- Whether or not to enable periodic CouchDB backups. This works by replicating + # to another CouchDB instance. enabled: false - # target couchDB instance to back up to + # -- Target couchDB instance to back up to, either a hostname or an IP address. target: "" - # backup interval in seconds + # -- Backup interval in seconds interval: "" + # -- The resources to use for CouchDB backup pods. See + # + # for more information on how to set these. resources: {} redis: - enabled: true # disable if using external redis + # -- Whether or not to deploy a Redis pod into your cluster. + enabled: true + # -- Port to expose Redis on. port: 6379 + # @ignore (you should leave this as 1, we don't support clustering Redis) replicaCount: 1 - url: "" # only change if pointing to existing redis cluster and enabled: false - password: "budibase" # recommended to override if using built-in redis + # -- If you choose to run Redis externally to this chart, you can specify the + # connection details here. + url: "" + # -- The password to use when connecting to Redis. It's recommended that you change + # this from the default if you're running Redis in-cluster. + password: "budibase" + # -- How much persistent storage to allocate to Redis. storage: 100Mi - ## If defined, storageClassName: - ## If set to "-", storageClassName: "", which disables dynamic provisioning - ## If undefined (the default) or set to null, no storageClassName spec is - ## set, choosing the default provisioner. + # -- If defined, storageClassName: If set to "-", + # storageClassName: "", which disables dynamic provisioning If undefined + # (the default) or set to null, no storageClassName spec is set, choosing + # the default provisioner. storageClass: "" + # -- The resources to use for Redis pods. See + # + # for more information on how to set these. resources: {} objectStore: - # Set to false if using another object store such as S3 + # -- Set to false if using another object store, such as S3. You will need + # to set `services.objectStore.url` to point to your bucket if you do this. minio: true + # -- Whether to enable the Minio web console or not. If you're exposing + # Minio to the Internet (via a custom Ingress record, for example), you + # should set this to false. If you're only exposing Minio to your cluster, + # you can leave this as true. browser: true + # @ignore port: 9000 + # @ignore (you should leave this as 1, we don't support clustering Minio) replicaCount: 1 - accessKey: "" # AWS_ACCESS_KEY if using S3 or existing minio access key - secretKey: "" # AWS_SECRET_ACCESS_KEY if using S3 or existing minio secret - region: "" # AWS_REGION if using S3 or existing minio secret - url: "http://minio-service:9000" # only change if pointing to existing minio cluster or S3 and minio: false + # -- AWS_ACCESS_KEY if using S3 + accessKey: "" + # -- AWS_SECRET_ACCESS_KEY if using S3 + secretKey: "" + # -- AWS_REGION if using S3 + region: "" + # -- URL to use for object storage. Only change this if you're using an + # external object store, such as S3. Remember to set `minio: false` if you + # do this. + url: "http://minio-service:9000" + # -- How much storage to give Minio in its PersistentVolumeClaim. 
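# Tying the `redis` and `objectStore` keys above together: pointing Budibase at
# externally managed services instead of the in-cluster pods might look like
# the sketch below (hostnames, credentials, and region are placeholders):
#
#   services:
#     redis:
#       enabled: false
#       url: "my-redis.example.com:6379"
#       password: "example-password"
#     objectStore:
#       minio: false
#       url: "https://s3.eu-west-1.amazonaws.com"
#       region: "eu-west-1"
#       accessKey: "AKIAEXAMPLE"
#       secretKey: "example-secret-key"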
storage: 100Mi - ## If defined, storageClassName: - ## If set to "-", storageClassName: "", which disables dynamic provisioning - ## If undefined (the default) or set to null, no storageClassName spec is - ## set, choosing the default provisioner. + # -- If defined, storageClassName: If set to "-", + # storageClassName: "", which disables dynamic provisioning If undefined + # (the default) or set to null, no storageClassName spec is set, choosing + # the default provisioner. storageClass: "" + # -- The resources to use for Minio pods. See + # + # for more information on how to set these. resources: {} cloudfront: - # Set the url of a distribution to enable cloudfront + # -- Set the url of a distribution to enable cloudfront. cdn: "" - # ID of public key stored in cloudfront + # -- ID of public key stored in cloudfront. publicKeyId: "" - # Base64 encoded private key for the above public key + # -- Base64 encoded private key for the above public key. privateKey64: "" -# Override values in couchDB subchart +# Override values in couchDB subchart. We're only specifying the values we're changing. +# If you want to see all of the available values, see: +# https://github.com/apache/couchdb-helm/tree/couchdb-4.3.0/couchdb couchdb: - ## clusterSize is the initial size of the CouchDB cluster. + # -- The number of replicas to run in the CouchDB cluster. We set this to + # 1 by default to make things simpler, but you can set it to 3 if you need + # a high-availability CouchDB cluster. clusterSize: 1 - allowAdminParty: false - # Secret Management - createAdminSecret: true - - # adminUsername: budibase - # adminPassword: budibase - # adminHash: -pbkdf2-this_is_not_necessarily_secure_either - # cookieAuthSecret: admin - - ## When enabled, will deploy a networkpolicy that allows CouchDB pods to - ## communicate with each other for clustering and ingress on port 5984 - networkPolicy: - enabled: true - - # Use a service account - serviceAccount: - enabled: true - create: true - # name: - # imagePullSecrets: - # - name: myimagepullsecret - - ## The storage volume used by each Pod in the StatefulSet. If a - ## persistentVolume is not enabled, the Pods will use `emptyDir` ephemeral - ## local storage. Setting the storageClass attribute to "-" disables dynamic - ## provisioning of Persistent Volumes; leaving it unset will invoke the default - ## provisioner. - persistentVolume: - enabled: false - accessModes: - - ReadWriteOnce - size: 10Gi - storageClass: "" - - ## The CouchDB image + # -- We use a custom CouchDB image for running Budibase and we don't support + # using any other CouchDB image. You shouldn't change this, and if you do we + # can't guarantee that Budibase will work. image: - repository: couchdb - tag: 3.1.1 - pullPolicy: IfNotPresent - - ## Experimental integration with Lucene-powered fulltext search - enableSearch: true - searchImage: - repository: kocolosk/couchdb-search - tag: 0.2.0 - pullPolicy: IfNotPresent - - initImage: - repository: busybox - tag: latest + # @ignore + repository: budibase/couchdb + # @ignore + tag: v3.2.1 + # @ignore pullPolicy: Always - ## CouchDB is happy to spin up cluster nodes in parallel, but if you encounter - ## problems you can try setting podManagementPolicy to the StatefulSet default - ## `OrderedReady` - podManagementPolicy: Parallel + # @ignore + # This should remain false. We ship Clouseau ourselves as part of the + # budibase/couchdb image, and it's not possible to disable it because it's a + # core part of the Budibase experience. 
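# Following the `clusterSize` comment above, a high-availability override might
# look like the sketch below. The `persistentVolume` keys come from the
# upstream CouchDB chart that this section wraps (they appear among the
# defaults removed in this change) and are shown here as an assumption:
#
#   couchdb:
#     clusterSize: 3
#     persistentVolume:
#       enabled: true
#       size: 10Gi
#
# Note that `enableSearch` (just below) stays false either way; Clouseau ships
# inside the budibase/couchdb image itself.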
+ enableSearch: false - ## Optional pod annotations - annotations: {} - - ## Optional tolerations - tolerations: [] - - affinity: {} - - service: - # annotations: - enabled: true - type: ClusterIP - externalPort: 5984 - - ## An Ingress resource can provide name-based virtual hosting and TLS - ## termination among other things for CouchDB deployments which are accessed - ## from outside the Kubernetes cluster. - ## ref: https://kubernetes.io/docs/concepts/services-networking/ingress/ - ingress: - enabled: false - hosts: - - chart-example.local - path: / - annotations: - [] - # kubernetes.io/ingress.class: nginx - # kubernetes.io/tls-acme: "true" - tls: - # Secrets must be manually created in the namespace. - # - secretName: chart-example-tls - # hosts: - # - chart-example.local - - ## Optional resource requests and limits for the CouchDB container - ## ref: http://kubernetes.io/docs/user-guide/compute-resources/ - resources: - {} - # requests: - # cpu: 100m - # memory: 128Mi - # limits: - # cpu: 56 - # memory: 256Gi - - ## erlangFlags is a map that is passed to the Erlang VM as flags using the - ## ERL_FLAGS env. `name` and `setcookie` flags are minimally required to - ## establish connectivity between cluster nodes. - ## ref: http://erlang.org/doc/man/erl.html#init_flags - erlangFlags: - name: couchdb - setcookie: monster - - ## couchdbConfig will override default CouchDB configuration settings. - ## The contents of this map are reformatted into a .ini file laid down - ## by a ConfigMap object. - ## ref: http://docs.couchdb.org/en/latest/config/index.html couchdbConfig: couchdb: - uuid: budibase-couchdb # REQUIRED: Unique identifier for this CouchDB server instance - # cluster: - # q: 8 # Create 8 shards for each database - chttpd: - bind_address: any - # chttpd.require_valid_user disables all the anonymous requests to the port - # 5984 when is set to true. - require_valid_user: false - - # Kubernetes local cluster domain. - # This is used to generate FQDNs for peers when joining the CouchDB cluster. - dns: - clusterDomainSuffix: cluster.local - - ## Configure liveness and readiness probe values - ## Ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes - # FOR COUCHDB - livenessProbe: - failureThreshold: 3 - initialDelaySeconds: 0 - periodSeconds: 10 - successThreshold: 1 - timeoutSeconds: 1 - readinessProbe: - failureThreshold: 3 - initialDelaySeconds: 0 - periodSeconds: 10 - successThreshold: 1 - timeoutSeconds: 1 + # -- Unique identifier for this CouchDB server instance. You shouldn't need + # to change this. + uuid: budibase-couchdb diff --git a/docs/CONTRIBUTING.md b/docs/CONTRIBUTING.md index 77afd9453b..311afbe706 100644 --- a/docs/CONTRIBUTING.md +++ b/docs/CONTRIBUTING.md @@ -84,13 +84,13 @@ Component libraries are collections of components as well as the definition of t - If the project diverges from your branch, please rebase instead of merging. This makes the commit graph easier to read. -- Once your work is completed, please raise a PR against the `develop` branch with some information about what has changed and why. +- Once your work is completed, please raise a PR against the `master` branch with some information about what has changed and why. ### Getting Started For Contributors #### 1. 
Prerequisites -- NodeJS version `18.x.x` +- NodeJS version `20.x.x` - Python version `3.x` ### Using asdf (recommended) @@ -246,7 +246,7 @@ From here - to develop a change in pro, you can follow the below flow: cd packages/pro # get the base branch you are working from (same as monorepo) git fetch -git checkout +git checkout master # create a branch, named the same as the branch in your monorepo git checkout -b ... make changes diff --git a/docs/DEV-SETUP-DEBIAN.md b/docs/DEV-SETUP-DEBIAN.md deleted file mode 100644 index e098862c64..0000000000 --- a/docs/DEV-SETUP-DEBIAN.md +++ /dev/null @@ -1,76 +0,0 @@ -## Dev Environment on Debian 11 - -### Install NVM & Node 14 - -NVM documentation: https://github.com/nvm-sh/nvm#installing-and-updating - -Install NVM - -``` -curl -o- https://raw.githubusercontent.com/nvm-sh/nvm/v0.39.0/install.sh | bash -``` - -Install Node 14 - -``` -nvm install 14 -``` - -### Install npm requirements - -``` -npm install -g yarn jest lerna -``` - -### Install Docker and Docker Compose - -``` -apt install docker.io -pip3 install docker-compose -``` - -### Clone the repo - -``` -git clone https://github.com/Budibase/budibase.git -``` - -### Check Versions - -This setup process was tested on Debian 11 (bullseye) with version numbers show below. Your mileage may vary using anything else. - -- Docker: 20.10.5 -- Docker-Compose: 1.29.2 -- Node: v14.20.1 -- Yarn: 1.22.19 -- Lerna: 5.1.4 - -### Build - -``` -cd budibase -yarn setup -``` - -The yarn setup command runs several build steps i.e. - -``` -node ./hosting/scripts/setup.js && yarn && yarn build && yarn dev -``` - -So this command will actually run the application in dev mode. It creates .env files under `./packages/server` and `./packages/worker` and runs docker containers for each service via docker-compose. - -The dev version will be available on port 10000 i.e. - -http://127.0.0.1:10000/builder/admin - -### File descriptor issues with Vite and Chrome in Linux - -If your dev environment stalls forever, with some network requests stuck in flight, it's likely that Chrome is trying to open more file descriptors than your system allows. -To fix this, apply the following tweaks. - -Debian based distros: -Add `* - nofile 65536` to `/etc/security/limits.conf`. - -Arch: -Add `DefaultLimitNOFILE=65536` to `/etc/systemd/system.conf`. diff --git a/docs/DEV-SETUP-MACOSX.md b/docs/DEV-SETUP-MACOSX.md deleted file mode 100644 index 0e13d540b3..0000000000 --- a/docs/DEV-SETUP-MACOSX.md +++ /dev/null @@ -1,84 +0,0 @@ -## Dev Environment on MAC OSX 12 (Monterey) - -### Install Homebrew - -Install instructions [here](https://brew.sh/) - -| **NOTE**: If you are working on a M1 Apple Silicon which is running Z shell, you could need to add -`eval $(/opt/homebrew/bin/brew shellenv)` line to your `.zshrc`. This will make your zsh to find the apps you install -through brew. - -### Install Node - -Budibase requires a recent version of node 14: - -``` -brew install node npm -node -v -``` - -### Install npm requirements - -``` -npm install -g yarn jest lerna -``` - -### Install Docker and Docker Compose - -``` -brew install docker docker-compose -``` - -### Clone the repo - -``` -git clone https://github.com/Budibase/budibase.git -``` - -### Check Versions - -This setup process was tested on Mac OSX 12 (Monterey) with version numbers shown below. Your mileage may vary using anything else. 
- -- Docker: 20.10.14 -- Docker-Compose: 2.6.0 -- Node: 14.20.1 -- Yarn: 1.22.19 -- Lerna: 5.1.4 - -### Build - -``` -cd budibase -yarn setup -``` - -The yarn setup command runs several build steps i.e. - -``` -node ./hosting/scripts/setup.js && yarn && yarn build && yarn dev -``` - -So this command will actually run the application in dev mode. It creates .env files under `./packages/server` and `./packages/worker` and runs docker containers for each service via docker-compose. - -The dev version will be available on port 10000 i.e. - -http://127.0.0.1:10000/builder/admin - -| **NOTE**: If you are working on a M1 Apple Silicon, you will need to uncomment `# platform: linux/amd64` line in -[hosting/docker-compose-dev.yaml](../hosting/docker-compose.dev.yaml) - -### Troubleshootings - -#### Yarn setup errors - -If there are errors with the `yarn setup` command, you can try installing nvm and node 14. This is the same as the instructions for Debian 11. - -#### Node 14.20.1 not supported for arm64 - -If you are working with M1 or M2 Mac and trying the Node installation via `nvm`, probably you will find the error `curl: (22) The requested URL returned error: 404`. - -Version `v14.20.1` is not supported for arm64; in order to use it, you can switch the CPU architecture for this by the following command: - -```shell -arch -x86_64 zsh #Run this before nvm install -``` diff --git a/docs/DEV-SETUP-WINDOWS.md b/docs/DEV-SETUP-WINDOWS.md deleted file mode 100644 index f26a5a0882..0000000000 --- a/docs/DEV-SETUP-WINDOWS.md +++ /dev/null @@ -1,92 +0,0 @@ -## Dev Environment on Windows 10/11 (WSL2) - -### Install WSL with Ubuntu LTS - -Enable WSL 2 on Windows 10/11 for docker support. - -``` -wsl --set-default-version 2 -``` - -Install Ubuntu LTS. - -``` -wsl --install Ubuntu -``` - -Or follow the instruction here: -https://learn.microsoft.com/en-us/windows/wsl/install - -### Install Docker in windows - -Download the installer from docker and install it. - -Check this url for more detailed instructions: -https://docs.docker.com/desktop/install/windows-install/ - -You should follow the next steps from within the Ubuntu terminal. - -### Install NVM & Node 14 - -NVM documentation: https://github.com/nvm-sh/nvm#installing-and-updating - -Install NVM - -``` -curl -o- https://raw.githubusercontent.com/nvm-sh/nvm/v0.39.0/install.sh | bash -``` - -Install Node 14 - -``` -nvm install 14 -``` - -### Install npm requirements - -``` -npm install -g yarn jest lerna -``` - -### Clone the repo - -``` -git clone https://github.com/Budibase/budibase.git -``` - -### Check Versions - -This setup process was tested on Windows 11 with version numbers show below. Your mileage may vary using anything else. - -- Docker: 20.10.7 -- Docker-Compose: 2.10.2 -- Node: v14.20.1 -- Yarn: 1.22.19 -- Lerna: 5.5.4 - -### Build - -``` -cd budibase -yarn setup -``` - -The yarn setup command runs several build steps i.e. - -``` -node ./hosting/scripts/setup.js && yarn && yarn build && yarn dev -``` - -So this command will actually run the application in dev mode. It creates .env files under `./packages/server` and `./packages/worker` and runs docker containers for each service via docker-compose. - -The dev version will be available on port 10000 i.e. - -http://127.0.0.1:10000/builder/admin - -### Working with the code - -Here are the instructions to work on the application from within Visual Studio Code (in Windows) through the WSL. 
All the commands and files are within the Ubuntu system and it should run as if you were working on a Linux machine. - -https://code.visualstudio.com/docs/remote/wsl - -Note you will be able to run the application from within the WSL terminal and you will be able to access the application from the a browser in Windows. diff --git a/eslint-local-rules/index.js b/eslint-local-rules/index.js new file mode 100644 index 0000000000..a4866bc1f8 --- /dev/null +++ b/eslint-local-rules/index.js @@ -0,0 +1,88 @@ +module.exports = { + "no-budibase-imports": { + create: function (context) { + return { + ImportDeclaration(node) { + const importPath = node.source.value + + if ( + /^@budibase\/[^/]+\/.*$/.test(importPath) && + importPath !== "@budibase/backend-core/tests" && + importPath !== "@budibase/string-templates/test/utils" + ) { + context.report({ + node, + message: `Importing from @budibase is not allowed, except for @budibase/backend-core/tests and @budibase/string-templates/test/utils.`, + }) + } + }, + } + }, + }, + "no-test-com": { + meta: { + type: "problem", + docs: { + description: + "disallow the use of 'test.com' in strings and replace it with 'example.com'", + }, + schema: [], + fixable: "code", + }, + create: function (context) { + return { + Literal(node) { + if ( + typeof node.value === "string" && + node.value.includes("test.com") + ) { + context.report({ + node, + message: + "test.com is a privately owned domain and could point anywhere, use example.com instead.", + fix: function (fixer) { + const newText = node.raw.replace(/test\.com/g, "example.com") + return fixer.replaceText(node, newText) + }, + }) + } + }, + } + }, + }, + "email-domain-example-com": { + meta: { + type: "problem", + docs: { + description: + "enforce using the example.com domain for generator.email calls", + }, + fixable: "code", + schema: [], + }, + create: function (context) { + return { + CallExpression(node) { + if ( + node.callee.type === "MemberExpression" && + node.callee.object.name === "generator" && + node.callee.property.name === "email" && + node.arguments.length === 0 + ) { + context.report({ + node, + message: + "Prefer using generator.email with the domain \"{ domain: 'example.com' }\".", + fix: function (fixer) { + return fixer.replaceText( + node, + 'generator.email({ domain: "example.com" })' + ) + }, + }) + } + }, + } + }, + }, +} diff --git a/examples/nextjs-api-sales/package.json b/examples/nextjs-api-sales/package.json index 41ce52e952..9303874a77 100644 --- a/examples/nextjs-api-sales/package.json +++ b/examples/nextjs-api-sales/package.json @@ -22,6 +22,6 @@ "@types/react": "17.0.39", "eslint": "8.10.0", "eslint-config-next": "12.1.0", - "typescript": "4.6.2" + "typescript": "5.2.2" } -} \ No newline at end of file +} diff --git a/globalSetup.ts b/globalSetup.ts new file mode 100644 index 0000000000..4cb542a3c3 --- /dev/null +++ b/globalSetup.ts @@ -0,0 +1,25 @@ +import { GenericContainer, Wait } from "testcontainers" + +export default async function setup() { + await new GenericContainer("budibase/couchdb") + .withExposedPorts(5984) + .withEnvironment({ + COUCHDB_PASSWORD: "budibase", + COUCHDB_USER: "budibase", + }) + .withCopyContentToContainer([ + { + content: ` + [log] + level = warn + `, + target: "/opt/couchdb/etc/local.d/test-couchdb.ini", + }, + ]) + .withWaitStrategy( + Wait.forSuccessfulCommand( + "curl http://budibase:budibase@localhost:5984/_up" + ).withStartupTimeout(20000) + ) + .start() +} diff --git a/hosting/couchdb/Dockerfile b/hosting/couchdb/Dockerfile index 
792856cac7..36abc2dd19 100644 --- a/hosting/couchdb/Dockerfile +++ b/hosting/couchdb/Dockerfile @@ -1,4 +1,101 @@ -FROM couchdb:3.2.1 +# Modified from https://github.com/apache/couchdb-docker/blob/main/3.3.3/Dockerfile +# +# Everything in this `base` image is adapted from the official `couchdb` image's +# Dockerfile. Only modifications related to upgrading from Debian bullseye to +# bookworm have been included. The `runner` image contains Budibase's +# customisations to the image, e.g. adding Clouseau. +FROM node:20-slim AS base + +# Add CouchDB user account to make sure the IDs are assigned consistently +RUN groupadd -g 5984 -r couchdb && useradd -u 5984 -d /opt/couchdb -g couchdb couchdb + +# be sure GPG and apt-transport-https are available and functional +RUN set -ex; \ + apt-get update; \ + apt-get install -y --no-install-recommends \ + apt-transport-https \ + ca-certificates \ + dirmngr \ + gnupg \ + ; \ + rm -rf /var/lib/apt/lists/* + +# grab tini for signal handling and zombie reaping +# see https://github.com/apache/couchdb-docker/pull/28#discussion_r141112407 +RUN set -eux; \ + apt-get update; \ + apt-get install -y --no-install-recommends tini; \ + rm -rf /var/lib/apt/lists/*; \ + tini --version + +# http://docs.couchdb.org/en/latest/install/unix.html#installing-the-apache-couchdb-packages +ENV GPG_COUCH_KEY \ +# gpg: rsa8192 2015-01-19 The Apache Software Foundation (Package repository signing key) + 390EF70BB1EA12B2773962950EE62FB37A00258D +RUN set -eux; \ + apt-get update; \ + apt-get install -y curl; \ + export GNUPGHOME="$(mktemp -d)"; \ + curl -fL -o keys.asc https://couchdb.apache.org/repo/keys.asc; \ + gpg --batch --import keys.asc; \ + gpg --batch --export "${GPG_COUCH_KEY}" > /usr/share/keyrings/couchdb-archive-keyring.gpg; \ + command -v gpgconf && gpgconf --kill all || :; \ + rm -rf "$GNUPGHOME"; \ + apt-key list; \ + apt purge -y --autoremove curl; \ + rm -rf /var/lib/apt/lists/* + +ENV COUCHDB_VERSION 3.3.3 + +RUN . /etc/os-release; \ + echo "deb [signed-by=/usr/share/keyrings/couchdb-archive-keyring.gpg] https://apache.jfrog.io/artifactory/couchdb-deb/ ${VERSION_CODENAME} main" | \ + tee /etc/apt/sources.list.d/couchdb.list >/dev/null + +# https://github.com/apache/couchdb-pkg/blob/master/debian/README.Debian +RUN set -eux; \ + apt-get update; \ + \ + echo "couchdb couchdb/mode select none" | debconf-set-selections; \ +# we DO want recommends this time + DEBIAN_FRONTEND=noninteractive apt-get install -y --allow-downgrades --allow-remove-essential --allow-change-held-packages \ + couchdb="$COUCHDB_VERSION"~bookworm \ + ; \ +# Undo symlinks to /var/log and /var/lib + rmdir /var/lib/couchdb /var/log/couchdb; \ + rm /opt/couchdb/data /opt/couchdb/var/log; \ + mkdir -p /opt/couchdb/data /opt/couchdb/var/log; \ + chown couchdb:couchdb /opt/couchdb/data /opt/couchdb/var/log; \ + chmod 777 /opt/couchdb/data /opt/couchdb/var/log; \ +# Remove file that sets logging to a file + rm /opt/couchdb/etc/default.d/10-filelog.ini; \ +# Check we own everything in /opt/couchdb. Matches the command in dockerfile_entrypoint.sh + find /opt/couchdb \! \( -user couchdb -group couchdb \) -exec chown -f couchdb:couchdb '{}' +; \ +# Setup directories and permissions for config. Technically these could be 555 and 444 respectively +# but we keep them as 755 and 644 for consistency with CouchDB defaults and the dockerfile_entrypoint.sh. + find /opt/couchdb/etc -type d ! -perm 0755 -exec chmod -f 0755 '{}' +; \ + find /opt/couchdb/etc -type f !
-perm 0644 -exec chmod -f 0644 '{}' +; \ +# only local.d needs to be writable for the docker_entrypoint.sh + chmod -f 0777 /opt/couchdb/etc/local.d; \ +# apt clean-up + rm -rf /var/lib/apt/lists/*; + +# Add configuration +COPY --chown=couchdb:couchdb couch/10-docker-default.ini /opt/couchdb/etc/default.d/ +# COPY --chown=couchdb:couchdb vm.args /opt/couchdb/etc/ + +COPY docker-entrypoint.sh /usr/local/bin +RUN ln -s usr/local/bin/docker-entrypoint.sh /docker-entrypoint.sh # backwards compat +ENTRYPOINT ["tini", "--", "/docker-entrypoint.sh"] + +VOLUME /opt/couchdb/data + +# 5984: Main CouchDB endpoint +# 4369: Erlang portmap daemon (epmd) +# 9100: CouchDB cluster communication port +EXPOSE 5984 4369 9100 +CMD ["/opt/couchdb/bin/couchdb"] + +FROM base as runner ENV COUCHDB_USER admin ENV COUCHDB_PASSWORD admin @@ -6,9 +103,9 @@ EXPOSE 5984 RUN apt-get update && apt-get install -y --no-install-recommends software-properties-common wget unzip curl && \ wget -O - https://packages.adoptium.net/artifactory/api/gpg/key/public | apt-key add - && \ - apt-add-repository 'deb http://security.debian.org/debian-security bullseye-security/updates main' && \ + apt-add-repository 'deb http://security.debian.org/debian-security bookworm-security/updates main' && \ apt-add-repository 'deb http://archive.debian.org/debian stretch-backports main' && \ - apt-add-repository 'deb https://packages.adoptium.net/artifactory/deb bullseye main' && \ + apt-add-repository 'deb https://packages.adoptium.net/artifactory/deb bookworm main' && \ apt-get update && apt-get install -y --no-install-recommends temurin-8-jdk && \ rm -rf /var/lib/apt/lists/ @@ -29,7 +126,6 @@ WORKDIR /opt/couchdb ADD couch/vm.args couch/local.ini ./etc/ WORKDIR / -ADD build-target-paths.sh . ADD runner.sh ./bbcouch-runner.sh -RUN chmod +x ./bbcouch-runner.sh /opt/clouseau/bin/clouseau ./build-target-paths.sh +RUN chmod +x ./bbcouch-runner.sh /opt/clouseau/bin/clouseau CMD ["./bbcouch-runner.sh"] diff --git a/hosting/couchdb/build-target-paths.sh b/hosting/couchdb/build-target-paths.sh deleted file mode 100644 index 67e1765ca8..0000000000 --- a/hosting/couchdb/build-target-paths.sh +++ /dev/null @@ -1,24 +0,0 @@ -#!/bin/bash - -echo ${TARGETBUILD} > /buildtarget.txt -if [[ "${TARGETBUILD}" = "aas" ]]; then - # Azure AppService uses /home for persisent data & SSH on port 2222 - DATA_DIR=/home - WEBSITES_ENABLE_APP_SERVICE_STORAGE=true - mkdir -p $DATA_DIR/{search,minio,couch} - mkdir -p $DATA_DIR/couch/{dbs,views} - chown -R couchdb:couchdb $DATA_DIR/couch/ - apt update - apt-get install -y openssh-server - echo "root:Docker!" 
| chpasswd - mkdir -p /tmp - chmod +x /tmp/ssh_setup.sh \ - && (sleep 1;/tmp/ssh_setup.sh 2>&1 > /dev/null) - cp /etc/sshd_config /etc/ssh/sshd_config - /etc/init.d/ssh restart - sed -i "s#DATA_DIR#/home#g" /opt/clouseau/clouseau.ini - sed -i "s#DATA_DIR#/home#g" /opt/couchdb/etc/local.ini -else - sed -i "s#DATA_DIR#/data#g" /opt/clouseau/clouseau.ini - sed -i "s#DATA_DIR#/data#g" /opt/couchdb/etc/local.ini -fi \ No newline at end of file diff --git a/hosting/couchdb/clouseau/clouseau.ini b/hosting/couchdb/clouseau/clouseau.ini index 578a5acafa..014fb854f3 100644 --- a/hosting/couchdb/clouseau/clouseau.ini +++ b/hosting/couchdb/clouseau/clouseau.ini @@ -4,7 +4,7 @@ name=clouseau@127.0.0.1 ; set this to the same distributed Erlang cookie used by the CouchDB nodes -cookie=monster +cookie=COUCHDB_ERLANG_COOKIE ; the path where you would like to store the search index files dir=DATA_DIR/search diff --git a/hosting/couchdb/couch/10-docker-default.ini b/hosting/couchdb/couch/10-docker-default.ini new file mode 100644 index 0000000000..1aa633cbfc --- /dev/null +++ b/hosting/couchdb/couch/10-docker-default.ini @@ -0,0 +1,8 @@ +; CouchDB Configuration Settings + +; Custom settings should be made in this file. They will override settings +; in default.ini, but unlike changes made to default.ini, this file won't be +; overwritten on server upgrade. + +[chttpd] +bind_address = any diff --git a/hosting/couchdb/couch/vm.args b/hosting/couchdb/couch/vm.args index e9e4416863..11845dd6d4 100644 --- a/hosting/couchdb/couch/vm.args +++ b/hosting/couchdb/couch/vm.args @@ -12,7 +12,7 @@ # erlang cookie for clouseau security -name couchdb@127.0.0.1 --setcookie monster +-setcookie COUCHDB_ERLANG_COOKIE # Ensure that the Erlang VM listens on a known port -kernel inet_dist_listen_min 9100 diff --git a/hosting/couchdb/docker-entrypoint.sh b/hosting/couchdb/docker-entrypoint.sh new file mode 100755 index 0000000000..bd709b7b73 --- /dev/null +++ b/hosting/couchdb/docker-entrypoint.sh @@ -0,0 +1,122 @@ +#!/bin/bash +# Licensed under the Apache License, Version 2.0 (the "License"); you may not +# use this file except in compliance with the License. You may obtain a copy of +# the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations under +# the License. + +set -e + +# first arg is `-something` or `+something` +if [ "${1#-}" != "$1" ] || [ "${1#+}" != "$1" ]; then + set -- /opt/couchdb/bin/couchdb "$@" +fi + +# first arg is the bare word `couchdb` +if [ "$1" = 'couchdb' ]; then + shift + set -- /opt/couchdb/bin/couchdb "$@" +fi + +if [ "$1" = '/opt/couchdb/bin/couchdb' ]; then + # this is where runtime configuration changes will be written. + # we need to explicitly touch it here in case /opt/couchdb/etc has + # been mounted as an external volume, in which case it won't exist. + # If running as the couchdb user (i.e. container starts as root), + # write permissions will be granted below. + touch /opt/couchdb/etc/local.d/docker.ini + + # if user is root, assume running under the couchdb user (default) + # and ensure it is able to access files and directories that may be mounted externally + if [ "$(id -u)" = '0' ]; then + # Check that we own everything in /opt/couchdb and fix if necessary. 
We also + # add the `-f` flag in all the following invocations because there may be + # cases where some of these ownership and permissions issues are non-fatal + # (e.g. a config file owned by root with o+r is actually fine), and we don't + # want to be too aggressive about crashing here ... + find /opt/couchdb \! \( -user couchdb -group couchdb \) -exec chown -f couchdb:couchdb '{}' + + + # Ensure that data files have the correct permissions. We were previously + # preventing any access to these files outside of couchdb:couchdb, but it + # turns out that CouchDB itself does not set such restrictive permissions + # when it creates the files. The approach taken here ensures that the + # contents of the datadir have the same permissions as they had when they + # were initially created. This should minimize any startup delay. + find /opt/couchdb/data -type d ! -perm 0755 -exec chmod -f 0755 '{}' + + find /opt/couchdb/data -type f ! -perm 0644 -exec chmod -f 0644 '{}' + + + # Do the same thing for configuration files and directories. Technically + # CouchDB only needs read access to the configuration files as all online + # changes will be applied to the "docker.ini" file below, but we set 644 + # for the sake of consistency. + find /opt/couchdb/etc -type d ! -perm 0755 -exec chmod -f 0755 '{}' + + find /opt/couchdb/etc -type f ! -perm 0644 -exec chmod -f 0644 '{}' + + fi + + if [ ! -z "$NODENAME" ] && ! grep "couchdb@" /opt/couchdb/etc/vm.args; then + echo "-name couchdb@$NODENAME" >> /opt/couchdb/etc/vm.args + fi + + if [ "$COUCHDB_USER" ] && [ "$COUCHDB_PASSWORD" ]; then + # Create admin only if not already present + if ! grep -Pzoqr "\[admins\]\n$COUCHDB_USER =" /opt/couchdb/etc/local.d/*.ini /opt/couchdb/etc/local.ini; then + printf "\n[admins]\n%s = %s\n" "$COUCHDB_USER" "$COUCHDB_PASSWORD" >> /opt/couchdb/etc/local.d/docker.ini + fi + fi + + if [ "$COUCHDB_SECRET" ]; then + # Set secret only if not already present + if ! grep -Pzoqr "\[chttpd_auth\]\nsecret =" /opt/couchdb/etc/local.d/*.ini /opt/couchdb/etc/local.ini; then + printf "\n[chttpd_auth]\nsecret = %s\n" "$COUCHDB_SECRET" >> /opt/couchdb/etc/local.d/docker.ini + fi + fi + + if [ "$COUCHDB_ERLANG_COOKIE" ]; then + cookieFile='/opt/couchdb/.erlang.cookie' + if [ -e "$cookieFile" ]; then + if [ "$(cat "$cookieFile" 2>/dev/null)" != "$COUCHDB_ERLANG_COOKIE" ]; then + echo >&2 + echo >&2 "warning: $cookieFile contents do not match COUCHDB_ERLANG_COOKIE" + echo >&2 + fi + else + echo "$COUCHDB_ERLANG_COOKIE" > "$cookieFile" + fi + chown couchdb:couchdb "$cookieFile" + chmod 600 "$cookieFile" + fi + + if [ "$(id -u)" = '0' ]; then + chown -f couchdb:couchdb /opt/couchdb/etc/local.d/docker.ini || true + fi + + # if we don't find an [admins] section followed by a non-comment, display a warning + if ! grep -Pzoqr '\[admins\]\n[^;]\w+' /opt/couchdb/etc/default.d/*.ini /opt/couchdb/etc/local.d/*.ini /opt/couchdb/etc/local.ini; then + # The - option suppresses leading tabs but *not* spaces. :) + cat >&2 <<-'EOWARN' +************************************************************* +ERROR: CouchDB 3.0+ will no longer run in "Admin Party" + mode. You *MUST* specify an admin user and + password, either via your own .ini file mapped + into the container at /opt/couchdb/etc/local.ini + or inside /opt/couchdb/etc/local.d, or with + "-e COUCHDB_USER=admin -e COUCHDB_PASSWORD=password" + to set it via "docker run".
+************************************************************* +EOWARN + exit 1 + fi + + if [ "$(id -u)" = '0' ]; then + export HOME=$(echo ~couchdb) + exec setpriv --reuid=couchdb --regid=couchdb --clear-groups "$@" + fi +fi + +exec "$@" \ No newline at end of file diff --git a/hosting/couchdb/runner.sh b/hosting/couchdb/runner.sh index 4102d2a751..aaadee6b43 100644 --- a/hosting/couchdb/runner.sh +++ b/hosting/couchdb/runner.sh @@ -1,14 +1,85 @@ #!/bin/bash DATA_DIR=${DATA_DIR:-/data} +COUCHDB_ERLANG_COOKIE=${COUCHDB_ERLANG_COOKIE:-B9CFC32C-3458-4A86-8448-B3C753991CA7} + mkdir -p ${DATA_DIR} mkdir -p ${DATA_DIR}/couch/{dbs,views} mkdir -p ${DATA_DIR}/search chown -R couchdb:couchdb ${DATA_DIR}/couch -/build-target-paths.sh + +echo ${TARGETBUILD} > /buildtarget.txt +if [[ "${TARGETBUILD}" = "aas" ]]; then + # Azure AppService uses /home for persistent data & SSH on port 2222 + DATA_DIR="${DATA_DIR:-/home}" + WEBSITES_ENABLE_APP_SERVICE_STORAGE=true + mkdir -p $DATA_DIR/{search,minio,couch} + mkdir -p $DATA_DIR/couch/{dbs,views} + chown -R couchdb:couchdb $DATA_DIR/couch/ + apt update + apt-get install -y openssh-server + echo "root:Docker!" | chpasswd + mkdir -p /tmp + chmod +x /tmp/ssh_setup.sh \ + && (sleep 1;/tmp/ssh_setup.sh 2>&1 > /dev/null) + cp /etc/sshd_config /etc/ssh/sshd_config + /etc/init.d/ssh restart + sed -i "s#DATA_DIR#/home#g" /opt/clouseau/clouseau.ini + sed -i "s#DATA_DIR#/home#g" /opt/couchdb/etc/local.ini +elif [[ "${TARGETBUILD}" = "single" ]]; then + # In the single image build, the Dockerfile specifies /data as a volume + # mount, so we use that for all persistent data. + sed -i "s#DATA_DIR#/data#g" /opt/clouseau/clouseau.ini + sed -i "s#DATA_DIR#/data#g" /opt/couchdb/etc/local.ini +elif [[ "${TARGETBUILD}" = "docker-compose" ]]; then + # We remove the database_dir and view_index_dir settings from the local.ini + # in docker-compose because it will default to /opt/couchdb/data which is what + # our docker-compose was using prior to us switching to using our own CouchDB + # image. + sed -i "s#^database_dir.*\$##g" /opt/couchdb/etc/local.ini + sed -i "s#^view_index_dir.*\$##g" /opt/couchdb/etc/local.ini + sed -i "s#^dir=.*\$#dir=/opt/couchdb/data#g" /opt/clouseau/clouseau.ini +elif [[ -n $KUBERNETES_SERVICE_HOST ]]; then + # In Kubernetes the directory /opt/couchdb/data has a persistent volume + # mount for storing database data. + sed -i "s#^dir=.*\$#dir=/opt/couchdb/data#g" /opt/clouseau/clouseau.ini + + # We remove the database_dir and view_index_dir settings from the local.ini + # in Kubernetes because it will default to /opt/couchdb/data which is what + # our Helm chart was using prior to us switching to using our own CouchDB + # image. + sed -i "s#^database_dir.*\$##g" /opt/couchdb/etc/local.ini + sed -i "s#^view_index_dir.*\$##g" /opt/couchdb/etc/local.ini + + # We remove the -name setting from the vm.args file in Kubernetes because + # it will default to the pod FQDN, which is what's required for clustering + # to work. + sed -i "s/^-name .*$//g" /opt/couchdb/etc/vm.args +else + # For all other builds, we use /data for persistent data. + sed -i "s#DATA_DIR#/data#g" /opt/clouseau/clouseau.ini + sed -i "s#DATA_DIR#/data#g" /opt/couchdb/etc/local.ini +fi + +sed -i "s#COUCHDB_ERLANG_COOKIE#${COUCHDB_ERLANG_COOKIE}#g" /opt/couchdb/etc/vm.args +sed -i "s#COUCHDB_ERLANG_COOKIE#${COUCHDB_ERLANG_COOKIE}#g" /opt/clouseau/clouseau.ini + +# Start Clouseau. 
Budibase won't function correctly without Clouseau running; it
+# powers the search API endpoints, which are used for all sorts of things,
+# including populating app grids.
 /opt/clouseau/bin/clouseau > /dev/stdout 2>&1 &
+
+# Start CouchDB.
 /docker-entrypoint.sh /opt/couchdb/bin/couchdb &
-sleep 10
-curl -X PUT http://${COUCHDB_USER}:${COUCHDB_PASSWORD}@localhost:5984/_users
-curl -X PUT http://${COUCHDB_USER}:${COUCHDB_PASSWORD}@localhost:5984/_replicator
+
+# Wait for CouchDB to start up.
+while [[ $(curl -s -w "%{http_code}\n" http://localhost:5984/_up -o /dev/null) -ne 200 ]]; do
+  echo 'Waiting for CouchDB to start...';
+  sleep 5;
+done
+
+# CouchDB needs the `_users` and `_replicator` databases to exist before it will
+# function correctly, so we create them here.
+curl -X PUT -u "${COUCHDB_USER}:${COUCHDB_PASSWORD}" http://localhost:5984/_users
+curl -X PUT -u "${COUCHDB_USER}:${COUCHDB_PASSWORD}" http://localhost:5984/_replicator
 sleep infinity
\ No newline at end of file
diff --git a/hosting/docker-compose.build.yaml b/hosting/docker-compose.build.yaml
index 7ead001a1c..dbc3613599 100644
--- a/hosting/docker-compose.build.yaml
+++ b/hosting/docker-compose.build.yaml
@@ -6,7 +6,7 @@ services:
   app-service:
     build:
       context: ..
-      dockerfile: packages/server/Dockerfile.v2
+      dockerfile: packages/server/Dockerfile
       args:
         - BUDIBASE_VERSION=0.0.0+dev-docker
     container_name: build-bbapps
@@ -36,7 +36,7 @@ services:
   worker-service:
     build:
       context: ..
-      dockerfile: packages/worker/Dockerfile.v2
+      dockerfile: packages/worker/Dockerfile
       args:
         - BUDIBASE_VERSION=0.0.0+dev-docker
     container_name: build-bbworker
diff --git a/hosting/docker-compose.yaml b/hosting/docker-compose.yaml
index 8f66d211f7..a72b36aef1 100644
--- a/hosting/docker-compose.yaml
+++ b/hosting/docker-compose.yaml
@@ -26,7 +26,7 @@ services:
       BB_ADMIN_USER_EMAIL: ${BB_ADMIN_USER_EMAIL}
       BB_ADMIN_USER_PASSWORD: ${BB_ADMIN_USER_PASSWORD}
       PLUGINS_DIR: ${PLUGINS_DIR}
-      OFFLINE_MODE: ${OFFLINE_MODE}
+      OFFLINE_MODE: ${OFFLINE_MODE:-}
     depends_on:
       - worker-service
       - redis-service
@@ -53,11 +53,10 @@ services:
       INTERNAL_API_KEY: ${INTERNAL_API_KEY}
       REDIS_URL: redis-service:6379
       REDIS_PASSWORD: ${REDIS_PASSWORD}
-      OFFLINE_MODE: ${OFFLINE_MODE}
+      OFFLINE_MODE: ${OFFLINE_MODE:-}
     depends_on:
       - redis-service
       - minio-service
-      - couch-init

   minio-service:
     restart: unless-stopped
@@ -70,7 +69,7 @@
       MINIO_BROWSER: "off"
     command: server /data --console-address ":9001"
     healthcheck:
-      test: ["CMD", "curl", "-f", "http://localhost:9000/minio/health/live"]
+      test: "timeout 5s bash -c ':> /dev/tcp/127.0.0.1/9000' || exit 1"
       interval: 30s
       timeout: 20s
       retries: 3
@@ -98,30 +97,18 @@ services:
   couchdb-service:
     restart: unless-stopped
-    image: ibmcom/couchdb3
+    image: budibase/couchdb
     environment:
       - COUCHDB_PASSWORD=${COUCH_DB_PASSWORD}
       - COUCHDB_USER=${COUCH_DB_USER}
+      - TARGETBUILD=docker-compose
     volumes:
       - couchdb3_data:/opt/couchdb/data

-  couch-init:
-    image: curlimages/curl
-    environment:
-      PUT_CALL: "curl -u ${COUCH_DB_USER}:${COUCH_DB_PASSWORD} -X PUT couchdb-service:5984"
-    depends_on:
-      - couchdb-service
-    command:
-      [
-        "sh",
-        "-c",
-        "sleep 10 && $${PUT_CALL}/_users && $${PUT_CALL}/_replicator; fg;",
-      ]
-
   redis-service:
     restart: unless-stopped
     image: redis
-    command: redis-server --requirepass ${REDIS_PASSWORD}
+    command: redis-server --requirepass "${REDIS_PASSWORD}"
     volumes:
       - redis_data:/data
diff --git a/hosting/letsencrypt/nginx-ssl.conf b/hosting/letsencrypt/nginx-ssl.conf
index 50c5e0198a..b3f51e5cc5 100644
---
a/hosting/letsencrypt/nginx-ssl.conf +++ b/hosting/letsencrypt/nginx-ssl.conf @@ -2,16 +2,18 @@ server { listen 443 ssl default_server; listen [::]:443 ssl default_server; server_name _; - ssl_certificate /etc/letsencrypt/live/CUSTOM_DOMAIN/fullchain.pem; - ssl_certificate_key /etc/letsencrypt/live/CUSTOM_DOMAIN/privkey.pem; - include /etc/letsencrypt/options-ssl-nginx.conf; - ssl_dhparam /etc/letsencrypt/ssl-dhparams.pem; - + error_log /dev/stderr warn; + access_log /dev/stdout main; client_max_body_size 1000m; ignore_invalid_headers off; proxy_buffering off; # port_in_redirect off; + ssl_certificate /etc/letsencrypt/live/CUSTOM_DOMAIN/fullchain.pem; + ssl_certificate_key /etc/letsencrypt/live/CUSTOM_DOMAIN/privkey.pem; + include /etc/letsencrypt/options-ssl-nginx.conf; + ssl_dhparam /etc/letsencrypt/ssl-dhparams.pem; + location ^~ /.well-known/acme-challenge/ { default_type "text/plain"; root /var/www/html; @@ -47,6 +49,24 @@ server { rewrite ^/worker/(.*)$ /$1 break; } + location /api/backups/ { + # calls to export apps are limited + limit_req zone=ratelimit burst=20 nodelay; + + # 1800s timeout for app export requests + proxy_read_timeout 1800s; + proxy_connect_timeout 1800s; + proxy_send_timeout 1800s; + + proxy_http_version 1.1; + proxy_set_header Connection $connection_upgrade; + proxy_set_header Upgrade $http_upgrade; + proxy_set_header X-Real-IP $remote_addr; + proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; + + proxy_pass http://127.0.0.1:4001; + } + location /api/ { # calls to the API are rate limited with bursting limit_req zone=ratelimit burst=20 nodelay; @@ -70,18 +90,49 @@ server { rewrite ^/db/(.*)$ /$1 break; } + location /socket/ { + proxy_http_version 1.1; + proxy_set_header Upgrade $http_upgrade; + proxy_set_header Connection 'upgrade'; + proxy_set_header Host $host; + proxy_cache_bypass $http_upgrade; + proxy_pass http://127.0.0.1:4001; + } + location / { proxy_set_header X-Real-IP $remote_addr; proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; proxy_set_header X-Forwarded-Proto $scheme; + proxy_set_header Host $http_host; proxy_connect_timeout 300; proxy_http_version 1.1; proxy_set_header Connection ""; chunked_transfer_encoding off; + proxy_pass http://127.0.0.1:9000; } + location /files/signed/ { + proxy_set_header X-Real-IP $remote_addr; + proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; + proxy_set_header X-Forwarded-Proto $scheme; + + # IMPORTANT: Signed urls will inspect the host header of the request. + # Normally a signed url will need to be generated with a specified client host in mind. + # To support dynamic hosts, e.g. some unknown self-hosted installation url, + # use a predefined host header. The host 'minio-service' is also used at the time of url signing. 
+ proxy_set_header Host minio-service; + + proxy_connect_timeout 300; + proxy_http_version 1.1; + proxy_set_header Connection ""; + chunked_transfer_encoding off; + + proxy_pass http://127.0.0.1:9000; + rewrite ^/files/signed/(.*)$ /$1 break; + } + client_header_timeout 60; client_body_timeout 60; keepalive_timeout 60; diff --git a/hosting/nginx.dev.conf b/hosting/nginx.dev.conf index 915125cbce..f0a58a9a98 100644 --- a/hosting/nginx.dev.conf +++ b/hosting/nginx.dev.conf @@ -42,7 +42,7 @@ http { server { listen 10000 default_server; server_name _; - client_max_body_size 1000m; + client_max_body_size 50000m; ignore_invalid_headers off; proxy_buffering off; diff --git a/hosting/proxy/error.html b/hosting/proxy/error.html index 023c1ebaff..545d6c7f6d 100644 --- a/hosting/proxy/error.html +++ b/hosting/proxy/error.html @@ -57,8 +57,8 @@ --spectrum-global-color-gray-600: rgb(144,144,144); --spectrum-global-color-gray-900: rgb(255,255,255); --spectrum-global-color-gray-800: rgb(227,227,227); - --spectrum-global-color-static-blue-600: rgb(20,115,230); - --spectrum-global-color-static-blue-hover: rgb( 18, 103, 207); + --bb-indigo: #6E56FF; + --bb-indigo-light: #9F8FFF; } html, body { @@ -90,15 +90,8 @@ .info { display: flex; flex-direction: column; - align-items: left; + align-items: flex-start; } - - @media only screen and (max-width: 600px) { - .info { - align-items: center; - } - } - .status { color: var(--spectrum-global-color-gray-600) } @@ -113,13 +106,14 @@ .buttons { display: flex; flex-direction: row; + justify-content: flex-start; margin-top: 15px; } .homeButton { - background-color: var(--spectrum-global-color-static-blue-600); + background-color: var(--bb-indigo); } .homeButton:hover { - background-color: var(--spectrum-global-color-static-blue-hover); + background-color: var(--bb-indigo-light); } .statusButton { background-color: transparent; @@ -127,20 +121,30 @@ border: none; } .hero { - height: 160px; - width: 160px; - margin-right: 80px; + height: 60px; + margin: 10px 40px 10px 0; + } + .hero img { + height: 100%; } .content { display: flex; flex-direction: row; - align-items: flex-end; + align-items: center; justify-content: center; + padding: 0 40px; + } + h1 { + margin-bottom: 10px; + } + h3 { + margin-top: 0; } @media only screen and (max-width: 600px) { .content { flex-direction: column; + align-items: flex-start; } } @@ -152,16 +156,15 @@
- Budibase Logo + Budibase Logo
-

+

 

Houston we have a problem!

-

-

+

 

diff --git a/hosting/proxy/nginx.prod.conf b/hosting/proxy/nginx.prod.conf index 6da2e4a1c3..65cc3ff390 100644 --- a/hosting/proxy/nginx.prod.conf +++ b/hosting/proxy/nginx.prod.conf @@ -249,4 +249,31 @@ http { gzip_comp_level 6; gzip_types text/plain text/css text/xml application/json application/javascript application/rss+xml application/atom+xml image/svg+xml; } + + # From https://docs.datadoghq.com/integrations/nginx/?tab=kubernetes + server { + listen 81; + server_name localhost; + + access_log off; + allow 127.0.0.1; + allow 10.0.0.0/8; + deny all; + + location /nginx_status { + # Choose your status module + + # freely available with open source NGINX + stub_status; + + # for open source NGINX < version 1.7.5 + # stub_status on; + + # available only with NGINX Plus + # status; + + # ensures the version information can be retrieved + server_tokens on; + } + } } diff --git a/hosting/scripts/build-target-paths.sh b/hosting/scripts/build-target-paths.sh deleted file mode 100644 index 67e1765ca8..0000000000 --- a/hosting/scripts/build-target-paths.sh +++ /dev/null @@ -1,24 +0,0 @@ -#!/bin/bash - -echo ${TARGETBUILD} > /buildtarget.txt -if [[ "${TARGETBUILD}" = "aas" ]]; then - # Azure AppService uses /home for persisent data & SSH on port 2222 - DATA_DIR=/home - WEBSITES_ENABLE_APP_SERVICE_STORAGE=true - mkdir -p $DATA_DIR/{search,minio,couch} - mkdir -p $DATA_DIR/couch/{dbs,views} - chown -R couchdb:couchdb $DATA_DIR/couch/ - apt update - apt-get install -y openssh-server - echo "root:Docker!" | chpasswd - mkdir -p /tmp - chmod +x /tmp/ssh_setup.sh \ - && (sleep 1;/tmp/ssh_setup.sh 2>&1 > /dev/null) - cp /etc/sshd_config /etc/ssh/sshd_config - /etc/init.d/ssh restart - sed -i "s#DATA_DIR#/home#g" /opt/clouseau/clouseau.ini - sed -i "s#DATA_DIR#/home#g" /opt/couchdb/etc/local.ini -else - sed -i "s#DATA_DIR#/data#g" /opt/clouseau/clouseau.ini - sed -i "s#DATA_DIR#/data#g" /opt/couchdb/etc/local.ini -fi \ No newline at end of file diff --git a/hosting/single/Dockerfile b/hosting/single/Dockerfile index c7b90dbdc4..be01056b53 100644 --- a/hosting/single/Dockerfile +++ b/hosting/single/Dockerfile @@ -1,28 +1,41 @@ -FROM node:18-slim as build +FROM node:20-slim as build # install node-gyp dependencies -RUN apt-get update && apt-get upgrade -y && apt-get install -y --no-install-recommends apt-utils cron g++ make python3 +RUN apt-get update && apt-get install -y --no-install-recommends g++ make python3 jq -# add pin script -WORKDIR / -ADD scripts/cleanup.sh ./ -RUN chmod +x /cleanup.sh - -# build server +# copy and install dependencies WORKDIR /app -ADD packages/server . +COPY package.json . COPY yarn.lock . -RUN yarn install --production=true --network-timeout 1000000 -RUN /cleanup.sh +COPY lerna.json . +COPY .yarnrc . -# build worker -WORKDIR /worker -ADD packages/worker . -COPY yarn.lock . 
-RUN yarn install --production=true --network-timeout 1000000 -RUN /cleanup.sh +COPY packages/server/package.json packages/server/package.json +COPY packages/worker/package.json packages/worker/package.json -FROM budibase/couchdb + +COPY scripts/removeWorkspaceDependencies.sh scripts/removeWorkspaceDependencies.sh +RUN chmod +x ./scripts/removeWorkspaceDependencies.sh +RUN ./scripts/removeWorkspaceDependencies.sh packages/server/package.json +RUN ./scripts/removeWorkspaceDependencies.sh packages/worker/package.json + + +# We will never want to sync pro, but the script is still required +RUN echo '' > scripts/syncProPackage.js +RUN jq 'del(.scripts.postinstall)' package.json > temp.json && mv temp.json package.json +RUN ./scripts/removeWorkspaceDependencies.sh package.json +RUN --mount=type=cache,target=/root/.yarn YARN_CACHE_FOLDER=/root/.yarn yarn install --production --frozen-lockfile + +# copy the actual code +COPY packages/server/dist packages/server/dist +COPY packages/server/pm2.config.js packages/server/pm2.config.js +COPY packages/server/client packages/server/client +COPY packages/server/builder packages/server/builder +COPY packages/worker/dist packages/worker/dist +COPY packages/worker/pm2.config.js packages/worker/pm2.config.js + + +FROM budibase/couchdb:v3.3.3 as runner ARG TARGETARCH ENV TARGETARCH $TARGETARCH #TARGETBUILD can be set to single (for single docker image) or aas (for azure app service) @@ -30,31 +43,24 @@ ENV TARGETARCH $TARGETARCH ARG TARGETBUILD=single ENV TARGETBUILD $TARGETBUILD -COPY --from=build /app /app -COPY --from=build /worker /worker - # install base dependencies RUN apt-get update && \ - apt-get install -y --no-install-recommends software-properties-common nginx uuid-runtime redis-server + apt-get install -y --no-install-recommends software-properties-common nginx uuid-runtime redis-server libaio1 # Install postgres client for pg_dump utils -RUN apt install software-properties-common apt-transport-https gpg -y \ - && curl -fsSl https://www.postgresql.org/media/keys/ACCC4CF8.asc | gpg --dearmor | tee /usr/share/keyrings/postgresql.gpg > /dev/null \ - && echo deb [arch=amd64,arm64,ppc64el signed-by=/usr/share/keyrings/postgresql.gpg] http://apt.postgresql.org/pub/repos/apt/ $(lsb_release -cs)-pgdg main | tee /etc/apt/sources.list.d/postgresql.list \ - && apt update -y \ - && apt install postgresql-client-15 -y \ - && apt remove software-properties-common apt-transport-https gpg -y +RUN apt install -y software-properties-common apt-transport-https ca-certificates gnupg \ + && curl -fsSl https://www.postgresql.org/media/keys/ACCC4CF8.asc | gpg --dearmor | tee /usr/share/keyrings/postgresql.gpg > /dev/null \ + && echo deb [arch=amd64,arm64,ppc64el signed-by=/usr/share/keyrings/postgresql.gpg] http://apt.postgresql.org/pub/repos/apt/ $(lsb_release -cs)-pgdg main | tee /etc/apt/sources.list.d/postgresql.list \ + && apt update -y \ + && apt install postgresql-client-15 -y \ + && apt remove software-properties-common apt-transport-https gpg -y -# install other dependencies, nodejs, oracle requirements, jdk8, redis, nginx -WORKDIR /nodejs -RUN curl -sL https://deb.nodesource.com/setup_16.x -o /tmp/nodesource_setup.sh && \ - bash /tmp/nodesource_setup.sh && \ - apt-get install -y --no-install-recommends libaio1 nodejs && \ - npm install --global yarn pm2 +# We use pm2 in order to run multiple node processes in a single container +RUN npm install --global pm2 # setup nginx -ADD hosting/single/nginx/nginx.conf /etc/nginx -ADD 
hosting/single/nginx/nginx-default-site.conf /etc/nginx/sites-enabled/default +COPY hosting/single/nginx/nginx.conf /etc/nginx +COPY hosting/single/nginx/nginx-default-site.conf /etc/nginx/sites-enabled/default RUN mkdir -p /var/log/nginx && \ touch /var/log/nginx/error.log && \ touch /var/run/nginx.pid && \ @@ -62,29 +68,36 @@ RUN mkdir -p /var/log/nginx && \ WORKDIR / RUN mkdir -p scripts/integrations/oracle -ADD packages/server/scripts/integrations/oracle scripts/integrations/oracle +COPY packages/server/scripts/integrations/oracle scripts/integrations/oracle RUN /bin/bash -e ./scripts/integrations/oracle/instantclient/linux/install.sh # setup minio WORKDIR /minio -ADD scripts/install-minio.sh ./install.sh +COPY scripts/install-minio.sh ./install.sh RUN chmod +x install.sh && ./install.sh # setup runner file WORKDIR / -ADD hosting/single/runner.sh . +COPY hosting/single/runner.sh . RUN chmod +x ./runner.sh -ADD hosting/single/healthcheck.sh . +COPY hosting/single/healthcheck.sh . RUN chmod +x ./healthcheck.sh # Script below sets the path for storing data based on $DATA_DIR # For Azure App Service install SSH & point data locations to /home -ADD hosting/single/ssh/sshd_config /etc/ -ADD hosting/single/ssh/ssh_setup.sh /tmp -RUN /build-target-paths.sh +COPY hosting/single/ssh/sshd_config /etc/ +COPY hosting/single/ssh/ssh_setup.sh /tmp + +# setup letsencrypt certificate +RUN apt-get install -y certbot python3-certbot-nginx +COPY hosting/letsencrypt /app/letsencrypt +RUN chmod +x /app/letsencrypt/certificate-request.sh /app/letsencrypt/certificate-renew.sh + +COPY --from=build /app/node_modules /node_modules +COPY --from=build /app/package.json /package.json +COPY --from=build /app/packages/server /app +COPY --from=build /app/packages/worker /worker -# cleanup cache -RUN yarn cache clean -f EXPOSE 80 EXPOSE 443 @@ -92,25 +105,17 @@ EXPOSE 443 EXPOSE 2222 VOLUME /data -# setup letsencrypt certificate -RUN apt-get install -y certbot python3-certbot-nginx -ADD hosting/letsencrypt /app/letsencrypt -RUN chmod +x /app/letsencrypt/certificate-request.sh /app/letsencrypt/certificate-renew.sh -# Remove cached files -RUN rm -rf \ - /root/.cache \ - /root/.npm \ - /root/.pip \ - /usr/local/share/doc \ - /usr/share/doc \ - /usr/share/man \ - /var/lib/apt/lists/* \ - /tmp/* +ARG BUDIBASE_VERSION +# Ensuring the version argument is sent +RUN test -n "$BUDIBASE_VERSION" +ENV BUDIBASE_VERSION=$BUDIBASE_VERSION HEALTHCHECK --interval=15s --timeout=15s --start-period=45s CMD "/healthcheck.sh" # must set this just before running ENV NODE_ENV=production +# this is required for isolated-vm to work on Node 20+ +ENV NODE_OPTIONS="--no-node-snapshot" WORKDIR / CMD ["./runner.sh"] diff --git a/hosting/single/Dockerfile.v2 b/hosting/single/Dockerfile.v2 deleted file mode 100644 index ec03a1b5a2..0000000000 --- a/hosting/single/Dockerfile.v2 +++ /dev/null @@ -1,131 +0,0 @@ -FROM node:18-slim as build - -# install node-gyp dependencies -RUN apt-get update && apt-get install -y --no-install-recommends g++ make python3 jq - - -# copy and install dependencies -WORKDIR /app -COPY package.json . -COPY yarn.lock . -COPY lerna.json . -COPY .yarnrc . 
- -COPY packages/server/package.json packages/server/package.json -COPY packages/worker/package.json packages/worker/package.json -# string-templates does not get bundled during the esbuild process, so we want to use the local version -COPY packages/string-templates/package.json packages/string-templates/package.json - - -COPY scripts/removeWorkspaceDependencies.sh scripts/removeWorkspaceDependencies.sh -RUN chmod +x ./scripts/removeWorkspaceDependencies.sh -RUN ./scripts/removeWorkspaceDependencies.sh packages/server/package.json -RUN ./scripts/removeWorkspaceDependencies.sh packages/worker/package.json - - -# We will never want to sync pro, but the script is still required -RUN echo '' > scripts/syncProPackage.js -RUN jq 'del(.scripts.postinstall)' package.json > temp.json && mv temp.json package.json -RUN ./scripts/removeWorkspaceDependencies.sh package.json -RUN --mount=type=cache,target=/root/.yarn YARN_CACHE_FOLDER=/root/.yarn yarn install --production - -# copy the actual code -COPY packages/server/dist packages/server/dist -COPY packages/server/pm2.config.js packages/server/pm2.config.js -COPY packages/server/client packages/server/client -COPY packages/server/builder packages/server/builder -COPY packages/worker/dist packages/worker/dist -COPY packages/worker/pm2.config.js packages/worker/pm2.config.js -COPY packages/string-templates packages/string-templates - - -FROM budibase/couchdb as runner -ARG TARGETARCH -ENV TARGETARCH $TARGETARCH -ENV NODE_MAJOR 18 -#TARGETBUILD can be set to single (for single docker image) or aas (for azure app service) -# e.g. docker build --build-arg TARGETBUILD=aas .... -ARG TARGETBUILD=single -ENV TARGETBUILD $TARGETBUILD - -# install base dependencies -RUN apt-get update && \ - apt-get install -y --no-install-recommends software-properties-common nginx uuid-runtime redis-server libaio1 - -# Install postgres client for pg_dump utils -RUN apt install -y software-properties-common apt-transport-https ca-certificates gnupg \ - && curl -fsSl https://www.postgresql.org/media/keys/ACCC4CF8.asc | gpg --dearmor | tee /usr/share/keyrings/postgresql.gpg > /dev/null \ - && echo deb [arch=amd64,arm64,ppc64el signed-by=/usr/share/keyrings/postgresql.gpg] http://apt.postgresql.org/pub/repos/apt/ $(lsb_release -cs)-pgdg main | tee /etc/apt/sources.list.d/postgresql.list \ - && apt update -y \ - && apt install postgresql-client-15 -y \ - && apt remove software-properties-common apt-transport-https gpg -y - -# install other dependencies, nodejs, oracle requirements, jdk8, redis, nginx -WORKDIR /nodejs -COPY scripts/install-node.sh ./install.sh -RUN chmod +x install.sh && ./install.sh - -# setup nginx -COPY hosting/single/nginx/nginx.conf /etc/nginx -COPY hosting/single/nginx/nginx-default-site.conf /etc/nginx/sites-enabled/default -RUN mkdir -p /var/log/nginx && \ - touch /var/log/nginx/error.log && \ - touch /var/run/nginx.pid && \ - usermod -a -G tty www-data - -WORKDIR / -RUN mkdir -p scripts/integrations/oracle -COPY packages/server/scripts/integrations/oracle scripts/integrations/oracle -RUN /bin/bash -e ./scripts/integrations/oracle/instantclient/linux/install.sh - -# setup minio -WORKDIR /minio -COPY scripts/install-minio.sh ./install.sh -RUN chmod +x install.sh && ./install.sh - -# setup runner file -WORKDIR / -COPY hosting/single/runner.sh . -RUN chmod +x ./runner.sh -COPY hosting/single/healthcheck.sh . 
-RUN chmod +x ./healthcheck.sh - -# Script below sets the path for storing data based on $DATA_DIR -# For Azure App Service install SSH & point data locations to /home -COPY hosting/single/ssh/sshd_config /etc/ -COPY hosting/single/ssh/ssh_setup.sh /tmp -RUN /build-target-paths.sh - - -# setup letsencrypt certificate -RUN apt-get install -y certbot python3-certbot-nginx -COPY hosting/letsencrypt /app/letsencrypt -RUN chmod +x /app/letsencrypt/certificate-request.sh /app/letsencrypt/certificate-renew.sh - -COPY --from=build /app/node_modules /node_modules -COPY --from=build /app/package.json /package.json -COPY --from=build /app/packages/server /app -COPY --from=build /app/packages/worker /worker -COPY --from=build /app/packages/string-templates /string-templates - -RUN cd /string-templates && yarn link && cd ../app && yarn link @budibase/string-templates && cd ../worker && yarn link @budibase/string-templates - - -EXPOSE 80 -EXPOSE 443 -# Expose port 2222 for SSH on Azure App Service build -EXPOSE 2222 -VOLUME /data - -ARG BUDIBASE_VERSION -# Ensuring the version argument is sent -RUN test -n "$BUDIBASE_VERSION" -ENV BUDIBASE_VERSION=$BUDIBASE_VERSION - -HEALTHCHECK --interval=15s --timeout=15s --start-period=45s CMD "/healthcheck.sh" - -# must set this just before running -ENV NODE_ENV=production -WORKDIR / - -CMD ["./runner.sh"] diff --git a/hosting/single/healthcheck.sh b/hosting/single/healthcheck.sh index 592b3e94fa..12e340062c 100644 --- a/hosting/single/healthcheck.sh +++ b/hosting/single/healthcheck.sh @@ -25,7 +25,7 @@ if [[ $(curl -s -w "%{http_code}\n" http://localhost:4002/health -o /dev/null) - healthy=false fi -if [[ $(curl -s -w "%{http_code}\n" http://localhost:5984/ -o /dev/null) -ne 200 ]]; then +if [[ $(curl -s -w "%{http_code}\n" http://localhost:5984/_up -o /dev/null) -ne 200 ]]; then echo 'ERROR: CouchDB is not running'; healthy=false fi diff --git a/hosting/single/runner.sh b/hosting/single/runner.sh index 770b23eec1..3126eedfb1 100644 --- a/hosting/single/runner.sh +++ b/hosting/single/runner.sh @@ -7,7 +7,7 @@ declare -a DOCKER_VARS=("APP_PORT" "APPS_URL" "ARCHITECTURE" "BUDIBASE_ENVIRONME [[ -z "${BUDIBASE_ENVIRONMENT}" ]] && export BUDIBASE_ENVIRONMENT=PRODUCTION [[ -z "${CLUSTER_PORT}" ]] && export CLUSTER_PORT=80 [[ -z "${DEPLOYMENT_ENVIRONMENT}" ]] && export DEPLOYMENT_ENVIRONMENT=docker -[[ -z "${MINIO_URL}" ]] && export MINIO_URL=http://127.0.0.1:9000 +[[ -z "${MINIO_URL}" ]] && [[ -z "${USE_S3}" ]] && export MINIO_URL=http://127.0.0.1:9000 [[ -z "${NODE_ENV}" ]] && export NODE_ENV=production [[ -z "${POSTHOG_TOKEN}" ]] && export POSTHOG_TOKEN=phc_bIjZL7oh2GEUd2vqvTBH8WvrX0fWTFQMs6H5KQxiUxU [[ -z "${TENANT_FEATURE_FLAGS}" ]] && export TENANT_FEATURE_FLAGS="*:LICENSING,*:USER_GROUPS,*:ONBOARDING_TOUR" @@ -22,11 +22,11 @@ declare -a DOCKER_VARS=("APP_PORT" "APPS_URL" "ARCHITECTURE" "BUDIBASE_ENVIRONME # Azure App Service customisations if [[ "${TARGETBUILD}" = "aas" ]]; then - DATA_DIR=/home + export DATA_DIR="${DATA_DIR:-/home}" WEBSITES_ENABLE_APP_SERVICE_STORAGE=true /etc/init.d/ssh start else - DATA_DIR=${DATA_DIR:-/data} + export DATA_DIR=${DATA_DIR:-/data} fi mkdir -p ${DATA_DIR} # Mount NFS or GCP Filestore if env vars exist for it @@ -77,7 +77,12 @@ mkdir -p ${DATA_DIR}/minio chown -R couchdb:couchdb ${DATA_DIR}/couch redis-server --requirepass $REDIS_PASSWORD > /dev/stdout 2>&1 & /bbcouch-runner.sh & -minio server --console-address ":9001" ${DATA_DIR}/minio > /dev/stdout 2>&1 & + +# only start minio if use s3 isn't passed +if [[ -z "${USE_S3}" ]]; 
then + /minio/minio server --console-address ":9001" ${DATA_DIR}/minio > /dev/stdout 2>&1 & +fi + /etc/init.d/nginx restart if [[ ! -z "${CUSTOM_DOMAIN}" ]]; then # Add monthly cron job to renew certbot certificate @@ -92,10 +97,12 @@ fi sleep 10 pushd app -pm2 start -l /dev/stdout --name app "yarn run:docker" +pm2 start --name app "yarn run:docker" popd pushd worker -pm2 start -l /dev/stdout --name worker "yarn run:docker" +pm2 start --name worker "yarn run:docker" popd echo "end of runner.sh, sleeping ..." + +tail -f $HOME/.pm2/logs/*.log sleep infinity diff --git a/i18n/README.de.md b/i18n/README.de.md index a2f4c3afb9..17d3d1ebbe 100644 --- a/i18n/README.de.md +++ b/i18n/README.de.md @@ -1,6 +1,6 @@

- Budibase + Budibase

diff --git a/i18n/README.es.md b/i18n/README.es.md index 21eb8caef7..a7d1112914 100644 --- a/i18n/README.es.md +++ b/i18n/README.es.md @@ -1,6 +1,6 @@

- Budibase + Budibase

@@ -207,8 +207,7 @@ Desde comunicar un bug a solventar un error en el codigo, toda contribucion es
a implementar una nueva funcionalidad o un realizar un cambio en la API, por favor crea un [nuevo mensaje aqui](https://github.com/Budibase/budibase/issues), de esta manera nos encargaremos que tu trabajo no sea en vano.
-Aqui tienes instrucciones de como configurar tu entorno Budibase para [Debian](https://github.com/Budibase/budibase/tree/HEAD/docs/DEV-SETUP-DEBIAN.md)
-y [MacOSX](https://github.com/Budibase/budibase/tree/HEAD/docs/DEV-SETUP-MACOSX.md)
+Encuentra las instrucciones para configurar tu entorno Budibase [aquí](https://github.com/Budibase/budibase/tree/HEAD/docs/CONTRIBUTING.md).

### No estas seguro por donde empezar?
Un buen lugar para empezar a contribuir con nosotros es [aqui](https://github.com/Budibase/budibase/projects/22).
diff --git a/i18n/README.fr.md b/i18n/README.fr.md
index 12abd4d073..f5f9fbb25e 100644
--- a/i18n/README.fr.md
+++ b/i18n/README.fr.md
@@ -1,6 +1,6 @@

- Budibase + Budibase

diff --git a/i18n/README.id.md b/i18n/README.id.md index d4a25f569c..c2077f3922 100644 --- a/i18n/README.id.md +++ b/i18n/README.id.md @@ -1,6 +1,6 @@

- Budibase + Budibase

diff --git a/i18n/README.jp.md b/i18n/README.jp.md index 6fea497d53..62d0b1d3aa 100644 --- a/i18n/README.jp.md +++ b/i18n/README.jp.md @@ -1,6 +1,6 @@

- Budibase + Budibase

diff --git a/i18n/README.kr.md b/i18n/README.kr.md new file mode 100644 index 0000000000..09fc83569b --- /dev/null +++ b/i18n/README.kr.md @@ -0,0 +1,221 @@ +

+ + Budibase + +

+

+ Budibase +

+

+ 자체 인프라에서 몇 분 만에 맞춤형 비즈니스 도구를 구축하세요. +

+

+ Budibase는 개발자와 IT 전문가가 몇 분 만에 맞춤형 애플리케이션을 구축하고 자동화할 수 있는 오픈 소스 로우코드 플랫폼입니다. +

+ +

+ 🤖 🎨 🚀 +

+ +

+ Budibase design ui +

+ +

+ + GitHub all releases + + + GitHub release (latest by date) + + + Follow @budibase + + Code of conduct + + + +

+ +

+ 소개 + · + 문서 + · + 기능 요청 + · + 버그 보고 + · + 지원: 토론 +

+ +

+## ✨ 특징 + +### "실제" 소프트웨어를 구축할 수 있습니다. +Budibase를 사용하면 고성능 단일 페이지 애플리케이션을 구축할 수 있습니다. 또한 반응형 디자인으로 제작하여 사용자에게 멋진 경험을 제공할 수 있습니다. +

+ +### 오픈 소스 및 확장성 +Budibase는 오픈소스이며, GPL v3 라이선스에 따라 공개되어 있습니다. 이는 Budibase가 항상 당신 곁에 있다는 안도감을 줄 것입니다. 그리고 우리는 개발자 친화적인 환경을 제공하고 있기 때문에, 당신은 원하는 만큼 소스 코드를 포크하여 수정하거나 Budibase에 직접 기여할 수 있습니다. +

+
+### 기존 데이터 또는 처음부터 시작
+Budibase를 사용하면 다음과 같은 여러 소스에서 데이터를 가져올 수 있습니다: MongoDB, CouchDB, PostgreSQL, MySQL, Airtable, S3, DynamoDB 또는 REST API.
+
+또는 원하는 경우 외부 도구 없이도 Budibase를 사용하여 처음부터 자체 애플리케이션을 구축할 수 있습니다. [데이터 소스 제안](https://github.com/Budibase/budibase/discussions?discussions_q=category%3AIdeas).
+
+

+ Budibase data +

+

+
+### 강력한 내장 구성 요소로 애플리케이션을 설계하고 구축할 수 있습니다.
+
+Budibase에는 아름답게 디자인된 강력한 컴포넌트들이 제공되며, 이를 사용하여 UI를 쉽게 구축할 수 있습니다. 또한 CSS를 통한 스타일링 옵션도 풍부하게 제공되어 보다 창의적인 표현도 가능합니다.
+ [Request new component](https://github.com/Budibase/budibase/discussions?discussions_q=category%3AIdeas).
+
+

+ Budibase design +

+

+ +### 프로세스를 자동화하고, 다른 도구와 연동하고, 웹훅으로 연결하세요! +워크플로우와 수동 프로세스를 자동화하여 시간을 절약하세요. 웹훅 이벤트 연결부터 이메일 자동화까지, Budibase에 수행할 작업을 지시하기만 하면 자동으로 처리됩니다. [새로운 자동화 만들기](https://github.com/Budibase/automations)또는[새로운 자동화를 요청할 수 있습니다](https://github.com/Budibase/budibase/discussions?discussions_q=category%3AIdeas). + +
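참고로, 이런 웹훅 연동은 보통 HTTP POST 한 번으로 트리거할 수 있습니다. 아래는 개념을 보여 주기 위한 가상의 스케치일 뿐이며, 호스트 주소와 웹훅 경로·ID는 모두 설명용 가정입니다(실제 URL 형식은 Budibase 문서를 확인하세요).

```bash
# 가상의 예시: 외부 시스템에서 웹훅으로 Budibase 자동화를 트리거
# (호스트, 경로, 웹훅 ID는 설명용 가정입니다.)
curl -X POST "https://your-budibase-host/api/webhooks/trigger/<webhook-id>" \
  -H "Content-Type: application/json" \
  -d '{"event": "order.created", "orderId": 1234}'
```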

+ Budibase automations +

+

+ +### 선호하는 도구 +Budibase는 사용자의 선호도에 따라 애플리케이션을 구축할 수 있는 다양한 도구를 통합하고 있습니다. + +

+ Budibase integrations +

+

+
+### 관리자의 천국
+Budibase는 어떤 규모의 프로젝트에도 유연하게 대응할 수 있으며, 개인 또는 조직의 서버에서 자체 호스팅하여 사용자, 온보딩, SMTP, 앱, 그룹, 테마 등을 한꺼번에 관리할 수 있습니다. 또한 사용자나 그룹에 앱 포털을 제공하고 그룹 관리자에게 사용자 관리를 맡길 수도 있습니다.
+- 프로모션 비디오: https://youtu.be/xoljVpty_Kw
+
+


+ +## 🏁 시작 + +Docker, Kubernetes 또는 Digital Ocean을 사용하여 자체 인프라에서 Budibase를 호스팅하거나, 걱정 없이 빠르게 애플리케이션을 구축하려는 경우 클라우드에서 Budibase를 사용할 수 있습니다. + +### [Budibase 셀프 호스팅으로 시작하기](https://docs.budibase.com/docs/hosting-methods) + +- [Docker - single ARM compatible image](https://docs.budibase.com/docs/docker) +- [Docker Compose](https://docs.budibase.com/docs/docker-compose) +- [Kubernetes](https://docs.budibase.com/docs/kubernetes-k8s) +- [Digital Ocean](https://docs.budibase.com/docs/digitalocean) +- [Portainer](https://docs.budibase.com/docs/portainer) + + +### [클라우드에서 Budibase 시작하기](https://budibase.com) + +
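셀프 호스팅을 가장 빠르게 시험해 보는 방법 중 하나는 단일 Docker 이미지입니다. 아래는 이 PR의 단일 이미지 Dockerfile이 포트 80/443을 노출하고 `/data`를 볼륨으로 사용한다는 점에 근거한 최소 스케치이며, 이미지 태그와 호스트 포트 매핑은 설명용 가정이므로 실제 값은 공식 문서를 확인하세요.

```bash
# 최소 예시: 단일 이미지로 Budibase 실행
# (이미지 태그와 호스트 포트는 가정입니다. Dockerfile은 80/443을 노출하고
#  /data를 영속 데이터 볼륨으로 사용합니다.)
docker run -d \
  --name budibase \
  -p 80:80 \
  -v budibase_data:/data \
  budibase/budibase:latest
```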

+
+## 🎓 Budibase 알아보기
+
+[Budibase 문서](https://docs.budibase.com/docs)를 참고하세요.
+
+ + +

+ +## 💬 커뮤니티 + +질문하고, 다른 사람을 돕고, 다른 Budibase 사용자와 즐거운 대화를 나눌 수 있는 Budibase 커뮤니티에 여러분을 초대합니다. +[깃허브 토론](https://github.com/Budibase/budibase/discussions) +


+
+
+## ❗ 행동강령
+
+Budibase는 모든 계층의 사람들을 환영하고 상호 존중하는 환경을 제공하는 데 특별한 주의를 기울이고 있습니다. 저희는 커뮤니티에도 같은 기대를 가지고 있습니다.
+[**행동 강령**](https://github.com/Budibase/budibase/blob/HEAD/.github/CODE_OF_CONDUCT.md).
+
+ +

+
+
+## 🙌 Budibase에 기여하기
+
+버그 신고부터 코드의 버그 수정에 이르기까지 모든 기여를 감사하고 환영합니다. 새로운 기능을 구현하거나 API를 변경할 계획이 있다면 [여기에 새 메시지](https://github.com/Budibase/budibase/issues)를 남겨 주세요.
+이렇게 하면 여러분의 노력이 헛되지 않도록 보장할 수 있습니다.
+
+Budibase 개발 환경을 설정하는 방법에 대한 지침은 [여기를 클릭하세요](https://github.com/Budibase/budibase/tree/HEAD/docs/CONTRIBUTING.md).
+
+### 어디서부터 시작해야 할지 혼란스러우신가요?
+이곳은 기여를 시작하기에 최적의 장소입니다! [First time issues project](https://github.com/Budibase/budibase/projects/22).
+
+### 리포지토리 구성
+
+Budibase는 Lerna에서 관리하는 단일 리포지토리(모노레포)입니다. Lerna는 변경 사항이 있을 때마다 이를 동기화하여 Budibase 패키지를 빌드하고 게시합니다. 크게 보면 Budibase는 다음 패키지들로 구성됩니다:
+
+- [packages/builder](https://github.com/Budibase/budibase/tree/HEAD/packages/builder) - budibase builder 클라이언트 측의 svelte 애플리케이션 코드가 포함되어 있습니다.
+
+- [packages/client](https://github.com/Budibase/budibase/tree/HEAD/packages/client) - 브라우저에서 실행되며, JSON 정의를 읽어 실제 동작하는 웹 앱으로 만들어 주는 모듈입니다.
+
+- [packages/server](https://github.com/Budibase/budibase/tree/HEAD/packages/server) - Budibase의 서버 부분입니다. 이 Koa 애플리케이션은 빌더에게 Budibase 애플리케이션을 생성하는 데 필요한 것을 제공하는 역할을 합니다. 또한 데이터베이스 및 파일 저장소와 상호 작용할 수 있는 API를 제공합니다.
+
+자세한 내용은 다음 문서를 참조하세요: [CONTRIBUTING.md](https://github.com/Budibase/budibase/blob/HEAD/docs/CONTRIBUTING.md)
+
+
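아래는 이 PR의 루트 package.json에 실제로 정의된 스크립트를 기준으로 한 모노레포 개발 흐름 스케치입니다(스크립트 이름은 이 diff의 package.json에서 그대로 가져온 것입니다).

```bash
# 이 diff의 루트 package.json 스크립트 기준 개발 흐름 예시
yarn setup        # 서브모듈 초기화 + 의존성 설치 + 빌드 후 dev 실행
yarn dev          # account-portal 패키지를 제외한 전체 dev 실행
yarn dev:server   # @budibase/server와 @budibase/worker만 다시 실행
yarn lint:fix     # eslint 자동 수정 후 prettier 정리
```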

+
+
+## 📝 라이선스
+
+Budibase는 오픈 소스이며 [GPL v3](https://www.gnu.org/licenses/gpl-3.0.en.html) 라이선스를 따릅니다. 클라이언트 및 컴포넌트 라이브러리에는 [MPL](https://directory.fsf.org/wiki/License:MPL-2.0) 라이선스가 적용되므로, 빌드한 애플리케이션에는 원하는 라이선스를 자유롭게 부여할 수 있습니다.
+

+
+## ⭐ 스타 수의 역사
+
+[![Stargazers over time](https://starchart.cc/Budibase/budibase.svg)](https://starchart.cc/Budibase/budibase)
+
+빌더 업데이트 중 문제가 발생하는 경우 [여기](https://github.com/Budibase/budibase/blob/HEAD/docs/CONTRIBUTING.md#troubleshooting)를 참고하여 환경을 정리해 주세요.
+

+ +## Contributors ✨ + +훌륭한 여러분께 감사할 따름입니다. ([emoji key](https://allcontributors.org/docs/en/emoji-key)): + + + + + + + + + + + + + + + + + + + + + + + + + +

Martin McKeaveney

💻 📖 ⚠️ 🚇

Michael Drury

📖 💻 ⚠️ 🚇

Andrew Kingston

📖 💻 ⚠️ 🎨

Michael Shanks

📖 💻 ⚠️

Kevin Åberg Kultalahti

📖 💻 ⚠️

Joe

📖 💻 🖋 🎨

Rory Powell

💻 📖 ⚠️

Peter Clement

💻 📖 ⚠️

Conor_Mack

💻 ⚠️

pngwn

💻 ⚠️

HugoLd

💻

victoriasloan

💻

yashank09

💻

SOVLOOKUP

💻

seoulaja

🌍

Maurits Lourens

⚠️ 💻
+ + + + + + +이 프로젝트는 다음 사양을 따릅니다. [all-contributors](https://github.com/all-contributors/all-contributors). +모든 종류의 기여를 환영합니다! diff --git a/i18n/README.zh.md b/i18n/README.zh.md index 7e4dffd387..a6a9575029 100644 --- a/i18n/README.zh.md +++ b/i18n/README.zh.md @@ -1,6 +1,6 @@

- Budibase + Budibase

diff --git a/jestTestcontainersConfigGenerator.js b/jestTestcontainersConfigGenerator.js deleted file mode 100644 index 1e39ed771f..0000000000 --- a/jestTestcontainersConfigGenerator.js +++ /dev/null @@ -1,16 +0,0 @@ -module.exports = () => { - return { - couchdb: { - image: "budibase/couchdb", - ports: [5984], - env: { - COUCHDB_PASSWORD: "budibase", - COUCHDB_USER: "budibase", - }, - wait: { - type: "ports", - timeout: 20000, - } - } - } -} diff --git a/lerna.json b/lerna.json index f0f51242d1..bacdcb782f 100644 --- a/lerna.json +++ b/lerna.json @@ -1,10 +1,13 @@ { - "version": "2.12.4", + "version": "2.22.13", "npmClient": "yarn", "packages": [ - "packages/*" + "packages/*", + "!packages/account-portal", + "packages/account-portal/packages/*" ], "useNx": true, + "concurrency": 20, "command": { "publish": { "ignoreChanges": [ diff --git a/package.json b/package.json index 417fb31e0e..32693a0b6f 100644 --- a/package.json +++ b/package.json @@ -2,32 +2,40 @@ "name": "root", "private": true, "devDependencies": { + "@babel/core": "^7.22.5", + "@babel/eslint-parser": "^7.22.5", + "@babel/preset-env": "^7.22.5", "@esbuild-plugins/tsconfig-paths": "^0.1.2", - "@typescript-eslint/parser": "6.7.2", + "@types/node": "20.10.0", + "@typescript-eslint/parser": "6.9.0", "esbuild": "^0.18.17", "esbuild-node-externals": "^1.8.0", - "eslint": "^8.44.0", + "eslint": "^8.52.0", + "eslint-plugin-import": "^2.29.0", + "eslint-plugin-jest": "^27.9.0", + "eslint-plugin-local-rules": "^2.0.0", + "eslint-plugin-svelte": "^2.34.0", "husky": "^8.0.3", "kill-port": "^1.6.1", "lerna": "7.1.1", "madge": "^6.0.0", - "minimist": "^1.2.8", "nx": "16.4.3", "nx-cloud": "16.0.5", "prettier": "2.8.8", "prettier-plugin-svelte": "^2.3.0", - "svelte": "3.49.0", + "svelte": "^4.2.10", + "svelte-eslint-parser": "^0.33.1", "typescript": "5.2.2", - "@babel/core": "^7.22.5", - "@babel/eslint-parser": "^7.22.5", - "@babel/preset-env": "^7.22.5", - "eslint-plugin-svelte": "^2.32.2", - "svelte-eslint-parser": "^0.32.0" + "typescript-eslint": "^7.3.1", + "yargs": "^17.7.2" }, "scripts": { "preinstall": "node scripts/syncProPackage.js", + "get-past-client-version": "node scripts/getPastClientVersion.js", "setup": "git config submodule.recurse true && git submodule update && node ./hosting/scripts/setup.js && yarn && yarn build && yarn dev", - "build": "lerna run build --stream", + "build": "NODE_OPTIONS=--max-old-space-size=1500 lerna run build --stream", + "build:oss": "NODE_OPTIONS=--max-old-space-size=1500 lerna run build --stream --ignore @budibase/account-portal --ignore @budibase/account-portal-server --ignore @budibase/account-portal-ui", + "build:account-portal": "NODE_OPTIONS=--max-old-space-size=1500 lerna run build --stream --scope @budibase/account-portal --scope @budibase/account-portal-server --scope @budibase/account-portal-ui", "build:dev": "lerna run --stream prebuild && yarn nx run-many --target=build --output-style=dynamic --watch --preserveWatchOutput", "check:types": "lerna run check:types", "build:sdk": "lerna run --stream build:sdk", @@ -37,13 +45,16 @@ "nuke": "yarn run nuke:packages && yarn run nuke:docker", "nuke:packages": "yarn run restore", "nuke:docker": "lerna run --stream dev:stack:nuke", - "clean": "lerna clean -y", + "clean": "lerna clean -y && echo Cleaning top level node modules 🧹 && rm -rf ./node_modules && echo Done! 
🚀", "kill-builder": "kill-port 3000", "kill-server": "kill-port 4001 4002", - "kill-all": "yarn run kill-builder && yarn run kill-server", - "dev": "yarn run kill-all && lerna run --parallel prebuild && lerna run --stream dev:builder", - "dev:noserver": "yarn run kill-builder && lerna run --stream dev:stack:up && lerna run --stream dev:builder --ignore @budibase/backend-core --ignore @budibase/server --ignore @budibase/worker", - "dev:server": "yarn run kill-server && lerna run --stream dev:builder --scope @budibase/worker --scope @budibase/server", + "kill-accountportal": "kill-port 3001 4003", + "kill-all": "yarn run kill-builder && yarn run kill-server && yarn kill-accountportal", + "dev": "yarn run kill-all && lerna run --parallel prebuild && lerna run --stream dev --ignore=@budibase/account-portal-ui --ignore @budibase/account-portal-server", + "dev:noserver": "yarn run kill-builder && lerna run --stream dev:stack:up --ignore @budibase/account-portal-server && lerna run --stream dev --ignore @budibase/backend-core --ignore @budibase/server --ignore @budibase/worker --ignore=@budibase/account-portal-ui --ignore @budibase/account-portal-server", + "dev:server": "yarn run kill-server && lerna run --stream dev --scope @budibase/worker --scope @budibase/server", + "dev:accountportal": "yarn kill-accountportal && lerna run dev --stream --scope @budibase/account-portal-ui --scope @budibase/account-portal-server", + "dev:all": "yarn run kill-all && lerna run --stream dev", "dev:built": "yarn run kill-all && cd packages/server && yarn dev:stack:up && cd ../../ && lerna run --stream dev:built", "dev:docker": "yarn build --scope @budibase/server --scope @budibase/worker && docker-compose -f hosting/docker-compose.build.yaml -f hosting/docker-compose.dev.yaml --env-file hosting/.env up --build --scale proxy-service=0", "test": "lerna run --stream test --stream", @@ -52,7 +63,7 @@ "lint": "yarn run lint:eslint && yarn run lint:prettier", "lint:fix:eslint": "eslint --fix --max-warnings=0 packages qa-core", "lint:fix:prettier": "prettier --write \"packages/**/*.{js,ts,svelte}\" && prettier --write \"examples/**/*.{js,ts,svelte}\" && prettier --write \"qa-core/**/*.{js,ts,svelte}\"", - "lint:fix": "yarn run lint:fix:prettier && yarn run lint:fix:eslint", + "lint:fix": "yarn run lint:fix:eslint && yarn run lint:fix:prettier", "build:specs": "lerna run --stream specs", "build:docker:airgap": "node hosting/scripts/airgapped/airgappedDockerBuild", "build:docker:airgap:single": "SINGLE_IMAGE=1 node hosting/scripts/airgapped/airgappedDockerBuild", @@ -77,21 +88,33 @@ "security:audit": "node scripts/audit.js", "postinstall": "husky install", "submodules:load": "git submodule init && git submodule update && yarn", - "submodules:unload": "git submodule deinit --all && yarn" + "submodules:unload": "git submodule deinit --all && yarn", + "add-app-migration": "node scripts/add-app-migration.js --title" }, "workspaces": { "packages": [ - "packages/*" + "packages/*", + "!packages/account-portal", + "packages/account-portal/packages/*" ] }, "resolutions": { "@budibase/backend-core": "0.0.0", "@budibase/shared-core": "0.0.0", "@budibase/string-templates": "0.0.0", - "@budibase/types": "0.0.0" + "@budibase/types": "0.0.0", + "tough-cookie": "4.1.3", + "node-fetch": "2.6.7", + "semver": "7.5.3", + "http-cache-semantics": "4.1.1", + "msgpackr": "1.10.1", + "axios": "1.6.3", + "xml2js": "0.6.2", + "unset-value": "2.0.1", + "passport": "0.6.0" }, "engines": { - "node": ">=18.0.0 <19.0.0" + "node": ">=20.0.0 <21.0.0" }, 
"dependencies": {} } diff --git a/packages/account-portal b/packages/account-portal new file mode 160000 index 0000000000..63ce32bca8 --- /dev/null +++ b/packages/account-portal @@ -0,0 +1 @@ +Subproject commit 63ce32bca871f0a752323f5f7ebb5ec16bbbacc3 diff --git a/packages/backend-core/jest-testcontainers-config.js b/packages/backend-core/jest-testcontainers-config.js deleted file mode 100644 index 8ac0f0cd9d..0000000000 --- a/packages/backend-core/jest-testcontainers-config.js +++ /dev/null @@ -1,8 +0,0 @@ -const { join } = require("path") -require("dotenv").config({ - path: join(__dirname, "..", "..", "hosting", ".env"), -}) - -const jestTestcontainersConfigGenerator = require("../../jestTestcontainersConfigGenerator") - -module.exports = jestTestcontainersConfigGenerator() diff --git a/packages/backend-core/jest.config.ts b/packages/backend-core/jest.config.ts index 3f1065ead2..c944b0d7e1 100644 --- a/packages/backend-core/jest.config.ts +++ b/packages/backend-core/jest.config.ts @@ -1,8 +1,8 @@ import { Config } from "@jest/types" const baseConfig: Config.InitialProjectOptions = { - preset: "@trendyol/jest-testcontainers", setupFiles: ["./tests/jestEnv.ts"], + globalSetup: "./../../globalSetup.ts", setupFilesAfterEnv: ["./tests/jestSetup.ts"], transform: { "^.+\\.ts?$": "@swc/jest", diff --git a/packages/backend-core/package.json b/packages/backend-core/package.json index dc8d71b52c..030fec8728 100644 --- a/packages/backend-core/package.json +++ b/packages/backend-core/package.json @@ -21,22 +21,23 @@ "test:watch": "jest --watchAll" }, "dependencies": { - "@budibase/nano": "10.1.3", + "@budibase/nano": "10.1.5", "@budibase/pouchdb-replication-stream": "1.2.10", "@budibase/shared-core": "0.0.0", "@budibase/types": "0.0.0", - "@techpass/passport-openidconnect": "0.3.2", + "@govtechsg/passport-openidconnect": "^1.0.2", "aws-cloudfront-sign": "3.0.2", "aws-sdk": "2.1030.0", "bcrypt": "5.1.0", "bcryptjs": "2.4.3", "bull": "4.10.1", "correlation-id": "4.0.0", + "dd-trace": "5.2.0", "dotenv": "16.0.1", "ioredis": "5.3.2", "joi": "17.6.0", "jsonwebtoken": "9.0.2", - "koa-passport": "4.1.4", + "koa-passport": "^6.0.0", "koa-pino-logger": "4.0.0", "lodash": "4.17.21", "node-fetch": "2.6.7", @@ -51,33 +52,32 @@ "redlock": "4.2.0", "rotating-file-stream": "3.1.0", "sanitize-s3-objectkey": "0.0.1", - "semver": "7.3.7", + "semver": "^7.5.4", "tar-fs": "2.1.1", - "uuid": "8.3.2" + "uuid": "^8.3.2" }, "devDependencies": { "@shopify/jest-koa-mocks": "5.1.1", "@swc/core": "1.3.71", "@swc/jest": "0.2.27", - "@trendyol/jest-testcontainers": "^2.1.1", "@types/chance": "1.1.3", "@types/cookies": "0.7.8", "@types/jest": "29.5.5", "@types/lodash": "4.14.200", - "@types/node": "18.17.0", "@types/node-fetch": "2.6.4", "@types/pouchdb": "6.4.0", - "@types/redlock": "4.0.3", + "@types/redlock": "4.0.7", "@types/semver": "7.3.7", "@types/tar-fs": "2.0.1", "@types/uuid": "8.3.4", "chance": "1.1.8", - "ioredis-mock": "8.7.0", - "jest": "29.6.2", - "jest-environment-node": "29.6.2", + "ioredis-mock": "8.9.0", + "jest": "29.7.0", + "jest-environment-node": "29.7.0", "jest-serial-runner": "1.2.1", "pino-pretty": "10.0.0", "pouchdb-adapter-memory": "7.2.2", + "testcontainers": "^10.7.2", "timekeeper": "2.2.0", "typescript": "5.2.2" }, diff --git a/packages/backend-core/scripts/test.sh b/packages/backend-core/scripts/test.sh index 7d19ec96cc..b9937e3a4a 100644 --- a/packages/backend-core/scripts/test.sh +++ b/packages/backend-core/scripts/test.sh @@ -4,10 +4,10 @@ set -e if [[ -n $CI ]] then # --runInBand performs better 
in ci where resources are limited - echo "jest --coverage --runInBand --forceExit" - jest --coverage --runInBand --forceExit + echo "jest --coverage --runInBand --forceExit $@" + jest --coverage --runInBand --forceExit $@ else # --maxWorkers performs better in development - echo "jest --coverage --detectOpenHandles" - jest --coverage --detectOpenHandles + echo "jest --coverage --forceExit --detectOpenHandles $@" + jest --coverage --forceExit --detectOpenHandles $@ fi \ No newline at end of file diff --git a/packages/backend-core/src/auth/auth.ts b/packages/backend-core/src/auth/auth.ts index 0100a2d0e2..87ac46cf1c 100644 --- a/packages/backend-core/src/auth/auth.ts +++ b/packages/backend-core/src/auth/auth.ts @@ -1,5 +1,6 @@ const _passport = require("koa-passport") const LocalStrategy = require("passport-local").Strategy + import { getGlobalDB } from "../context" import { Cookie } from "../constants" import { getSessionsForUser, invalidateSessions } from "../security/sessions" @@ -18,6 +19,7 @@ import { GoogleInnerConfig, OIDCInnerConfig, PlatformLogoutOpts, + SessionCookie, SSOProviderType, } from "@budibase/types" import * as events from "../events" @@ -26,6 +28,7 @@ import { clearCookie, getCookie } from "../utils" import { ssoSaveUserNoOp } from "../middleware/passport/sso/sso" const refresh = require("passport-oauth2-refresh") + export { auditLog, authError, @@ -42,7 +45,6 @@ export const buildAuthMiddleware = authenticated export const buildTenancyMiddleware = tenancy export const buildCsrfMiddleware = csrf export const passport = _passport -export const jwt = require("jsonwebtoken") // Strategies _passport.use(new LocalStrategy(local.options, local.authenticate)) @@ -131,7 +133,7 @@ export async function refreshOAuthToken( configId?: string ): Promise { switch (providerType) { - case SSOProviderType.OIDC: + case SSOProviderType.OIDC: { if (!configId) { return { err: { data: "OIDC config id not provided" } } } @@ -140,12 +142,14 @@ export async function refreshOAuthToken( return { err: { data: "OIDC configuration not found" } } } return refreshOIDCAccessToken(oidcConfig, refreshToken) - case SSOProviderType.GOOGLE: + } + case SSOProviderType.GOOGLE: { let googleConfig = await configs.getGoogleConfig() if (!googleConfig) { return { err: { data: "Google configuration not found" } } } return refreshGoogleAccessToken(googleConfig, refreshToken) + } } } @@ -189,10 +193,10 @@ export async function platformLogout(opts: PlatformLogoutOpts) { if (!ctx) throw new Error("Koa context must be supplied to logout.") - const currentSession = getCookie(ctx, Cookie.Auth) + const currentSession = getCookie(ctx, Cookie.Auth) let sessions = await getSessionsForUser(userId) - if (keepActiveSession) { + if (currentSession && keepActiveSession) { sessions = sessions.filter( session => session.sessionId !== currentSession.sessionId ) diff --git a/packages/backend-core/src/auth/tests/auth.spec.ts b/packages/backend-core/src/auth/tests/auth.spec.ts index 3ae691be58..a80e1ea739 100644 --- a/packages/backend-core/src/auth/tests/auth.spec.ts +++ b/packages/backend-core/src/auth/tests/auth.spec.ts @@ -8,7 +8,7 @@ describe("platformLogout", () => { await testEnv.withTenant(async () => { const ctx = structures.koa.newContext() await auth.platformLogout({ ctx, userId: "test" }) - expect(events.auth.logout).toBeCalledTimes(1) + expect(events.auth.logout).toHaveBeenCalledTimes(1) }) }) }) diff --git a/packages/backend-core/src/cache/appMetadata.ts b/packages/backend-core/src/cache/appMetadata.ts index 
bd3efc20db..d442511fb8 100644 --- a/packages/backend-core/src/cache/appMetadata.ts +++ b/packages/backend-core/src/cache/appMetadata.ts @@ -19,7 +19,7 @@ async function populateFromDB(appId: string) { return doWithDB( appId, (db: Database) => { - return db.get(DocumentType.APP_METADATA) + return db.get(DocumentType.APP_METADATA) }, { skip_setup: true } ) diff --git a/packages/backend-core/src/cache/base/index.ts b/packages/backend-core/src/cache/base/index.ts index 264984c6a5..433941b5c7 100644 --- a/packages/backend-core/src/cache/base/index.ts +++ b/packages/backend-core/src/cache/base/index.ts @@ -23,6 +23,18 @@ export default class BaseCache { return client.keys(pattern) } + async exists(key: string, opts = { useTenancy: true }) { + key = opts.useTenancy ? generateTenantKey(key) : key + const client = await this.getClient() + return client.exists(key) + } + + async scan(key: string, opts = { useTenancy: true }) { + key = opts.useTenancy ? generateTenantKey(key) : key + const client = await this.getClient() + return client.scan(key) + } + /** * Read only from the cache. */ @@ -32,6 +44,15 @@ export default class BaseCache { return client.get(key) } + /** + * Read only from the cache. + */ + async bulkGet(keys: string[], opts = { useTenancy: true }) { + keys = opts.useTenancy ? keys.map(key => generateTenantKey(key)) : keys + const client = await this.getClient() + return client.bulkGet(keys) + } + /** * Write to the cache. */ @@ -46,6 +67,25 @@ export default class BaseCache { await client.store(key, value, ttl) } + /** + * Bulk write to the cache. + */ + async bulkStore( + data: Record, + ttl: number | null = null, + opts = { useTenancy: true } + ) { + if (opts.useTenancy) { + data = Object.entries(data).reduce((acc, [key, value]) => { + acc[generateTenantKey(key)] = value + return acc + }, {} as Record) + } + + const client = await this.getClient() + await client.bulkStore(data, ttl) + } + /** * Remove from cache. */ @@ -55,15 +95,24 @@ export default class BaseCache { return client.delete(key) } + /** + * Remove from cache. + */ + async bulkDelete(keys: string[], opts = { useTenancy: true }) { + keys = opts.useTenancy ? keys.map(key => generateTenantKey(key)) : keys + const client = await this.getClient() + return client.bulkDelete(keys) + } + /** * Read from the cache. Write to the cache if not exists. */ - async withCache( + async withCache( key: string, - ttl: number, - fetchFn: any, + ttl: number | null = null, + fetchFn: () => Promise | T, opts = { useTenancy: true } - ) { + ): Promise { const cachedValue = await this.get(key, opts) if (cachedValue) { return cachedValue @@ -80,7 +129,7 @@ export default class BaseCache { } } - async bustCache(key: string, opts = { client: null }) { + async bustCache(key: string) { const client = await this.getClient() try { await client.delete(generateTenantKey(key)) @@ -89,4 +138,13 @@ export default class BaseCache { throw err } } + + /** + * Delete the entry if the provided value matches the stored one. + */ + async deleteIfValue(key: string, value: any, opts = { useTenancy: true }) { + key = opts.useTenancy ? 
generateTenantKey(key) : key + const client = await this.getClient() + await client.deleteIfValue(key, value) + } } diff --git a/packages/backend-core/src/cache/docWritethrough.ts b/packages/backend-core/src/cache/docWritethrough.ts new file mode 100644 index 0000000000..05f13a0d91 --- /dev/null +++ b/packages/backend-core/src/cache/docWritethrough.ts @@ -0,0 +1,105 @@ +import { AnyDocument, Database } from "@budibase/types" + +import { JobQueue, Queue, createQueue } from "../queue" +import * as dbUtils from "../db" + +interface ProcessDocMessage { + dbName: string + docId: string + data: Record +} + +const PERSIST_MAX_ATTEMPTS = 100 +let processor: DocWritethroughProcessor | undefined + +export class DocWritethroughProcessor { + private static _queue: Queue + + public static get queue() { + if (!DocWritethroughProcessor._queue) { + DocWritethroughProcessor._queue = createQueue( + JobQueue.DOC_WRITETHROUGH_QUEUE, + { + jobOptions: { + attempts: PERSIST_MAX_ATTEMPTS, + }, + } + ) + } + + return DocWritethroughProcessor._queue + } + + init() { + DocWritethroughProcessor.queue.process(async message => { + try { + await this.persistToDb(message.data) + } catch (err: any) { + if (err.status === 409) { + // If we get a 409, it means that another job updated it meanwhile. We want to retry it to persist it again. + throw new Error( + `Conflict persisting message ${message.id}. Attempt ${message.attemptsMade}` + ) + } + + throw err + } + }) + return this + } + + private async persistToDb({ + dbName, + docId, + data, + }: { + dbName: string + docId: string + data: Record + }) { + const db = dbUtils.getDB(dbName) + let doc: AnyDocument | undefined + try { + doc = await db.get(docId) + } catch { + doc = { _id: docId } + } + + doc = { ...doc, ...data } + await db.put(doc) + } +} + +export class DocWritethrough { + private db: Database + private _docId: string + + constructor(db: Database, docId: string) { + this.db = db + this._docId = docId + } + + get docId() { + return this._docId + } + + async patch(data: Record) { + await DocWritethroughProcessor.queue.add({ + dbName: this.db.name, + docId: this.docId, + data, + }) + } +} + +export function init(): DocWritethroughProcessor { + processor = new DocWritethroughProcessor().init() + return processor +} + +export function getProcessor(): DocWritethroughProcessor { + if (!processor) { + return init() + } + return processor +} diff --git a/packages/backend-core/src/cache/generic.ts b/packages/backend-core/src/cache/generic.ts index 7cd5d6227f..2d6d8b9472 100644 --- a/packages/backend-core/src/cache/generic.ts +++ b/packages/backend-core/src/cache/generic.ts @@ -1,6 +1,6 @@ -const BaseCache = require("./base") +import BaseCache from "./base" -const GENERIC = new BaseCache.default() +const GENERIC = new BaseCache() export enum CacheKey { CHECKLIST = "checklist", @@ -18,13 +18,16 @@ export enum TTL { ONE_DAY = 86400, } -function performExport(funcName: string) { - return (...args: any) => GENERIC[funcName](...args) -} - -export const keys = performExport("keys") -export const get = performExport("get") -export const store = performExport("store") -export const destroy = performExport("delete") -export const withCache = performExport("withCache") -export const bustCache = performExport("bustCache") +export const keys = (...args: Parameters) => + GENERIC.keys(...args) +export const get = (...args: Parameters) => + GENERIC.get(...args) +export const store = (...args: Parameters) => + GENERIC.store(...args) +export const destroy = (...args: Parameters) => + 
GENERIC.delete(...args) +export const withCache = ( + ...args: Parameters> +) => GENERIC.withCache(...args) +export const bustCache = (...args: Parameters) => + GENERIC.bustCache(...args) diff --git a/packages/backend-core/src/cache/index.ts b/packages/backend-core/src/cache/index.ts index 58928c271a..3b25108634 100644 --- a/packages/backend-core/src/cache/index.ts +++ b/packages/backend-core/src/cache/index.ts @@ -2,4 +2,7 @@ export * as generic from "./generic" export * as user from "./user" export * as app from "./appMetadata" export * as writethrough from "./writethrough" +export * as invite from "./invite" +export * as passwordReset from "./passwordReset" export * from "./generic" +export * as docWritethrough from "./docWritethrough" diff --git a/packages/backend-core/src/cache/invite.ts b/packages/backend-core/src/cache/invite.ts new file mode 100644 index 0000000000..e3d698bcc6 --- /dev/null +++ b/packages/backend-core/src/cache/invite.ts @@ -0,0 +1,86 @@ +import * as utils from "../utils" +import { Duration } from "../utils" +import env from "../environment" +import { getTenantId } from "../context" +import * as redis from "../redis/init" + +const TTL_SECONDS = Duration.fromDays(7).toSeconds() + +interface Invite { + email: string + info: any +} + +interface InviteWithCode extends Invite { + code: string +} + +/** + * Given an invite code and invite body, allow the update an existing/valid invite in redis + * @param code The invite code for an invite in redis + * @param value The body of the updated user invitation + */ +export async function updateCode(code: string, value: Invite) { + const client = await redis.getInviteClient() + await client.store(code, value, TTL_SECONDS) +} + +/** + * Generates an invitation code and writes it to redis - which can later be checked for user creation. + * @param email the email address which the code is being sent to (for use later). + * @param info Information to be carried along with the invitation. + * @return returns the code that was stored to redis. + */ +export async function createCode(email: string, info: any): Promise { + const code = utils.newid() + const client = await redis.getInviteClient() + await client.store(code, { email, info }, TTL_SECONDS) + return code +} + +/** + * Checks that the provided invite code is valid - will return the email address of user that was invited. + * @param code the invite code that was provided as part of the link. + * @return If the code is valid then an email address will be returned. + */ +export async function getCode(code: string): Promise { + const client = await redis.getInviteClient() + const value = (await client.get(code)) as Invite | undefined + if (!value) { + throw "Invitation is not valid or has expired, please request a new one." + } + return value +} + +export async function deleteCode(code: string) { + const client = await redis.getInviteClient() + await client.delete(code) +} + +/** + Get all currently available user invitations for the current tenant. 
+ **/
+export async function getInviteCodes(): Promise<InviteWithCode[]> {
+  const client = await redis.getInviteClient()
+  const invites: { key: string; value: Invite }[] = await client.scan()
+
+  const results: InviteWithCode[] = invites.map(invite => {
+    return {
+      ...invite.value,
+      code: invite.key,
+    }
+  })
+  if (!env.MULTI_TENANCY) {
+    return results
+  }
+  const tenantId = getTenantId()
+  return results.filter(invite => tenantId === invite.info.tenantId)
+}
+
+export async function getExistingInvites(
+  emails: string[]
+): Promise<InviteWithCode[]> {
+  return (await getInviteCodes()).filter(invite =>
+    emails.includes(invite.email)
+  )
+}
diff --git a/packages/backend-core/src/cache/passwordReset.ts b/packages/backend-core/src/cache/passwordReset.ts
new file mode 100644
index 0000000000..db32b520f7
--- /dev/null
+++ b/packages/backend-core/src/cache/passwordReset.ts
@@ -0,0 +1,49 @@
+import * as redis from "../redis/init"
+import * as utils from "../utils"
+import { Duration } from "../utils"
+
+const TTL_SECONDS = Duration.fromHours(1).toSeconds()
+
+interface PasswordReset {
+  userId: string
+  info: any
+}
+
+/**
+ * Given a user ID this will store a code (that is returned) for an hour in redis.
+ * The user can then return this code for resetting their password (through their reset link).
+ * @param userId the ID of the user which is to be reset.
+ * @param info Info about the user/the reset process.
+ * @return returns the code that was stored to redis.
+ */
+export async function createCode(userId: string, info: any): Promise<string> {
+  const code = utils.newid()
+  const client = await redis.getPasswordResetClient()
+  await client.store(code, { userId, info }, TTL_SECONDS)
+  return code
+}
+
+/**
+ * Given a reset code this will look it up in redis and check that it is valid.
+ * @param code The code provided via the email link.
+ * @return returns the user ID and reset info if the code is found.
+ */
+export async function getCode(code: string): Promise<PasswordReset> {
+  const client = await redis.getPasswordResetClient()
+  const value = (await client.get(code)) as PasswordReset | undefined
+  if (!value) {
+    throw new Error(
+      "Provided information is not valid, cannot reset password - please try again."
+    )
+  }
+  return value
+}
+
+/**
+ * Given a reset code this will invalidate it.
+ * @param code The code provided via the email link.
+ */
+export async function invalidateCode(code: string): Promise<void> {
+  const client = await redis.getPasswordResetClient()
+  await client.delete(code)
+}
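// ---------------------------------------------------------------------------
// Editor's sketch, not part of the diff: both new modules follow the same
// one-time-code pattern - an opaque newid() key, the payload stored through a
// dedicated redis client, and a TTL (7 days for invites, 1 hour for password
// resets). A hypothetical invite flow, names illustrative only:
import * as invite from "./invite"

async function inviteFlow(tenantId: string) {
  const code = await invite.createCode("new.user@example.com", { tenantId })
  // ...email the user a link containing `code`; when they accept it:
  const { email } = await invite.getCode(code) // throws if expired/invalid
  await invite.deleteCode(code) // codes are single use
  return email
}
// ---------------------------------------------------------------------------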
diff --git a/packages/backend-core/src/cache/tests/docWritethrough.spec.ts b/packages/backend-core/src/cache/tests/docWritethrough.spec.ts
new file mode 100644
index 0000000000..47b3f0672f
--- /dev/null
+++ b/packages/backend-core/src/cache/tests/docWritethrough.spec.ts
@@ -0,0 +1,294 @@
+import tk from "timekeeper"
+
+import _ from "lodash"
+import { DBTestConfiguration, generator, structures } from "../../../tests"
+import { getDB } from "../../db"
+
+import {
+  DocWritethrough,
+  DocWritethroughProcessor,
+  init,
+} from "../docWritethrough"
+
+import InMemoryQueue from "../../queue/inMemoryQueue"
+
+const initialTime = Date.now()
+
+async function waitForQueueCompletion() {
+  const queue: InMemoryQueue = DocWritethroughProcessor.queue as never
+  await queue.waitForCompletion()
+}
+
+describe("docWritethrough", () => {
+  beforeAll(() => {
+    init()
+  })
+
+  const config = new DBTestConfiguration()
+
+  const db = getDB(structures.db.id())
+  let documentId: string
+  let docWritethrough: DocWritethrough
+
+  describe("patch", () => {
+    function generatePatchObject(fieldCount: number) {
+      const keys = generator.unique(() => generator.guid(), fieldCount)
+      return keys.reduce((acc, c) => {
+        acc[c] = generator.word()
+        return acc
+      }, {} as Record<string, any>)
+    }
+
+    beforeEach(async () => {
+      jest.clearAllMocks()
+      documentId = structures.uuid()
+      docWritethrough = new DocWritethrough(db, documentId)
+    })
+
+    it("patching will not persist until the messages are persisted", async () => {
+      await config.doInTenant(async () => {
+        await docWritethrough.patch(generatePatchObject(2))
+        await docWritethrough.patch(generatePatchObject(2))
+
+        expect(await db.exists(documentId)).toBe(false)
+      })
+    })
+
+    it("patching will persist when the messages are persisted", async () => {
+      await config.doInTenant(async () => {
+        const patch1 = generatePatchObject(2)
+        const patch2 = generatePatchObject(2)
+        await docWritethrough.patch(patch1)
+        await docWritethrough.patch(patch2)
+
+        await waitForQueueCompletion()
+
+        // This will not be persisted
+        const patch3 = generatePatchObject(3)
+        await docWritethrough.patch(patch3)
+
+        expect(await db.get(documentId)).toEqual({
+          _id: documentId,
+          ...patch1,
+          ...patch2,
+          _rev: expect.stringMatching(/2-.+/),
+          createdAt: new Date(initialTime).toISOString(),
+          updatedAt: new Date(initialTime).toISOString(),
+        })
+      })
+    })
+
+    it("patching will persist keeping the previous data", async () => {
+      await config.doInTenant(async () => {
+        const patch1 = generatePatchObject(2)
+        const patch2 = generatePatchObject(2)
+        await docWritethrough.patch(patch1)
+        await docWritethrough.patch(patch2)
+
+        await waitForQueueCompletion()
+
+        const patch3 = generatePatchObject(3)
+        await docWritethrough.patch(patch3)
+
+        await waitForQueueCompletion()
+
+        expect(await db.get(documentId)).toEqual(
+          expect.objectContaining({
+            _id: documentId,
+            ...patch1,
+            ...patch2,
+            ...patch3,
+          })
+        )
+      })
+    })
+
+    it("date audit fields are set correctly when persisting", async () => {
+      await config.doInTenant(async () => {
+        const patch1 = generatePatchObject(2)
+        const patch2 = generatePatchObject(2)
+        await docWritethrough.patch(patch1)
+        const date1 = new Date()
+        await waitForQueueCompletion()
+        await docWritethrough.patch(patch2)
+
+        tk.travel(Date.now() + 100)
+        const date2 = new Date()
+        await waitForQueueCompletion()
+
expect(date1).not.toEqual(date2) + expect(await db.get(documentId)).toEqual( + expect.objectContaining({ + createdAt: date1.toISOString(), + updatedAt: date2.toISOString(), + }) + ) + }) + }) + + it("concurrent patches will override keys", async () => { + await config.doInTenant(async () => { + const patch1 = generatePatchObject(2) + await docWritethrough.patch(patch1) + await waitForQueueCompletion() + const patch2 = generatePatchObject(1) + await docWritethrough.patch(patch2) + + const keyToOverride = _.sample(Object.keys(patch1))! + expect(await db.get(documentId)).toEqual( + expect.objectContaining({ + [keyToOverride]: patch1[keyToOverride], + }) + ) + + await waitForQueueCompletion() + + const patch3 = { + ...generatePatchObject(3), + [keyToOverride]: generator.word(), + } + await docWritethrough.patch(patch3) + await waitForQueueCompletion() + + expect(await db.get(documentId)).toEqual( + expect.objectContaining({ + ...patch1, + ...patch2, + ...patch3, + }) + ) + }) + }) + + it("concurrent patches to different docWritethrough will not pollute each other", async () => { + await config.doInTenant(async () => { + const secondDocWritethrough = new DocWritethrough( + db, + structures.db.id() + ) + + const doc1Patch = generatePatchObject(2) + await docWritethrough.patch(doc1Patch) + const doc2Patch = generatePatchObject(1) + await secondDocWritethrough.patch(doc2Patch) + + await waitForQueueCompletion() + + const doc1Patch2 = generatePatchObject(3) + await docWritethrough.patch(doc1Patch2) + const doc2Patch2 = generatePatchObject(3) + await secondDocWritethrough.patch(doc2Patch2) + await waitForQueueCompletion() + + expect(await db.get(docWritethrough.docId)).toEqual( + expect.objectContaining({ + ...doc1Patch, + ...doc1Patch2, + }) + ) + + expect(await db.get(secondDocWritethrough.docId)).toEqual( + expect.objectContaining({ + ...doc2Patch, + ...doc2Patch2, + }) + ) + }) + }) + + it("cached values are persisted only once", async () => { + await config.doInTenant(async () => { + const initialPatch = generatePatchObject(5) + + await docWritethrough.patch(initialPatch) + await waitForQueueCompletion() + + expect(await db.get(documentId)).toEqual( + expect.objectContaining(initialPatch) + ) + + await db.remove(await db.get(documentId)) + + await waitForQueueCompletion() + const extraPatch = generatePatchObject(5) + await docWritethrough.patch(extraPatch) + await waitForQueueCompletion() + + expect(await db.get(documentId)).toEqual( + expect.objectContaining(extraPatch) + ) + expect(await db.get(documentId)).not.toEqual( + expect.objectContaining(initialPatch) + ) + }) + }) + + it("concurrent calls will not cause conflicts", async () => { + async function parallelPatch(count: number) { + const patches = Array.from({ length: count }).map(() => + generatePatchObject(1) + ) + await Promise.all(patches.map(p => docWritethrough.patch(p))) + + return patches.reduce((acc, c) => { + acc = { ...acc, ...c } + return acc + }, {}) + } + const queueMessageSpy = jest.spyOn(DocWritethroughProcessor.queue, "add") + + await config.doInTenant(async () => { + let patches = await parallelPatch(5) + expect(queueMessageSpy).toHaveBeenCalledTimes(5) + + await waitForQueueCompletion() + expect(await db.get(documentId)).toEqual( + expect.objectContaining(patches) + ) + + patches = { ...patches, ...(await parallelPatch(40)) } + expect(queueMessageSpy).toHaveBeenCalledTimes(45) + + await waitForQueueCompletion() + expect(await db.get(documentId)).toEqual( + expect.objectContaining(patches) + ) + + patches = { 
...patches, ...(await parallelPatch(10)) } + expect(queueMessageSpy).toHaveBeenCalledTimes(55) + + await waitForQueueCompletion() + expect(await db.get(documentId)).toEqual( + expect.objectContaining(patches) + ) + }) + }) + + // This is not yet supported + // eslint-disable-next-line jest/no-disabled-tests + it.skip("patches will execute in order", async () => { + let incrementalValue = 0 + const keyToOverride = generator.word() + async function incrementalPatches(count: number) { + for (let i = 0; i < count; i++) { + await docWritethrough.patch({ [keyToOverride]: incrementalValue++ }) + } + } + + await config.doInTenant(async () => { + await incrementalPatches(5) + + await waitForQueueCompletion() + expect(await db.get(documentId)).toEqual( + expect.objectContaining({ [keyToOverride]: 5 }) + ) + + await incrementalPatches(40) + await waitForQueueCompletion() + expect(await db.get(documentId)).toEqual( + expect.objectContaining({ [keyToOverride]: 45 }) + ) + }) + }) + }) +}) diff --git a/packages/backend-core/src/cache/tests/user.spec.ts b/packages/backend-core/src/cache/tests/user.spec.ts index 80e5bc3063..49a8d51c16 100644 --- a/packages/backend-core/src/cache/tests/user.spec.ts +++ b/packages/backend-core/src/cache/tests/user.spec.ts @@ -55,8 +55,8 @@ describe("user cache", () => { })), }) - expect(UserDB.bulkGet).toBeCalledTimes(1) - expect(UserDB.bulkGet).toBeCalledWith(userIdsToRequest) + expect(UserDB.bulkGet).toHaveBeenCalledTimes(1) + expect(UserDB.bulkGet).toHaveBeenCalledWith(userIdsToRequest) }) it("on a second all, all of them are retrieved from cache", async () => { @@ -82,7 +82,7 @@ describe("user cache", () => { ), }) - expect(UserDB.bulkGet).toBeCalledTimes(1) + expect(UserDB.bulkGet).toHaveBeenCalledTimes(1) }) it("when some users are cached, only the missing ones are retrieved from db", async () => { @@ -110,8 +110,8 @@ describe("user cache", () => { ), }) - expect(UserDB.bulkGet).toBeCalledTimes(1) - expect(UserDB.bulkGet).toBeCalledWith([ + expect(UserDB.bulkGet).toHaveBeenCalledTimes(1) + expect(UserDB.bulkGet).toHaveBeenCalledWith([ userIdsToRequest[1], userIdsToRequest[2], userIdsToRequest[4], diff --git a/packages/backend-core/src/cache/tests/writethrough.spec.ts b/packages/backend-core/src/cache/tests/writethrough.spec.ts index 97d3ece7a6..37887b4bd9 100644 --- a/packages/backend-core/src/cache/tests/writethrough.spec.ts +++ b/packages/backend-core/src/cache/tests/writethrough.spec.ts @@ -1,15 +1,16 @@ import { DBTestConfiguration } from "../../../tests/extra" -import { - structures, - expectFunctionWasCalledTimesWith, - mocks, -} from "../../../tests" +import { structures } from "../../../tests" import { Writethrough } from "../writethrough" import { getDB } from "../../db" +import { Document } from "@budibase/types" import tk from "timekeeper" tk.freeze(Date.now()) +interface ValueDoc extends Document { + value: any +} + const DELAY = 5000 describe("writethrough", () => { @@ -117,7 +118,7 @@ describe("writethrough", () => { describe("get", () => { it("should be able to retrieve", async () => { await config.doInTenant(async () => { - const response = await writethrough.get(docId) + const response = await writethrough.get(docId) expect(response.value).toBe(4) }) }) diff --git a/packages/backend-core/src/cache/user.ts b/packages/backend-core/src/cache/user.ts index 313b9a4d4a..ecfa20f99e 100644 --- a/packages/backend-core/src/cache/user.ts +++ b/packages/backend-core/src/cache/user.ts @@ -6,7 +6,7 @@ import env from "../environment" import * as accounts from 
"../accounts" import { UserDB } from "../users" import { sdk } from "@budibase/shared-core" -import { User } from "@budibase/types" +import { User, UserMetadata } from "@budibase/types" const EXPIRY_SECONDS = 3600 @@ -15,7 +15,7 @@ const EXPIRY_SECONDS = 3600 */ async function populateFromDB(userId: string, tenantId: string) { const db = tenancy.getTenantDB(tenantId) - const user = await db.get(userId) + const user = await db.get(userId) user.budibaseAccess = true if (!env.SELF_HOSTED && !env.DISABLE_ACCOUNT_PORTAL) { const account = await accounts.getAccount(user.email) diff --git a/packages/backend-core/src/cache/writethrough.ts b/packages/backend-core/src/cache/writethrough.ts index c331d791a6..cd7409ca15 100644 --- a/packages/backend-core/src/cache/writethrough.ts +++ b/packages/backend-core/src/cache/writethrough.ts @@ -7,8 +7,8 @@ import * as locks from "../redis/redlockImpl" const DEFAULT_WRITE_RATE_MS = 10000 let CACHE: BaseCache | null = null -interface CacheItem { - doc: any +interface CacheItem { + doc: T lastWrite: number } @@ -24,7 +24,10 @@ function makeCacheKey(db: Database, key: string) { return db.name + key } -function makeCacheItem(doc: any, lastWrite: number | null = null): CacheItem { +function makeCacheItem( + doc: T, + lastWrite: number | null = null +): CacheItem { return { doc, lastWrite: lastWrite || Date.now() } } @@ -35,7 +38,7 @@ async function put( ) { const cache = await getCache() const key = doc._id - let cacheItem: CacheItem | undefined + let cacheItem: CacheItem | undefined if (key) { cacheItem = await cache.get(makeCacheKey(db, key)) } @@ -53,11 +56,8 @@ async function put( const writeDb = async (toWrite: any) => { // doc should contain the _id and _rev const response = await db.put(toWrite, { force: true }) - output = { - ...doc, - _id: response.id, - _rev: response.rev, - } + output._id = response.id + output._rev = response.rev } try { await writeDb(doc) @@ -84,12 +84,12 @@ async function put( return { ok: true, id: output._id, rev: output._rev } } -async function get(db: Database, id: string): Promise { +async function get(db: Database, id: string): Promise { const cache = await getCache() const cacheKey = makeCacheKey(db, id) - let cacheItem: CacheItem = await cache.get(cacheKey) + let cacheItem: CacheItem = await cache.get(cacheKey) if (!cacheItem) { - const doc = await db.get(id) + const doc = await db.get(id) cacheItem = makeCacheItem(doc) await cache.store(cacheKey, cacheItem) } @@ -123,8 +123,8 @@ export class Writethrough { return put(this.db, doc, writeRateMs) } - async get(id: string) { - return get(this.db, id) + async get(id: string) { + return get(this.db, id) } async remove(docOrId: any, rev?: any) { diff --git a/packages/backend-core/src/configs/configs.ts b/packages/backend-core/src/configs/configs.ts index 0c83ed005d..0d189e3f7d 100644 --- a/packages/backend-core/src/configs/configs.ts +++ b/packages/backend-core/src/configs/configs.ts @@ -17,7 +17,6 @@ import { DocumentType, SEPARATOR } from "../constants" import { CacheKey, TTL, withCache } from "../cache" import * as context from "../context" import env from "../environment" -import environment from "../environment" // UTILS @@ -181,10 +180,10 @@ export async function getGoogleDatasourceConfig(): Promise< } export function getDefaultGoogleConfig(): GoogleInnerConfig | undefined { - if (environment.GOOGLE_CLIENT_ID && environment.GOOGLE_CLIENT_SECRET) { + if (env.GOOGLE_CLIENT_ID && env.GOOGLE_CLIENT_SECRET) { return { - clientID: environment.GOOGLE_CLIENT_ID!, - clientSecret: 
diff --git a/packages/backend-core/src/constants/db.ts b/packages/backend-core/src/constants/db.ts
index b33b4835a9..f4caac502e 100644
--- a/packages/backend-core/src/constants/db.ts
+++ b/packages/backend-core/src/constants/db.ts
@@ -1,4 +1,5 @@
 import { prefixed, DocumentType } from "@budibase/types"
+
 export {
   SEPARATOR,
   UNICODE_MAX,
@@ -28,7 +29,7 @@ export enum ViewName {
   APP_BACKUP_BY_TRIGGER = "by_trigger",
 }
 
-export const DeprecatedViews = {
+export const DeprecatedViews: Record<string, string[]> = {
   [ViewName.USER_BY_EMAIL]: [
     // removed due to inaccuracy in view doc filter logic
     "by_email",
@@ -56,6 +57,9 @@ export const StaticDatabases = {
   AUDIT_LOGS: {
     name: "audit-logs",
   },
+  SCIM_LOGS: {
+    name: "scim-logs",
+  },
 }
 
 export const APP_PREFIX = prefixed(DocumentType.APP)
diff --git a/packages/backend-core/src/constants/misc.ts b/packages/backend-core/src/constants/misc.ts
index 8ef34196ed..aee099e10a 100644
--- a/packages/backend-core/src/constants/misc.ts
+++ b/packages/backend-core/src/constants/misc.ts
@@ -11,24 +11,7 @@ export enum Cookie {
   OIDC_CONFIG = "budibase:oidc:config",
 }
 
-export enum Header {
-  API_KEY = "x-budibase-api-key",
-  LICENSE_KEY = "x-budibase-license-key",
-  API_VER = "x-budibase-api-version",
-  APP_ID = "x-budibase-app-id",
-  SESSION_ID = "x-budibase-session-id",
-  TYPE = "x-budibase-type",
-  PREVIEW_ROLE = "x-budibase-role",
-  TENANT_ID = "x-budibase-tenant-id",
-  VERIFICATION_CODE = "x-budibase-verification-code",
-  RETURN_VERIFICATION_CODE = "x-budibase-return-verification-code",
-  RESET_PASSWORD_CODE = "x-budibase-reset-password-code",
-  RETURN_RESET_PASSWORD_CODE = "x-budibase-return-reset-password-code",
-  TOKEN = "x-budibase-token",
-  CSRF_TOKEN = "x-csrf-token",
-  CORRELATION_ID = "x-budibase-correlation-id",
-  AUTHORIZATION = "authorization",
-}
+export { Header } from "@budibase/shared-core"
 
 export enum GlobalRole {
   OWNER = "owner",
diff --git a/packages/backend-core/src/context/Context.ts b/packages/backend-core/src/context/Context.ts
index d29b6935a8..a59f5c6503 100644
--- a/packages/backend-core/src/context/Context.ts
+++ b/packages/backend-core/src/context/Context.ts
@@ -4,7 +4,7 @@ import { ContextMap } from "./types"
 export default class Context {
   static storage = new AsyncLocalStorage<ContextMap>()
 
-  static run(context: ContextMap, func: any) {
+  static run<T>(context: ContextMap, func: () => T) {
     return Context.storage.run(context, () => func())
   }
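// ---------------------------------------------------------------------------
// Editor's note, not part of the diff: Context wraps Node's AsyncLocalStorage,
// so every async callee inside run() sees the same ContextMap without it
// being threaded through arguments - which is what makes the isMigrating
// guard below possible. Minimal standalone illustration:
import { AsyncLocalStorage } from "async_hooks"

const storage = new AsyncLocalStorage<{ tenantId?: string }>()

async function deepCallee() {
  // reads the store established by whichever caller invoked run()
  return storage.getStore()?.tenantId // -> "tenant-a"
}

storage.run({ tenantId: "tenant-a" }, () => deepCallee())
// ---------------------------------------------------------------------------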
diff --git a/packages/backend-core/src/context/mainContext.ts b/packages/backend-core/src/context/mainContext.ts
index 609c18abb5..6cea7efeba 100644
--- a/packages/backend-core/src/context/mainContext.ts
+++ b/packages/backend-core/src/context/mainContext.ts
@@ -10,7 +10,7 @@ import {
   StaticDatabases,
   DEFAULT_TENANT_ID,
 } from "../constants"
-import { Database, IdentityContext } from "@budibase/types"
+import { Database, IdentityContext, Snippet, App } from "@budibase/types"
 import { ContextMap } from "./types"
 
 let TEST_APP_ID: string | null = null
@@ -35,6 +35,17 @@ export function getAuditLogDBName(tenantId?: string) {
   }
 }
 
+export function getScimDBName(tenantId?: string) {
+  if (!tenantId) {
+    tenantId = getTenantId()
+  }
+  if (tenantId === DEFAULT_TENANT_ID) {
+    return StaticDatabases.SCIM_LOGS.name
+  } else {
+    return `${tenantId}${SEPARATOR}${StaticDatabases.SCIM_LOGS.name}`
+  }
+}
+
 export function baseGlobalDBName(tenantId: string | undefined | null) {
   if (!tenantId || tenantId === DEFAULT_TENANT_ID) {
     return StaticDatabases.GLOBAL.name
@@ -98,21 +109,23 @@ function updateContext(updates: ContextMap): ContextMap {
   return context
 }
 
-async function newContext(updates: ContextMap, task: any) {
+async function newContext<T>(updates: ContextMap, task: () => T) {
+  guardMigration()
+
   // see if there already is a context setup
   let context: ContextMap = updateContext(updates)
   return Context.run(context, task)
 }
 
-export async function doInAutomationContext(params: {
+export async function doInAutomationContext<T>(params: {
   appId: string
   automationId: string
-  task: any
-}): Promise<any> {
-  const tenantId = getTenantIDFromAppID(params.appId)
+  task: () => T
+}): Promise<T> {
+  await ensureSnippetContext()
   return newContext(
     {
-      tenantId,
+      tenantId: getTenantIDFromAppID(params.appId),
       appId: params.appId,
       automationId: params.automationId,
     },
@@ -132,7 +145,7 @@ export async function doInContext(appId: string, task: any): Promise<any> {
 }
 
 export async function doInTenant<T>(
-  tenantId: string | null,
+  tenantId: string | undefined,
   task: () => T
 ): Promise<T> {
@@ -144,31 +157,35 @@ export async function doInTenant(
   return newContext(updates, task)
 }
 
-export async function doInAppContext(
-  appId: string | null,
-  task: any
-): Promise<any> {
-  if (!appId && !env.isTest()) {
+export async function doInAppContext<T>(
+  appId: string,
+  task: () => T
+): Promise<T> {
+  return _doInAppContext(appId, task)
+}
+
+async function _doInAppContext<T>(
+  appId: string,
+  task: () => T,
+  extraContextSettings?: ContextMap
+): Promise<T> {
+  if (!appId) {
     throw new Error("appId is required")
   }
-  let updates: ContextMap
-  if (!appId) {
-    updates = { appId: "" }
-  } else {
-    const tenantId = getTenantIDFromAppID(appId)
-    updates = { appId }
-    if (tenantId) {
-      updates.tenantId = tenantId
-    }
+  const tenantId = getTenantIDFromAppID(appId)
+  const updates: ContextMap = { appId, ...extraContextSettings }
+  if (tenantId) {
+    updates.tenantId = tenantId
   }
+
   return newContext(updates, task)
 }
 
-export async function doInIdentityContext(
+export async function doInIdentityContext<T>(
   identity: IdentityContext,
-  task: any
-): Promise<any> {
+  task: () => T
+): Promise<T> {
   if (!identity) {
     throw new Error("identity is required")
   }
@@ -182,6 +199,24 @@ export async function doInIdentityContext(
   return newContext(context, task)
 }
 
+function guardMigration() {
+  const context = Context.get()
+  if (context?.isMigrating) {
+    throw new Error(
+      "The context cannot be changed, a migration is currently running"
+    )
+  }
+}
+
+export async function doInAppMigrationContext<T>(
+  appId: string,
+  task: () => T
+): Promise<T> {
+  return _doInAppContext(appId, task, {
+    isMigrating: true,
+  })
+}
+
 export function getIdentity(): IdentityContext | undefined {
   try {
     const context = Context.get()
@@ -246,6 +281,27 @@ export function doInScimContext(task: any) {
   return newContext(updates, task)
 }
 
+export async function ensureSnippetContext() {
+  const ctx = getCurrentContext()
+
+  // If we've already added snippets to context, continue
+  if (!ctx || ctx.snippets) {
+    return
+  }
+
+  // Otherwise get snippets for this app and update context
+  let snippets: Snippet[] | undefined
+  const db = getAppDB()
+  if (db && !env.isTest()) {
+    const app = await db.get<App>(DocumentType.APP_METADATA)
+    snippets = app.snippets
+  }
+
+  // Always set snippets to a non-null value so that we can tell we've attempted
+  // to load snippets
+  ctx.snippets = snippets || []
+}
+
 export function getEnvironmentVariables() {
   const context =
Context.get() if (!context.environmentVariables) { @@ -276,6 +332,9 @@ export function getAuditLogsDB(): Database { */ export function getAppDB(opts?: any): Database { const appId = getAppId() + if (!appId) { + throw new Error("Unable to retrieve app DB - no app ID.") + } return getDB(appId, opts) } @@ -308,3 +367,11 @@ export function isScim(): boolean { const scimCall = context?.isScim return !!scimCall } + +export function getCurrentContext(): ContextMap | undefined { + try { + return Context.get() + } catch (e) { + return undefined + } +} diff --git a/packages/backend-core/src/context/tests/index.spec.ts b/packages/backend-core/src/context/tests/index.spec.ts index d8bb598af1..2d89131549 100644 --- a/packages/backend-core/src/context/tests/index.spec.ts +++ b/packages/backend-core/src/context/tests/index.spec.ts @@ -1,6 +1,11 @@ import { testEnv } from "../../../tests/extra" import * as context from "../" import { DEFAULT_TENANT_ID } from "../../constants" +import { structures } from "../../../tests" +import { db } from "../.." +import Context from "../Context" +import { ContextMap } from "../types" +import { IdentityType } from "@budibase/types" describe("context", () => { describe("doInTenant", () => { @@ -144,4 +149,107 @@ describe("context", () => { expect(isScim).toBe(false) }) }) + + describe("doInAppMigrationContext", () => { + it("the context is set correctly", async () => { + const appId = db.generateAppID() + + await context.doInAppMigrationContext(appId, () => { + const context = Context.get() + + const expected: ContextMap = { + appId, + isMigrating: true, + } + expect(context).toEqual(expected) + }) + }) + + it("the context is set correctly when running in a tenant id", async () => { + const tenantId = structures.tenant.id() + const appId = db.generateAppID(tenantId) + + await context.doInAppMigrationContext(appId, () => { + const context = Context.get() + + const expected: ContextMap = { + appId, + isMigrating: true, + tenantId, + } + expect(context).toEqual(expected) + }) + }) + + it("the context is not modified outside the delegate", async () => { + const appId = db.generateAppID() + + expect(Context.get()).toBeUndefined() + + await context.doInAppMigrationContext(appId, () => { + const context = Context.get() + + const expected: ContextMap = { + appId, + isMigrating: true, + } + expect(context).toEqual(expected) + }) + + expect(Context.get()).toBeUndefined() + }) + + it.each([ + [ + "doInAppMigrationContext", + () => context.doInAppMigrationContext(db.generateAppID(), () => {}), + ], + [ + "doInAppContext", + () => context.doInAppContext(db.generateAppID(), () => {}), + ], + [ + "doInAutomationContext", + () => + context.doInAutomationContext({ + appId: db.generateAppID(), + automationId: structures.generator.guid(), + task: () => {}, + }), + ], + ["doInContext", () => context.doInContext(db.generateAppID(), () => {})], + [ + "doInEnvironmentContext", + () => context.doInEnvironmentContext({}, () => {}), + ], + [ + "doInIdentityContext", + () => + context.doInIdentityContext( + { + account: undefined, + type: IdentityType.USER, + _id: structures.users.user()._id!, + }, + () => {} + ), + ], + ["doInScimContext", () => context.doInScimContext(() => {})], + [ + "doInTenant", + () => context.doInTenant(structures.tenant.id(), () => {}), + ], + ])( + "a nested context.%s function cannot run", + async (_, otherContextCall: () => Promise) => { + await expect( + context.doInAppMigrationContext(db.generateAppID(), async () => { + await otherContextCall() + }) + 
).rejects.toThrow( + "The context cannot be changed, a migration is currently running" + ) + } + ) + }) }) diff --git a/packages/backend-core/src/context/types.ts b/packages/backend-core/src/context/types.ts index d687a93594..f297d3089f 100644 --- a/packages/backend-core/src/context/types.ts +++ b/packages/backend-core/src/context/types.ts @@ -1,4 +1,4 @@ -import { IdentityContext } from "@budibase/types" +import { IdentityContext, Snippet, VM } from "@budibase/types" // keep this out of Budibase types, don't want to expose context info export type ContextMap = { @@ -8,4 +8,8 @@ export type ContextMap = { environmentVariables?: Record isScim?: boolean automationId?: string + isMigrating?: boolean + vm?: VM + cleanup?: (() => void | Promise)[] + snippets?: Snippet[] } diff --git a/packages/backend-core/src/db/Replication.ts b/packages/backend-core/src/db/Replication.ts index f91a37ce8f..735c2fa86e 100644 --- a/packages/backend-core/src/db/Replication.ts +++ b/packages/backend-core/src/db/Replication.ts @@ -1,66 +1,57 @@ +import PouchDB from "pouchdb" import { getPouchDB, closePouchDB } from "./couch" import { DocumentType } from "../constants" class Replication { - source: any - target: any - replication: any + source: PouchDB.Database + target: PouchDB.Database - /** - * - * @param source - the DB you want to replicate or rollback to - * @param target - the DB you want to replicate to, or rollback from - */ - constructor({ source, target }: any) { + constructor({ source, target }: { source: string; target: string }) { this.source = getPouchDB(source) this.target = getPouchDB(target) } - close() { - return Promise.all([closePouchDB(this.source), closePouchDB(this.target)]) + async close() { + await Promise.all([closePouchDB(this.source), closePouchDB(this.target)]) } - promisify(operation: any, opts = {}) { - return new Promise(resolve => { - operation(this.target, opts) - .on("denied", function (err: any) { + replicate(opts: PouchDB.Replication.ReplicateOptions = {}) { + return new Promise>(resolve => { + this.source.replicate + .to(this.target, opts) + .on("denied", function (err) { // a document failed to replicate (e.g. due to permissions) throw new Error(`Denied: Document failed to replicate ${err}`) }) - .on("complete", function (info: any) { + .on("complete", function (info) { return resolve(info) }) - .on("error", function (err: any) { - throw new Error(`Replication Error: ${err}`) + .on("error", function (err) { + throw err }) }) } - /** - * Two way replication operation, intended to be promise based. - * @param opts - PouchDB replication options - */ - sync(opts = {}) { - this.replication = this.promisify(this.source.sync, opts) - return this.replication - } + appReplicateOpts( + opts: PouchDB.Replication.ReplicateOptions = {} + ): PouchDB.Replication.ReplicateOptions { + if (typeof opts.filter === "string") { + return opts + } - /** - * One way replication operation, intended to be promise based. - * @param opts - PouchDB replication options - */ - replicate(opts = {}) { - this.replication = this.promisify(this.source.replicate.to, opts) - return this.replication - } + const filter = opts.filter + delete opts.filter - appReplicateOpts() { return { - filter: (doc: any) => { + ...opts, + filter: (doc: any, params: any) => { if (doc._id && doc._id.startsWith(DocumentType.AUTOMATION_LOG)) { return false } - return doc._id !== DocumentType.APP_METADATA + if (doc._id === DocumentType.APP_METADATA) { + return false + } + return filter ? 
filter(doc, params) : true
       },
     }
   }
@@ -75,10 +66,6 @@ class Replication {
     // take the opportunity to remove deleted tombstones
     await this.replicate()
   }
-
-  cancel() {
-    this.replication.cancel()
-  }
 }
 
 export default Replication
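// ---------------------------------------------------------------------------
// Editor's sketch, not part of the diff: the reworked Replication class now
// composes a caller-supplied filter with its built-in exclusions (automation
// logs and app metadata are never replicated). Database names hypothetical:
import Replication from "./Replication"

async function publishApp() {
  const replication = new Replication({
    source: "app_dev_tenant_123",
    target: "app_tenant_123",
  })
  try {
    const opts = replication.appReplicateOpts({
      filter: (doc: any) => !doc._deleted, // still applied after built-ins
    })
    await replication.replicate(opts)
  } finally {
    await replication.close()
  }
}
// ---------------------------------------------------------------------------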
diff --git a/packages/backend-core/src/db/couch/DatabaseImpl.ts b/packages/backend-core/src/db/couch/DatabaseImpl.ts
index 29ca4123f5..416313f520 100644
--- a/packages/backend-core/src/db/couch/DatabaseImpl.ts
+++ b/packages/backend-core/src/db/couch/DatabaseImpl.ts
@@ -10,12 +10,17 @@ import {
   DatabaseDeleteIndexOpts,
   Document,
   isDocument,
+  RowResponse,
+  RowValue,
 } from "@budibase/types"
 import { getCouchInfo } from "./connections"
 import { directCouchUrlCall } from "./utils"
 import { getPouchDB } from "./pouchDB"
 import { WriteStream, ReadStream } from "fs"
 import { newid } from "../../docIds/newid"
+import { DDInstrumentedDatabase } from "../instrumentation"
+
+const DATABASE_NOT_FOUND = "Database does not exist."
 
 function buildNano(couchInfo: { url: string; cookie: string }) {
   return Nano({
@@ -29,15 +34,15 @@ function buildNano(couchInfo: { url: string; cookie: string }) {
   })
 }
 
+type DBCall<T> = () => Promise<T>
+
 export function DatabaseWithConnection(
   dbName: string,
   connection: string,
   opts?: DatabaseOpts
 ) {
-  if (!connection) {
-    throw new Error("Must provide connection details")
-  }
-  return new DatabaseImpl(dbName, opts, connection)
+  const db = new DatabaseImpl(dbName, opts, connection)
+  return new DDInstrumentedDatabase(db)
 }
 
 export class DatabaseImpl implements Database {
@@ -48,10 +53,7 @@ export class DatabaseImpl implements Database {
 
   private readonly couchInfo = getCouchInfo()
 
-  constructor(dbName?: string, opts?: DatabaseOpts, connection?: string) {
-    if (dbName == null) {
-      throw new Error("Database name cannot be undefined.")
-    }
+  constructor(dbName: string, opts?: DatabaseOpts, connection?: string) {
     this.name = dbName
     this.pouchOpts = opts || {}
     if (connection) {
@@ -68,7 +70,15 @@ export class DatabaseImpl implements Database {
     DatabaseImpl.nano = buildNano(couchInfo)
   }
 
-  async exists() {
+  exists(docId?: string) {
+    if (docId === undefined) {
+      return this.dbExists()
+    }
+
+    return this.docExists(docId)
+  }
+
+  private async dbExists() {
     const response = await directCouchUrlCall({
       url: `${this.couchInfo.url}/${this.name}`,
       method: "HEAD",
@@ -77,11 +87,24 @@ export class DatabaseImpl implements Database {
     return response.status === 200
   }
 
+  private async docExists(id: string): Promise<boolean> {
+    try {
+      await this.performCall(db => () => db.head(id))
+      return true
+    } catch {
+      return false
+    }
+  }
+
   private nano() {
     return this.instanceNano || DatabaseImpl.nano
   }
 
-  async checkSetup() {
+  private getDb() {
+    return this.nano().db.use(this.name)
+  }
+
+  private async checkAndCreateDb() {
     let shouldCreate = !this.pouchOpts?.skip_setup
     // check exists in a lightweight fashion
     let exists = await this.exists()
@@ -98,45 +121,84 @@
       }
     }
   }
-    return this.nano().db.use(this.name)
+    return this.getDb()
   }
 
-  private async updateOutput(fnc: any) {
+  // this function fetches the DB and handles DB creation if needed
+  private async performCall<T>(
+    call: (db: Nano.DocumentScope<any>) => Promise<DBCall<T>> | DBCall<T>
+  ): Promise<T> {
+    const db = this.getDb()
+    const fnc = await call(db)
     try {
       return await fnc()
     } catch (err: any) {
-      if (err.statusCode) {
+      if (err.statusCode === 404 && err.reason === DATABASE_NOT_FOUND) {
+        await this.checkAndCreateDb()
+        return await this.performCall(call)
+      } else if (err.statusCode) {
        err.status = err.statusCode
      }
      throw err
    }
  }

-  async get(id?: string): Promise<any> {
-    const db = await this.checkSetup()
-    if (!id) {
-      throw new Error("Unable to get doc without a valid _id.")
+  async get<T extends Document>(id?: string): Promise<T> {
+    return this.performCall(db => {
+      if (!id) {
+        throw new Error("Unable to get doc without a valid _id.")
+      }
+      return () => db.get(id)
+    })
+  }
+
+  async getMultiple<T extends Document>(
+    ids: string[],
+    opts?: { allowMissing?: boolean }
+  ): Promise<T[]> {
+    // get unique
+    ids = [...new Set(ids)]
+    const response = await this.allDocs<T>({
+      keys: ids,
+      include_docs: true,
+    })
+    const rowUnavailable = (row: RowResponse<T>) => {
+      // row is deleted - key lookup can return this
+      if (row.doc == null || ("deleted" in row.value && row.value.deleted)) {
+        return true
+      }
+      return row.error === "not_found"
     }
-    return this.updateOutput(() => db.get(id))
+
+    const rows = response.rows.filter(row => !rowUnavailable(row))
+    const someMissing = rows.length !== response.rows.length
+    // some were filtered out - means some missing
+    if (!opts?.allowMissing && someMissing) {
+      const missing = response.rows.filter(row => rowUnavailable(row))
+      const missingIds = missing.map(row => row.key).join(", ")
+      throw new Error(`Unable to get documents: ${missingIds}`)
+    }
+    return rows.map(row => row.doc!)
   }
 
   async remove(idOrDoc: string | Document, rev?: string) {
-    const db = await this.checkSetup()
-    let _id: string
-    let _rev: string
+    return this.performCall(db => {
+      let _id: string
+      let _rev: string
 
-    if (isDocument(idOrDoc)) {
-      _id = idOrDoc._id!
-      _rev = idOrDoc._rev!
-    } else {
-      _id = idOrDoc
-      _rev = rev!
-    }
+      if (isDocument(idOrDoc)) {
+        _id = idOrDoc._id!
+        _rev = idOrDoc._rev!
+      } else {
+        _id = idOrDoc
+        _rev = rev!
+      }
 
-    if (!_id || !_rev) {
-      throw new Error("Unable to remove doc without a valid _id and _rev.")
-    }
-    return this.updateOutput(() => db.destroy(_id, _rev))
+      if (!_id || !_rev) {
+        throw new Error("Unable to remove doc without a valid _id and _rev.")
+      }
+      return () => db.destroy(_id, _rev)
+    })
   }
 
   async post(document: AnyDocument, opts?: DatabasePutOpts) {
@@ -150,43 +212,49 @@
     if (!document._id) {
       throw new Error("Cannot store document without _id field.")
     }
-    const db = await this.checkSetup()
-    if (!document.createdAt) {
-      document.createdAt = new Date().toISOString()
-    }
-    document.updatedAt = new Date().toISOString()
-    if (opts?.force && document._id) {
-      try {
-        const existing = await this.get(document._id)
-        if (existing) {
-          document._rev = existing._rev
-        }
-      } catch (err: any) {
-        if (err.status !== 404) {
-          throw err
+    return this.performCall(async db => {
+      if (!document.createdAt) {
+        document.createdAt = new Date().toISOString()
+      }
+      document.updatedAt = new Date().toISOString()
+      if (opts?.force && document._id) {
+        try {
+          const existing = await this.get(document._id)
+          if (existing) {
+            document._rev = existing._rev
+          }
+        } catch (err: any) {
+          if (err.status !== 404) {
+            throw err
+          }
         }
       }
-    }
-    return this.updateOutput(() => db.insert(document))
+      return () => db.insert(document)
+    })
   }
 
   async bulkDocs(documents: AnyDocument[]) {
-    const db = await this.checkSetup()
-    return this.updateOutput(() => db.bulk({ docs: documents }))
+    return this.performCall(db => {
+      return () => db.bulk({ docs: documents })
+    })
   }
 
-  async allDocs(params: DatabaseQueryOpts): Promise<AllDocsResponse<any>> {
-    const db = await this.checkSetup()
-    return this.updateOutput(() => db.list(params))
+  async allDocs<T extends Document | RowValue>(
+    params: DatabaseQueryOpts
+  ): Promise<AllDocsResponse<T>> {
+    return
this.performCall(db => { + return () => db.list(params) + }) } - async query( + async query( viewName: string, params: DatabaseQueryOpts ): Promise> { - const db = await this.checkSetup() - const [database, view] = viewName.split("/") - return this.updateOutput(() => db.view(database, view, params)) + return this.performCall(db => { + const [database, view] = viewName.split("/") + return () => db.view(database, view, params) + }) } async destroy() { @@ -203,8 +271,9 @@ export class DatabaseImpl implements Database { } async compact() { - const db = await this.checkSetup() - return this.updateOutput(() => db.compact()) + return this.performCall(db => { + return () => db.compact() + }) } // All below functions are in-frequently called, just utilise PouchDB diff --git a/packages/backend-core/src/db/db.ts b/packages/backend-core/src/db/db.ts index 9aae64b892..197770298e 100644 --- a/packages/backend-core/src/db/db.ts +++ b/packages/backend-core/src/db/db.ts @@ -1,11 +1,9 @@ -import env from "../environment" import { directCouchQuery, DatabaseImpl } from "./couch" -import { CouchFindOptions, Database } from "@budibase/types" +import { CouchFindOptions, Database, DatabaseOpts } from "@budibase/types" +import { DDInstrumentedDatabase } from "./instrumentation" -const dbList = new Set() - -export function getDB(dbName?: string, opts?: any): Database { - return new DatabaseImpl(dbName, opts) +export function getDB(dbName: string, opts?: DatabaseOpts): Database { + return new DDInstrumentedDatabase(new DatabaseImpl(dbName, opts)) } // we have to use a callback for this so that we can close @@ -14,7 +12,7 @@ export function getDB(dbName?: string, opts?: any): Database { export async function doWithDB( dbName: string, cb: (db: Database) => Promise, - opts = {} + opts?: DatabaseOpts ) { const db = getDB(dbName, opts) // need this to be async so that we can correctly close DB after all @@ -22,13 +20,6 @@ export async function doWithDB( return await cb(db) } -export function allDbs() { - if (!env.isTest()) { - throw new Error("Cannot be used outside test environment.") - } - return [...dbList] -} - export async function directCouchAllDbs(queryString?: string) { let couchPath = "/_all_dbs" if (queryString) { diff --git a/packages/backend-core/src/db/instrumentation.ts b/packages/backend-core/src/db/instrumentation.ts new file mode 100644 index 0000000000..795f30d7cd --- /dev/null +++ b/packages/backend-core/src/db/instrumentation.ts @@ -0,0 +1,152 @@ +import { + DocumentDestroyResponse, + DocumentInsertResponse, + DocumentBulkResponse, + OkResponse, +} from "@budibase/nano" +import { + AllDocsResponse, + AnyDocument, + Database, + DatabaseDumpOpts, + DatabasePutOpts, + DatabaseQueryOpts, + Document, + RowValue, +} from "@budibase/types" +import tracer from "dd-trace" +import { Writable } from "stream" + +export class DDInstrumentedDatabase implements Database { + constructor(private readonly db: Database) {} + + get name(): string { + return this.db.name + } + + exists(docId?: string): Promise { + return tracer.trace("db.exists", span => { + span?.addTags({ db_name: this.name, doc_id: docId }) + if (docId) { + return this.db.exists(docId) + } + return this.db.exists() + }) + } + + get(id?: string | undefined): Promise { + return tracer.trace("db.get", span => { + span?.addTags({ db_name: this.name, doc_id: id }) + return this.db.get(id) + }) + } + + getMultiple( + ids: string[], + opts?: { allowMissing?: boolean | undefined } | undefined + ): Promise { + return tracer.trace("db.getMultiple", span => { + 
span?.addTags({ + db_name: this.name, + num_docs: ids.length, + allow_missing: opts?.allowMissing, + }) + return this.db.getMultiple(ids, opts) + }) + } + + remove( + id: string | Document, + rev?: string | undefined + ): Promise { + return tracer.trace("db.remove", span => { + span?.addTags({ db_name: this.name, doc_id: id }) + return this.db.remove(id, rev) + }) + } + + put( + document: AnyDocument, + opts?: DatabasePutOpts | undefined + ): Promise { + return tracer.trace("db.put", span => { + span?.addTags({ db_name: this.name, doc_id: document._id }) + return this.db.put(document, opts) + }) + } + + bulkDocs(documents: AnyDocument[]): Promise { + return tracer.trace("db.bulkDocs", span => { + span?.addTags({ db_name: this.name, num_docs: documents.length }) + return this.db.bulkDocs(documents) + }) + } + + allDocs( + params: DatabaseQueryOpts + ): Promise> { + return tracer.trace("db.allDocs", span => { + span?.addTags({ db_name: this.name }) + return this.db.allDocs(params) + }) + } + + query( + viewName: string, + params: DatabaseQueryOpts + ): Promise> { + return tracer.trace("db.query", span => { + span?.addTags({ db_name: this.name, view_name: viewName }) + return this.db.query(viewName, params) + }) + } + + destroy(): Promise { + return tracer.trace("db.destroy", span => { + span?.addTags({ db_name: this.name }) + return this.db.destroy() + }) + } + + compact(): Promise { + return tracer.trace("db.compact", span => { + span?.addTags({ db_name: this.name }) + return this.db.compact() + }) + } + + dump(stream: Writable, opts?: DatabaseDumpOpts | undefined): Promise { + return tracer.trace("db.dump", span => { + span?.addTags({ db_name: this.name }) + return this.db.dump(stream, opts) + }) + } + + load(...args: any[]): Promise { + return tracer.trace("db.load", span => { + span?.addTags({ db_name: this.name }) + return this.db.load(...args) + }) + } + + createIndex(...args: any[]): Promise { + return tracer.trace("db.createIndex", span => { + span?.addTags({ db_name: this.name }) + return this.db.createIndex(...args) + }) + } + + deleteIndex(...args: any[]): Promise { + return tracer.trace("db.deleteIndex", span => { + span?.addTags({ db_name: this.name }) + return this.db.deleteIndex(...args) + }) + } + + getIndexes(...args: any[]): Promise { + return tracer.trace("db.getIndexes", span => { + span?.addTags({ db_name: this.name }) + return this.db.getIndexes(...args) + }) + } +} diff --git a/packages/backend-core/src/db/lucene.ts b/packages/backend-core/src/db/lucene.ts index f982ee67d0..987d750d45 100644 --- a/packages/backend-core/src/db/lucene.ts +++ b/packages/backend-core/src/db/lucene.ts @@ -10,10 +10,6 @@ interface SearchResponse { totalRows: number } -interface PaginatedSearchResponse extends SearchResponse { - hasNextPage: boolean -} - export type SearchParams = { tableId?: string sort?: string @@ -247,7 +243,7 @@ export class QueryBuilder { } // Escape characters if (!this.#noEscaping && escape && originalType === "string") { - value = `${value}`.replace(/[ \/#+\-&|!(){}\]^"~*?:\\]/g, "\\$&") + value = `${value}`.replace(/[ /#+\-&|!(){}\]^"~*?:\\]/g, "\\$&") } // Wrap in quotes diff --git a/packages/backend-core/src/db/searchIndexes/searchIndexes.ts b/packages/backend-core/src/db/searchIndexes/searchIndexes.ts index b953e3516e..8742d405f2 100644 --- a/packages/backend-core/src/db/searchIndexes/searchIndexes.ts +++ b/packages/backend-core/src/db/searchIndexes/searchIndexes.ts @@ -34,12 +34,12 @@ export async function createUserIndex() { } let idxKey = prev != null ? 
`${prev}.${key}` : key if (typeof input[key] === "string") { + // @ts-expect-error index is available in a CouchDB map function // eslint-disable-next-line no-undef - // @ts-ignore index(idxKey, input[key].toLowerCase(), { facet: true }) } else if (typeof input[key] !== "object") { + // @ts-expect-error index is available in a CouchDB map function // eslint-disable-next-line no-undef - // @ts-ignore index(idxKey, input[key], { facet: true }) } else { idx(input[key], idxKey) diff --git a/packages/backend-core/src/db/tests/DatabaseImpl.spec.ts b/packages/backend-core/src/db/tests/DatabaseImpl.spec.ts new file mode 100644 index 0000000000..586f13f417 --- /dev/null +++ b/packages/backend-core/src/db/tests/DatabaseImpl.spec.ts @@ -0,0 +1,55 @@ +import _ from "lodash" +import { AnyDocument } from "@budibase/types" +import { generator } from "../../../tests" +import { DatabaseImpl } from "../couch" +import { newid } from "../../utils" + +describe("DatabaseImpl", () => { + const database = new DatabaseImpl(generator.word()) + const documents: AnyDocument[] = [] + + beforeAll(async () => { + const docsToCreate = Array.from({ length: 10 }).map(() => ({ + _id: newid(), + })) + const createdDocs = await database.bulkDocs(docsToCreate) + + documents.push(...createdDocs.map((x: any) => ({ _id: x.id, _rev: x.rev }))) + }) + + describe("document exists", () => { + it("can check existing docs by id", async () => { + const existingDoc = _.sample(documents) + const result = await database.exists(existingDoc!._id!) + + expect(result).toBe(true) + }) + + it("can check non existing docs by id", async () => { + const result = await database.exists(newid()) + + expect(result).toBe(false) + }) + + it("can check an existing doc by id multiple times", async () => { + const existingDoc = _.sample(documents) + const id = existingDoc!._id! + + const results = [] + results.push(await database.exists(id)) + results.push(await database.exists(id)) + results.push(await database.exists(id)) + + expect(results).toEqual([true, true, true]) + }) + + it("returns false after the doc is deleted", async () => { + const existingDoc = _.sample(documents) + const id = existingDoc!._id! + expect(await database.exists(id)).toBe(true) + + await database.remove(existingDoc!) 
+ expect(await database.exists(id)).toBe(false) + }) + }) +}) diff --git a/packages/backend-core/src/db/tests/index.spec.js b/packages/backend-core/src/db/tests/index.spec.js index 0d257f7ed7..e03c9a5b0e 100644 --- a/packages/backend-core/src/db/tests/index.spec.js +++ b/packages/backend-core/src/db/tests/index.spec.js @@ -5,7 +5,6 @@ const { getDB } = require("../db") describe("db", () => { describe("getDB", () => { it("returns a db", async () => { - const dbName = structures.db.id() const db = getDB(dbName) expect(db).toBeDefined() diff --git a/packages/backend-core/src/db/utils.ts b/packages/backend-core/src/db/utils.ts index d7a4b8224a..906a95e1db 100644 --- a/packages/backend-core/src/db/utils.ts +++ b/packages/backend-core/src/db/utils.ts @@ -6,6 +6,7 @@ import { AppState, DeletedApp, getAppMetadata } from "../cache/appMetadata" import { isDevApp, isDevAppID, getProdAppID } from "../docIds/conversions" import { App, Database } from "@budibase/types" import { getStartEndKeyURL } from "../docIds" + export * from "../docIds" /** diff --git a/packages/backend-core/src/db/views.ts b/packages/backend-core/src/db/views.ts index f0980ad217..5d9c5b74d3 100644 --- a/packages/backend-core/src/db/views.ts +++ b/packages/backend-core/src/db/views.ts @@ -7,12 +7,19 @@ import { } from "../constants" import { getGlobalDB } from "../context" import { doWithDB } from "./" -import { AllDocsResponse, Database, DatabaseQueryOpts } from "@budibase/types" +import { + AllDocsResponse, + Database, + DatabaseQueryOpts, + Document, + DesignDocument, + DBView, +} from "@budibase/types" import env from "../environment" const DESIGN_DB = "_design/database" -function DesignDoc() { +function DesignDoc(): DesignDocument { return { _id: DESIGN_DB, // view collation information, read before writing any complex views: @@ -21,20 +28,14 @@ function DesignDoc() { } } -interface DesignDocument { - views: any -} - async function removeDeprecated(db: Database, viewName: ViewName) { - // @ts-ignore if (!DeprecatedViews[viewName]) { return } try { const designDoc = await db.get(DESIGN_DB) - // @ts-ignore for (let deprecatedNames of DeprecatedViews[viewName]) { - delete designDoc.views[deprecatedNames] + delete designDoc.views?.[deprecatedNames] } await db.put(designDoc) } catch (err) { @@ -43,18 +44,18 @@ async function removeDeprecated(db: Database, viewName: ViewName) { } export async function createView( - db: any, + db: Database, viewJs: string, viewName: string ): Promise { let designDoc try { - designDoc = (await db.get(DESIGN_DB)) as DesignDocument + designDoc = await db.get(DESIGN_DB) } catch (err) { // no design doc, make one designDoc = DesignDoc() } - const view = { + const view: DBView = { map: viewJs, } designDoc.views = { @@ -109,7 +110,7 @@ export interface QueryViewOptions { arrayResponse?: boolean } -export async function queryViewRaw( +export async function queryViewRaw( viewName: ViewName, params: DatabaseQueryOpts, db: Database, @@ -137,18 +138,16 @@ export async function queryViewRaw( } } -export const queryView = async ( +export const queryView = async ( viewName: ViewName, params: DatabaseQueryOpts, db: Database, createFunc: any, opts?: QueryViewOptions -): Promise => { +): Promise => { const response = await queryViewRaw(viewName, params, db, createFunc, opts) const rows = response.rows - const docs = rows.map((row: any) => - params.include_docs ? row.doc : row.value - ) + const docs = rows.map(row => (params.include_docs ? row.doc! 
: row.value)) // if arrayResponse has been requested, always return array regardless of length if (opts?.arrayResponse) { @@ -198,11 +197,11 @@ export const createPlatformUserView = async () => { await createPlatformView(viewJs, ViewName.PLATFORM_USERS_LOWERCASE) } -export const queryPlatformView = async ( +export const queryPlatformView = async ( viewName: ViewName, params: DatabaseQueryOpts, opts?: QueryViewOptions -): Promise => { +): Promise => { const CreateFuncByName: any = { [ViewName.ACCOUNT_BY_EMAIL]: createPlatformAccountEmailView, [ViewName.PLATFORM_USERS_LOWERCASE]: createPlatformUserView, @@ -220,7 +219,7 @@ const CreateFuncByName: any = { [ViewName.USER_BY_APP]: createUserAppView, } -export const queryGlobalView = async ( +export const queryGlobalView = async ( viewName: ViewName, params: DatabaseQueryOpts, db?: Database, @@ -231,10 +230,10 @@ export const queryGlobalView = async ( db = getGlobalDB() } const createFn = CreateFuncByName[viewName] - return queryView(viewName, params, db!, createFn, opts) + return queryView(viewName, params, db!, createFn, opts) } -export async function queryGlobalViewRaw( +export async function queryGlobalViewRaw( viewName: ViewName, params: DatabaseQueryOpts, opts?: QueryViewOptions diff --git a/packages/backend-core/src/docIds/conversions.ts b/packages/backend-core/src/docIds/conversions.ts index b168b74e16..ec43d01389 100644 --- a/packages/backend-core/src/docIds/conversions.ts +++ b/packages/backend-core/src/docIds/conversions.ts @@ -1,5 +1,6 @@ import { APP_DEV_PREFIX, APP_PREFIX } from "../constants" import { App } from "@budibase/types" + const NO_APP_ERROR = "No app provided" export function isDevAppID(appId?: string) { diff --git a/packages/backend-core/src/docIds/ids.ts b/packages/backend-core/src/docIds/ids.ts index 02176109da..9627b2b94c 100644 --- a/packages/backend-core/src/docIds/ids.ts +++ b/packages/backend-core/src/docIds/ids.ts @@ -74,7 +74,7 @@ export function getGlobalIDFromUserMetadataID(id: string) { * Generates a template ID. * @param ownerId The owner/user of the template, this could be global or a workspace level. */ -export function generateTemplateID(ownerId: any) { +export function generateTemplateID(ownerId: string) { return `${DocumentType.TEMPLATE}${SEPARATOR}${ownerId}${SEPARATOR}${newid()}` } @@ -105,7 +105,7 @@ export function prefixRoleID(name: string) { * Generates a new dev info document ID - this is scoped to a user. * @returns The new dev info ID which info for dev (like api key) can be stored under. 
*/ -export const generateDevInfoID = (userId: any) => { +export const generateDevInfoID = (userId: string) => { return `${DocumentType.DEV_INFO}${SEPARATOR}${userId}` } diff --git a/packages/backend-core/src/docUpdates/index.ts b/packages/backend-core/src/docUpdates/index.ts index 3971f8de12..bd34f4f0cd 100644 --- a/packages/backend-core/src/docUpdates/index.ts +++ b/packages/backend-core/src/docUpdates/index.ts @@ -17,13 +17,8 @@ export function init(processors: ProcessorMap) { // if not processing in this instance, kick it off if (!processingPromise) { processingPromise = asyncEventQueue.process(async job => { - const { event, identity, properties, timestamp } = job.data - await documentProcessor.processEvent( - event, - identity, - properties, - timestamp - ) + const { event, identity, properties } = job.data + await documentProcessor.processEvent(event, identity, properties) }) } } diff --git a/packages/backend-core/src/environment.ts b/packages/backend-core/src/environment.ts index ed882fe96a..2da2a77d67 100644 --- a/packages/backend-core/src/environment.ts +++ b/packages/backend-core/src/environment.ts @@ -107,6 +107,7 @@ const environment = { ENCRYPTION_KEY: process.env.ENCRYPTION_KEY, API_ENCRYPTION_KEY: getAPIEncryptionKey(), COUCH_DB_URL: process.env.COUCH_DB_URL || "http://localhost:4005", + COUCH_DB_SQL_URL: process.env.COUCH_DB_SQL_URL || "http://localhost:4984", COUCH_DB_USERNAME: process.env.COUCH_DB_USER, COUCH_DB_PASSWORD: process.env.COUCH_DB_PASSWORD, GOOGLE_CLIENT_ID: process.env.GOOGLE_CLIENT_ID, @@ -165,6 +166,8 @@ const environment = { DISABLE_JWT_WARNING: process.env.DISABLE_JWT_WARNING, BLACKLIST_IPS: process.env.BLACKLIST_IPS, SERVICE_TYPE: "unknown", + PASSWORD_MIN_LENGTH: process.env.PASSWORD_MIN_LENGTH, + PASSWORD_MAX_LENGTH: process.env.PASSWORD_MAX_LENGTH, /** * Enable to allow an admin user to login using a password. * This can be useful to prevent lockout when configuring SSO. @@ -176,12 +179,14 @@ const environment = { ...getPackageJsonFields(), DISABLE_PINO_LOGGER: process.env.DISABLE_PINO_LOGGER, OFFLINE_MODE: process.env.OFFLINE_MODE, + SESSION_EXPIRY_SECONDS: process.env.SESSION_EXPIRY_SECONDS, _set(key: any, value: any) { process.env[key] = value // @ts-ignore environment[key] = value }, ROLLING_LOG_MAX_SIZE: process.env.ROLLING_LOG_MAX_SIZE || "10M", + DISABLE_SCIM_CALLS: process.env.DISABLE_SCIM_CALLS, } // clean up any environment variable edge cases diff --git a/packages/backend-core/src/events/events.ts b/packages/backend-core/src/events/events.ts index f02b9fdf32..92b81553b0 100644 --- a/packages/backend-core/src/events/events.ts +++ b/packages/backend-core/src/events/events.ts @@ -1,4 +1,4 @@ -import { Event } from "@budibase/types" +import { Event, Identity } from "@budibase/types" import { processors } from "./processors" import identification from "./identification" import * as backfill from "./backfill" @@ -7,12 +7,19 @@ import { publishAsyncEvent } from "./asyncEvents" export const publishEvent = async ( event: Event, properties: any, - timestamp?: string | number + timestamp?: string | number, + identityOverride?: Identity ) => { // in future this should use async events via a distributed queue. - const identity = await identification.getCurrentIdentity() + const identity = + identityOverride || (await identification.getCurrentIdentity()) + + // Backfilling is get from the user cache, but when we override the identity cache is not available. 
Overrides are + // normally performed in automatic actions or operations in async flows (BPM) where the user session is not available. + const backfilling = identityOverride + ? false + : await backfill.isBackfillingEvent(event) - const backfilling = await backfill.isBackfillingEvent(event) // no backfill - send the event and exit if (!backfilling) { // send off async events if required diff --git a/packages/backend-core/src/events/processors/AuditLogsProcessor.ts b/packages/backend-core/src/events/processors/AuditLogsProcessor.ts index 94b4e1b09f..3dd2ab9d10 100644 --- a/packages/backend-core/src/events/processors/AuditLogsProcessor.ts +++ b/packages/backend-core/src/events/processors/AuditLogsProcessor.ts @@ -1,7 +1,6 @@ import { Event, Identity, - Group, IdentityType, AuditLogQueueEvent, AuditLogFn, @@ -79,11 +78,11 @@ export default class AuditLogsProcessor implements EventProcessor { } } - async identify(identity: Identity, timestamp?: string | number) { + async identify() { // no-op } - async identifyGroup(group: Group, timestamp?: string | number) { + async identifyGroup() { // no-op } diff --git a/packages/backend-core/src/events/processors/LoggingProcessor.ts b/packages/backend-core/src/events/processors/LoggingProcessor.ts index 0f4d02b99c..9f2dc5a473 100644 --- a/packages/backend-core/src/events/processors/LoggingProcessor.ts +++ b/packages/backend-core/src/events/processors/LoggingProcessor.ts @@ -8,8 +8,7 @@ export default class LoggingProcessor implements EventProcessor { async processEvent( event: Event, identity: Identity, - properties: any, - timestamp?: string + properties: any ): Promise { if (skipLogging) { return @@ -17,14 +16,14 @@ export default class LoggingProcessor implements EventProcessor { console.log(`[audit] [identityType=${identity.type}] ${event}`, properties) } - async identify(identity: Identity, timestamp?: string | number) { + async identify(identity: Identity) { if (skipLogging) { return } console.log(`[audit] identified`, identity) } - async identifyGroup(group: Group, timestamp?: string | number) { + async identifyGroup(group: Group) { if (skipLogging) { return } diff --git a/packages/backend-core/src/events/processors/async/DocumentUpdateProcessor.ts b/packages/backend-core/src/events/processors/async/DocumentUpdateProcessor.ts index 54304ee21b..92afcdc637 100644 --- a/packages/backend-core/src/events/processors/async/DocumentUpdateProcessor.ts +++ b/packages/backend-core/src/events/processors/async/DocumentUpdateProcessor.ts @@ -14,12 +14,7 @@ export default class DocumentUpdateProcessor implements EventProcessor { this.processors = processors } - async processEvent( - event: Event, - identity: Identity, - properties: any, - timestamp?: string | number - ) { + async processEvent(event: Event, identity: Identity, properties: any) { const tenantId = identity.realTenantId const docId = getDocumentId(event, properties) if (!tenantId || !docId) { diff --git a/packages/backend-core/src/events/processors/posthog/index.ts b/packages/backend-core/src/events/processors/posthog/index.ts index dceb10d2cd..5a2b1afc9f 100644 --- a/packages/backend-core/src/events/processors/posthog/index.ts +++ b/packages/backend-core/src/events/processors/posthog/index.ts @@ -1,2 +1,3 @@ import PosthogProcessor from "./PosthogProcessor" + export default PosthogProcessor diff --git a/packages/backend-core/src/events/processors/posthog/tests/PosthogProcessor.spec.ts b/packages/backend-core/src/events/processors/posthog/tests/PosthogProcessor.spec.ts index 0722fc3293..d9a5504073 
100644 --- a/packages/backend-core/src/events/processors/posthog/tests/PosthogProcessor.spec.ts +++ b/packages/backend-core/src/events/processors/posthog/tests/PosthogProcessor.spec.ts @@ -1,7 +1,9 @@ import { testEnv } from "../../../../../tests/extra" import PosthogProcessor from "../PosthogProcessor" import { Event, IdentityType, Hosting } from "@budibase/types" + const tk = require("timekeeper") + import * as cache from "../../../../cache/generic" import { CacheKey } from "../../../../cache/generic" import * as context from "../../../../context" diff --git a/packages/backend-core/src/events/publishers/account.ts b/packages/backend-core/src/events/publishers/account.ts index d337e404ef..99767962dd 100644 --- a/packages/backend-core/src/events/publishers/account.ts +++ b/packages/backend-core/src/events/publishers/account.ts @@ -5,13 +5,19 @@ import { AccountCreatedEvent, AccountDeletedEvent, AccountVerifiedEvent, + Identity, } from "@budibase/types" -async function created(account: Account) { +async function created(account: Account, identityOverride?: Identity) { const properties: AccountCreatedEvent = { tenantId: account.tenantId, } - await publishEvent(Event.ACCOUNT_CREATED, properties) + await publishEvent( + Event.ACCOUNT_CREATED, + properties, + undefined, + identityOverride + ) } async function deleted(account: Account) { diff --git a/packages/backend-core/src/events/publishers/app.ts b/packages/backend-core/src/events/publishers/app.ts index d08d59b5f1..af26b09e72 100644 --- a/packages/backend-core/src/events/publishers/app.ts +++ b/packages/backend-core/src/events/publishers/app.ts @@ -13,6 +13,7 @@ import { AppVersionRevertedEvent, AppRevertedEvent, AppExportedEvent, + AppDuplicatedEvent, } from "@budibase/types" const created = async (app: App, timestamp?: string | number) => { @@ -77,6 +78,17 @@ async function fileImported(app: App) { await publishEvent(Event.APP_FILE_IMPORTED, properties) } +async function duplicated(app: App, duplicateAppId: string) { + const properties: AppDuplicatedEvent = { + duplicateAppId, + appId: app.appId, + audited: { + name: app.name, + }, + } + await publishEvent(Event.APP_DUPLICATED, properties) +} + async function templateImported(app: App, templateKey: string) { const properties: AppTemplateImportedEvent = { appId: app.appId, @@ -147,6 +159,7 @@ export default { published, unpublished, fileImported, + duplicated, templateImported, versionUpdated, versionReverted, diff --git a/packages/backend-core/src/events/publishers/query.ts b/packages/backend-core/src/events/publishers/query.ts index 7d28129cf6..48603257d2 100644 --- a/packages/backend-core/src/events/publishers/query.ts +++ b/packages/backend-core/src/events/publishers/query.ts @@ -3,6 +3,7 @@ import { Event, Datasource, Query, + QueryPreview, QueryCreatedEvent, QueryUpdatedEvent, QueryDeletedEvent, @@ -68,9 +69,9 @@ const run = async (count: number, timestamp?: string | number) => { await publishEvent(Event.QUERIES_RUN, properties, timestamp) } -const previewed = async (datasource: Datasource, query: Query) => { +const previewed = async (datasource: Datasource, query: QueryPreview) => { const properties: QueryPreviewedEvent = { - queryId: query._id, + queryId: query.queryId, datasourceId: datasource._id as string, source: datasource.source, queryVerb: query.queryVerb, diff --git a/packages/backend-core/src/features/index.ts b/packages/backend-core/src/features/index.ts index 8f5c903e05..ad517082de 100644 --- a/packages/backend-core/src/features/index.ts +++ 
b/packages/backend-core/src/features/index.ts @@ -1,5 +1,6 @@ import env from "../environment" import * as context from "../context" + export * from "./installation" /** diff --git a/packages/backend-core/src/index.ts b/packages/backend-core/src/index.ts index c7cf9f56cc..8001017092 100644 --- a/packages/backend-core/src/index.ts +++ b/packages/backend-core/src/index.ts @@ -2,6 +2,7 @@ export * as configs from "./configs" export * as events from "./events" export * as migrations from "./migrations" export * as users from "./users" +export * as userUtils from "./users/utils" export * as roles from "./security/roles" export * as permissions from "./security/permissions" export * as accounts from "./accounts" @@ -32,11 +33,14 @@ export * as blacklist from "./blacklist" export * as docUpdates from "./docUpdates" export * from "./utils/Duration" export { SearchParams } from "./db" +export * as docIds from "./docIds" +export * as security from "./security" // Add context to tenancy for backwards compatibility // only do this for external usages to prevent internal // circular dependencies import * as context from "./context" import * as _tenancy from "./tenancy" + export const tenancy = { ..._tenancy, ...context, @@ -50,6 +54,7 @@ export * from "./constants" // expose package init function import * as db from "./db" + export const init = (opts: any = {}) => { db.init(opts.db) } diff --git a/packages/backend-core/src/installation.ts b/packages/backend-core/src/installation.ts index 17eda2004d..83166880cc 100644 --- a/packages/backend-core/src/installation.ts +++ b/packages/backend-core/src/installation.ts @@ -1,12 +1,12 @@ import { newid } from "./utils" import * as events from "./events" -import { StaticDatabases } from "./db" -import { doWithDB } from "./db" +import { StaticDatabases, doWithDB } from "./db" import { Installation, IdentityType, Database } from "@budibase/types" import * as context from "./context" import semver from "semver" import { bustCache, withCache, TTL, CacheKey } from "./cache/generic" import environment from "./environment" +import { logAlert } from "./logging" export const getInstall = async (): Promise<Installation> => { return withCache(CacheKey.INSTALLATION, TTL.ONE_DAY, getInstallFromDB, { @@ -81,27 +81,35 @@ export const checkInstallVersion = async (): Promise<void> => { const currentVersion = install.version const newVersion = environment.VERSION - if (currentVersion !== newVersion) { - const isUpgrade = semver.gt(newVersion, currentVersion) - const isDowngrade = semver.lt(newVersion, currentVersion) + try { + if (currentVersion !== newVersion) { + const isUpgrade = semver.gt(newVersion, currentVersion) + const isDowngrade = semver.lt(newVersion, currentVersion) - const success = await updateVersion(newVersion) + const success = await updateVersion(newVersion) - if (success) { - await context.doInIdentityContext( - { - _id: install.installId, - type: IdentityType.INSTALLATION, - }, - async () => { - if (isUpgrade) { - await events.installation.upgraded(currentVersion, newVersion) - } else if (isDowngrade) { - await events.installation.downgraded(currentVersion, newVersion) + if (success) { + await context.doInIdentityContext( + { + _id: install.installId, + type: IdentityType.INSTALLATION, + }, + async () => { + if (isUpgrade) { + await events.installation.upgraded(currentVersion, newVersion) + } else if (isDowngrade) { + await events.installation.downgraded(currentVersion, newVersion) + } } - } - ) - await events.identification.identifyInstallationGroup(install.installId) + ) + await events.identification.identifyInstallationGroup(install.installId) + } + } + } catch (err: any) { + if (err?.message?.includes("Invalid Version")) { + logAlert(`Invalid version "${newVersion}" - is it semver?`) + } else { + logAlert("Failed to retrieve version", err) } } }
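For context, a sketch (not part of the patch) of the failure mode the new try/catch guards against - node-semver throws on non-semver strings:

import semver from "semver"

function classifyVersionChange(current: string, next: string) {
  try {
    if (semver.gt(next, current)) return "upgrade"
    if (semver.lt(next, current)) return "downgrade"
    return "unchanged"
  } catch {
    // e.g. semver.gt("2.0.0", "latest") throws TypeError: Invalid Version
    return "invalid"
  }
}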
diff --git a/packages/backend-core/src/logging/correlation/correlation.ts b/packages/backend-core/src/logging/correlation/correlation.ts index b5b47df9c6..13cc7aff8f 100644 --- a/packages/backend-core/src/logging/correlation/correlation.ts +++ b/packages/backend-core/src/logging/correlation/correlation.ts @@ -1,11 +1,13 @@ import { Header } from "../../constants" + const correlator = require("correlation-id") -export const setHeader = (headers: any) => { +export const setHeader = (headers: Record<string, string>) => { const correlationId = correlator.getId() - if (correlationId) { - headers[Header.CORRELATION_ID] = correlationId + if (!correlationId) { + return } + headers[Header.CORRELATION_ID] = correlationId } export function getId() { diff --git a/packages/backend-core/src/logging/correlation/middleware.ts b/packages/backend-core/src/logging/correlation/middleware.ts index f77714a5ae..45baee1fb1 100644 --- a/packages/backend-core/src/logging/correlation/middleware.ts +++ b/packages/backend-core/src/logging/correlation/middleware.ts @@ -1,5 +1,6 @@ import { Header } from "../../constants" import { v4 as uuid } from "uuid" + const correlator = require("correlation-id") const correlation = (ctx: any, next: any) => { diff --git a/packages/backend-core/src/logging/pino/logger.ts b/packages/backend-core/src/logging/pino/logger.ts index 7c444a3a59..0a8470a453 100644 --- a/packages/backend-core/src/logging/pino/logger.ts +++ b/packages/backend-core/src/logging/pino/logger.ts @@ -5,9 +5,23 @@ import { IdentityType } from "@budibase/types" import env from "../../environment" import * as context from "../../context" import * as correlation from "../correlation" +import tracer from "dd-trace" +import { formats } from "dd-trace/ext" import { localFileDestination } from "../system" +function isPlainObject(obj: any) { + return typeof obj === "object" && obj !== null && !(obj instanceof Error) +} + +function isError(obj: any) { + return obj instanceof Error +} + +function isMessage(obj: any) { + return typeof obj === "string" +} + // LOGGER let pinoInstance: pino.Logger | undefined @@ -69,23 +83,11 @@ if (!env.DISABLE_PINO_LOGGER) { err?: Error } - function isPlainObject(obj: any) { - return typeof obj === "object" && obj !== null && !(obj instanceof Error) - } - - function isError(obj: any) { - return obj instanceof Error - } - - function isMessage(obj: any) { - return typeof obj === "string" - } - /** * Backwards compatibility between console logging statements * and pino logging requirements.
*/ - function getLogParams(args: any[]): [MergingObject, string] { + const getLogParams = (args: any[]): [MergingObject, string] => { let error = undefined let objects: any[] = [] let message = "" @@ -115,6 +117,11 @@ if (!env.DISABLE_PINO_LOGGER) { correlationId: correlation.getId(), } + const span = tracer.scope().active() + if (span) { + tracer.inject(span.context(), formats.LOG, contextObject) + } + const mergingObject: any = { err: error, pid: process.pid, diff --git a/packages/backend-core/src/logging/pino/middleware.ts b/packages/backend-core/src/logging/pino/middleware.ts index 569420c5f2..df18a35eb1 100644 --- a/packages/backend-core/src/logging/pino/middleware.ts +++ b/packages/backend-core/src/logging/pino/middleware.ts @@ -1,9 +1,12 @@ import env from "../../environment" import { logger } from "./logger" import { IncomingMessage } from "http" + const pino = require("koa-pino-logger") + import { Options } from "pino-http" import { Ctx } from "@budibase/types" + const correlator = require("correlation-id") export function pinoSettings(): Options { diff --git a/packages/backend-core/src/middleware/authenticated.ts b/packages/backend-core/src/middleware/authenticated.ts index 8bd6591d05..d357dbdbdc 100644 --- a/packages/backend-core/src/middleware/authenticated.ts +++ b/packages/backend-core/src/middleware/authenticated.ts @@ -13,8 +13,9 @@ import { getGlobalDB, doInTenant } from "../context" import { decrypt } from "../security/encryption" import * as identity from "../context/identity" import env from "../environment" -import { Ctx, EndpointMatcher } from "@budibase/types" +import { Ctx, EndpointMatcher, SessionCookie } from "@budibase/types" import { InvalidAPIKeyError, ErrorCode } from "../errors" +import tracer from "dd-trace" const ONE_MINUTE = env.SESSION_UPDATE_PERIOD ? 
parseInt(env.SESSION_UPDATE_PERIOD) @@ -98,7 +99,9 @@ export default function ( // check the actual user is authenticated first, try header or cookie let headerToken = ctx.request.headers[Header.TOKEN] - const authCookie = getCookie(ctx, Cookie.Auth) || openJwt(headerToken) + const authCookie = + getCookie<SessionCookie>(ctx, Cookie.Auth) || + openJwt<SessionCookie>(headerToken) let apiKey = ctx.request.headers[Header.API_KEY] if (!apiKey && ctx.request.headers[Header.AUTHORIZATION]) { @@ -164,6 +167,16 @@ if (!authenticated) { authenticated = false } + + if (user) { + tracer.setUser({ + id: user?._id, + tenantId: user?.tenantId, + budibaseAccess: user?.budibaseAccess, + status: user?.status, + }) + } + // isAuthenticated is a function, so use a variable to be able to check authed state finalise(ctx, { authenticated, user, internal, version, publicEndpoint }) diff --git a/packages/backend-core/src/middleware/errorHandling.ts b/packages/backend-core/src/middleware/errorHandling.ts index ebdd4107e9..2b8f7195ed 100644 --- a/packages/backend-core/src/middleware/errorHandling.ts +++ b/packages/backend-core/src/middleware/errorHandling.ts @@ -1,5 +1,6 @@ import { APIError } from "@budibase/types" import * as errors from "../errors" +import environment from "../environment" export async function errorHandling(ctx: any, next: any) { try { @@ -14,15 +15,19 @@ console.error(err) } - const error = errors.getPublicError(err) - const body: APIError = { + let error: APIError = { message: err.message, status: status, validationErrors: err.validation, - error, + error: errors.getPublicError(err), } - ctx.body = body + if (environment.isTest() && ctx.headers["x-budibase-include-stacktrace"]) { + // @ts-ignore + error.stack = err.stack + } + + ctx.body = error } } diff --git a/packages/backend-core/src/middleware/index.ts b/packages/backend-core/src/middleware/index.ts index 980bf06b00..e1eb7f1d26 100644 --- a/packages/backend-core/src/middleware/index.ts +++ b/packages/backend-core/src/middleware/index.ts @@ -2,6 +2,7 @@ export * as local from "./passport/local" export * as google from "./passport/sso/google" export * as oidc from "./passport/sso/oidc" import * as datasourceGoogle from "./passport/datasource/google" + export const datasource = { google: datasourceGoogle, } diff --git a/packages/backend-core/src/middleware/joi-validator.ts b/packages/backend-core/src/middleware/joi-validator.ts index fcc8316886..ac8064a512 100644 --- a/packages/backend-core/src/middleware/joi-validator.ts +++ b/packages/backend-core/src/middleware/joi-validator.ts @@ -1,12 +1,12 @@ -import Joi, { ObjectSchema } from "joi" -import { BBContext } from "@budibase/types" +import Joi from "joi" +import { Ctx } from "@budibase/types" function validate( schema: Joi.ObjectSchema | Joi.ArraySchema, property: string ) { // Return a Koa middleware function - return (ctx: BBContext, next: any) => { + return (ctx: Ctx, next: any) => { if (!schema) { return next() } @@ -30,7 +30,6 @@ function validate( const { error } = schema.validate(params) if (error) { ctx.throw(400, `Invalid ${property} - ${error.message}`) - return } return next() } diff --git a/packages/backend-core/src/middleware/matchers.ts b/packages/backend-core/src/middleware/matchers.ts index efbdec2dbe..757d93a60d 100644 --- a/packages/backend-core/src/middleware/matchers.ts +++ b/packages/backend-core/src/middleware/matchers.ts @@ -11,7 +11,6 @@ export const buildMatcherRegex = ( return patterns.map(pattern => { let
route = pattern.route const method = pattern.method - const strict = pattern.strict ? pattern.strict : false // if there is a param in the route // use a wildcard pattern @@ -24,24 +23,17 @@ export const buildMatcherRegex = ( } } - return { regex: new RegExp(route), method, strict, route } + return { regex: new RegExp(route), method, route } }) } export const matches = (ctx: BBContext, options: RegexMatcher[]) => { - return options.find(({ regex, method, strict, route }) => { - let urlMatch - if (strict) { - urlMatch = ctx.request.url === route - } else { - urlMatch = regex.test(ctx.request.url) - } - + return options.find(({ regex, method }) => { + const urlMatch = regex.test(ctx.request.url) const methodMatch = method === "ALL" ? true : ctx.request.method.toLowerCase() === method.toLowerCase() - return urlMatch && methodMatch }) } diff --git a/packages/backend-core/src/middleware/passport/datasource/google.ts b/packages/backend-core/src/middleware/passport/datasource/google.ts index ae6b3b4913..7f768f1623 100644 --- a/packages/backend-core/src/middleware/passport/datasource/google.ts +++ b/packages/backend-core/src/middleware/passport/datasource/google.ts @@ -58,7 +58,14 @@ export async function postAuth( const platformUrl = await configs.getPlatformUrl({ tenantAware: false }) let callbackUrl = `${platformUrl}/api/global/auth/datasource/google/callback` - const authStateCookie = utils.getCookie(ctx, Cookie.DatasourceAuth) + const authStateCookie = utils.getCookie<{ appId: string }>( + ctx, + Cookie.DatasourceAuth + ) + + if (!authStateCookie) { + throw new Error("Unable to fetch datasource auth cookie") + } return passport.authenticate( new GoogleStrategy( diff --git a/packages/backend-core/src/middleware/passport/sso/google.ts b/packages/backend-core/src/middleware/passport/sso/google.ts index ad7593e63d..2a08ad7665 100644 --- a/packages/backend-core/src/middleware/passport/sso/google.ts +++ b/packages/backend-core/src/middleware/passport/sso/google.ts @@ -8,6 +8,7 @@ import { SaveSSOUserFunction, GoogleInnerConfig, } from "@budibase/types" + const GoogleStrategy = require("passport-google-oauth").OAuth2Strategy export function buildVerifyFn(saveUserFn: SaveSSOUserFunction) { diff --git a/packages/backend-core/src/middleware/passport/sso/sso.ts b/packages/backend-core/src/middleware/passport/sso/sso.ts index 2fc1184722..ee84f03dae 100644 --- a/packages/backend-core/src/middleware/passport/sso/sso.ts +++ b/packages/backend-core/src/middleware/passport/sso/sso.ts @@ -5,7 +5,6 @@ import * as context from "../../../context" import fetch from "node-fetch" import { SaveSSOUserFunction, - SaveUserOpts, SSOAuthDetails, SSOUser, User, @@ -14,10 +13,8 @@ import { // no-op function for user save // - this allows datasource auth and access token refresh to work correctly // - prefer no-op over an optional argument to ensure function is provided to login flows -export const ssoSaveUserNoOp: SaveSSOUserFunction = ( - user: SSOUser, - opts: SaveUserOpts -) => Promise.resolve(user) +export const ssoSaveUserNoOp: SaveSSOUserFunction = (user: SSOUser) => + Promise.resolve(user) /** * Common authentication logic for third parties. e.g. OAuth, OIDC. 
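A sketch (not part of the patch) of how the no-op save is wired in, using buildVerifyFn from the google.ts hunk above:

import { buildVerifyFn } from "./google"
import { ssoSaveUserNoOp } from "./sso"

// Datasource auth and access-token refresh re-run SSO verification but must
// not persist the user, so they pass the no-op rather than a real save.
const verify = buildVerifyFn(ssoSaveUserNoOp)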
diff --git a/packages/backend-core/src/middleware/passport/sso/tests/google.spec.ts b/packages/backend-core/src/middleware/passport/sso/tests/google.spec.ts index d0689a1f0a..9bf855b3c5 100644 --- a/packages/backend-core/src/middleware/passport/sso/tests/google.spec.ts +++ b/packages/backend-core/src/middleware/passport/sso/tests/google.spec.ts @@ -6,6 +6,7 @@ const mockStrategy = require("passport-google-oauth").OAuth2Strategy jest.mock("../sso") import * as _sso from "../sso" + const sso = jest.mocked(_sso) const mockSaveUserFn = jest.fn() diff --git a/packages/backend-core/src/middleware/passport/sso/tests/sso.spec.ts b/packages/backend-core/src/middleware/passport/sso/tests/sso.spec.ts index c3ddf220e6..ea9584c284 100644 --- a/packages/backend-core/src/middleware/passport/sso/tests/sso.spec.ts +++ b/packages/backend-core/src/middleware/passport/sso/tests/sso.spec.ts @@ -11,6 +11,7 @@ const mockSaveUser = jest.fn() jest.mock("../../../../users") import * as _users from "../../../../users" + const users = jest.mocked(_users) const getErrorMessage = () => { @@ -113,11 +114,11 @@ describe("sso", () => { // tenant id added ssoUser.tenantId = context.getTenantId() - expect(mockSaveUser).toBeCalledWith(ssoUser, { + expect(mockSaveUser).toHaveBeenCalledWith(ssoUser, { hashPassword: false, requirePassword: false, }) - expect(mockDone).toBeCalledWith(null, ssoUser) + expect(mockDone).toHaveBeenCalledWith(null, ssoUser) }) }) }) @@ -158,11 +159,11 @@ describe("sso", () => { // existing id preserved ssoUser._id = existingUser._id - expect(mockSaveUser).toBeCalledWith(ssoUser, { + expect(mockSaveUser).toHaveBeenCalledWith(ssoUser, { hashPassword: false, requirePassword: false, }) - expect(mockDone).toBeCalledWith(null, ssoUser) + expect(mockDone).toHaveBeenCalledWith(null, ssoUser) }) }) @@ -186,11 +187,11 @@ describe("sso", () => { // existing id preserved ssoUser._id = existingUser._id - expect(mockSaveUser).toBeCalledWith(ssoUser, { + expect(mockSaveUser).toHaveBeenCalledWith(ssoUser, { hashPassword: false, requirePassword: false, }) - expect(mockDone).toBeCalledWith(null, ssoUser) + expect(mockDone).toHaveBeenCalledWith(null, ssoUser) }) }) }) diff --git a/packages/backend-core/src/middleware/tests/builder.spec.ts b/packages/backend-core/src/middleware/tests/builder.spec.ts index d350eff4f6..0f35b0b833 100644 --- a/packages/backend-core/src/middleware/tests/builder.spec.ts +++ b/packages/backend-core/src/middleware/tests/builder.spec.ts @@ -5,6 +5,7 @@ import { structures } from "../../../tests" import { ContextUser, ServiceType } from "@budibase/types" import { doInAppContext } from "../../context" import env from "../../environment" + env._set("SERVICE_TYPE", ServiceType.APPS) const appId = "app_aaa" @@ -23,13 +24,13 @@ function buildUserCtx(user: ContextUser) { } function passed(throwFn: jest.Func, nextFn: jest.Func) { - expect(throwFn).not.toBeCalled() - expect(nextFn).toBeCalled() + expect(throwFn).not.toHaveBeenCalled() + expect(nextFn).toHaveBeenCalled() } function threw(throwFn: jest.Func) { // cant check next, the throw function doesn't actually throw - so it still continues - expect(throwFn).toBeCalled() + expect(throwFn).toHaveBeenCalled() } describe("adminOnly middleware", () => { diff --git a/packages/backend-core/src/middleware/tests/matchers.spec.ts b/packages/backend-core/src/middleware/tests/matchers.spec.ts index c39bbb6dd3..1b79db2e68 100644 --- a/packages/backend-core/src/middleware/tests/matchers.spec.ts +++ b/packages/backend-core/src/middleware/tests/matchers.spec.ts @@ 
-34,23 +34,6 @@ describe("matchers", () => { expect(!!matchers.matches(ctx, built)).toBe(true) }) - it("doesn't wildcard path with strict", () => { - const pattern = [ - { - route: "/api/tests", - method: "POST", - strict: true, - }, - ] - const ctx = structures.koa.newContext() - ctx.request.url = "/api/tests/id/something/else" - ctx.request.method = "POST" - - const built = matchers.buildMatcherRegex(pattern) - - expect(!!matchers.matches(ctx, built)).toBe(false) - }) - it("matches with param", () => { const pattern = [ { @@ -67,23 +50,6 @@ describe("matchers", () => { expect(!!matchers.matches(ctx, built)).toBe(true) }) - // TODO: Support the below behaviour - // Strict does not work when a param is present - // it("matches with param with strict", () => { - // const pattern = [{ - // route: "/api/tests/:testId", - // method: "GET", - // strict: true - // }] - // const ctx = structures.koa.newContext() - // ctx.request.url = "/api/tests/id" - // ctx.request.method = "GET" - // - // const built = matchers.buildMatcherRegex(pattern) - // - // expect(!!matchers.matches(ctx, built)).toBe(true) - // }) - it("doesn't match by path", () => { const pattern = [ { diff --git a/packages/backend-core/src/migrations/migrations.ts b/packages/backend-core/src/migrations/migrations.ts index c750bc4882..3f033b8cdb 100644 --- a/packages/backend-core/src/migrations/migrations.ts +++ b/packages/backend-core/src/migrations/migrations.ts @@ -45,10 +45,6 @@ export const runMigration = async ( options: MigrationOptions = {} ) => { const migrationType = migration.type - let tenantId: string | undefined - if (migrationType !== MigrationType.INSTALLATION) { - tenantId = context.getTenantId() - } const migrationName = migration.name const silent = migration.silent diff --git a/packages/backend-core/src/objectStore/buckets/plugins.ts b/packages/backend-core/src/objectStore/buckets/plugins.ts index 6f1b7116ae..02be9345ab 100644 --- a/packages/backend-core/src/objectStore/buckets/plugins.ts +++ b/packages/backend-core/src/objectStore/buckets/plugins.ts @@ -6,7 +6,7 @@ import { Plugin } from "@budibase/types" // URLS -export function enrichPluginURLs(plugins: Plugin[]) { +export function enrichPluginURLs(plugins?: Plugin[]): Plugin[] { if (!plugins || !plugins.length) { return [] } diff --git a/packages/backend-core/src/objectStore/buckets/tests/app.spec.ts b/packages/backend-core/src/objectStore/buckets/tests/app.spec.ts index cbbbee6255..4a132ce54d 100644 --- a/packages/backend-core/src/objectStore/buckets/tests/app.spec.ts +++ b/packages/backend-core/src/objectStore/buckets/tests/app.spec.ts @@ -126,7 +126,7 @@ describe("app", () => { it("gets url with embedded minio", async () => { testEnv.withMinio() - await testEnv.withTenant(tenantId => { + await testEnv.withTenant(() => { const url = getAppFileUrl() expect(url).toBe( "/files/signed/prod-budi-app-assets/app_123/attachments/image.jpeg" @@ -136,7 +136,7 @@ describe("app", () => { it("gets url with custom S3", async () => { testEnv.withS3() - await testEnv.withTenant(tenantId => { + await testEnv.withTenant(() => { const url = getAppFileUrl() expect(url).toBe( "http://s3.example.com/prod-budi-app-assets/app_123/attachments/image.jpeg" @@ -146,7 +146,7 @@ describe("app", () => { it("gets url with cloudfront + s3", async () => { testEnv.withCloudfront() - await testEnv.withTenant(tenantId => { + await testEnv.withTenant(() => { const url = getAppFileUrl() // omit rest of signed params expect( diff --git a/packages/backend-core/src/objectStore/cloudfront.ts 
b/packages/backend-core/src/objectStore/cloudfront.ts index 866fe9e880..3bca97d11e 100644 --- a/packages/backend-core/src/objectStore/cloudfront.ts +++ b/packages/backend-core/src/objectStore/cloudfront.ts @@ -23,7 +23,7 @@ const getCloudfrontSignParams = () => { return { keypairId: env.CLOUDFRONT_PUBLIC_KEY_ID!, privateKeyString: getPrivateKey(), - expireTime: new Date().getTime() + 1000 * 60 * 60, // 1 hour + expireTime: new Date().getTime() + 1000 * 60 * 60 * 24, // 1 day } } diff --git a/packages/backend-core/src/objectStore/objectStore.ts b/packages/backend-core/src/objectStore/objectStore.ts index 76d2dd6689..8d18fb97fd 100644 --- a/packages/backend-core/src/objectStore/objectStore.ts +++ b/packages/backend-core/src/objectStore/objectStore.ts @@ -1,4 +1,5 @@ const sanitize = require("sanitize-s3-objectkey") + import AWS from "aws-sdk" import stream, { Readable } from "stream" import fetch from "node-fetch" @@ -6,7 +7,7 @@ import tar from "tar-fs" import zlib from "zlib" import { promisify } from "util" import { join } from "path" -import fs from "fs" +import fs, { ReadStream } from "fs" import env from "../environment" import { budibaseTempDir } from "./utils" import { v4 } from "uuid" @@ -183,7 +184,7 @@ export async function upload({ export async function streamUpload( bucketName: string, filename: string, - stream: any, + stream: ReadStream | ReadableStream, extra = {} ) { const objectStore = ObjectStore(bucketName) @@ -254,7 +255,8 @@ export async function listAllObjects(bucketName: string, path: string) { objects = objects.concat(response.Contents) } isTruncated = !!response.IsTruncated - } while (isTruncated) + token = response.NextContinuationToken + } while (isTruncated && token) return objects } @@ -304,20 +306,33 @@ export async function retrieveDirectory(bucketName: string, path: string) { let writePath = join(budibaseTempDir(), v4()) fs.mkdirSync(writePath) const objects = await listAllObjects(bucketName, path) - let fullObjects = await Promise.all( - objects.map(obj => retrieve(bucketName, obj.Key!)) + let streams = await Promise.all( + objects.map(obj => getReadStream(bucketName, obj.Key!)) ) let count = 0 + const writePromises: Promise<void>[] = [] for (let obj of objects) { const filename = obj.Key! - const data = fullObjects[count++] + const stream = streams[count++] const possiblePath = filename.split("/") - if (possiblePath.length > 1) { - const dirs = possiblePath.slice(0, possiblePath.length - 1) - fs.mkdirSync(join(writePath, ...dirs), { recursive: true }) + const dirs = possiblePath.slice(0, possiblePath.length - 1) + const possibleDir = join(writePath, ...dirs) + if (possiblePath.length > 1 && !fs.existsSync(possibleDir)) { + fs.mkdirSync(possibleDir, { recursive: true }) } - fs.writeFileSync(join(writePath, ...possiblePath), data) + const writeStream = fs.createWriteStream(join(writePath, ...possiblePath), { + mode: 0o644, + }) + stream.pipe(writeStream) + writePromises.push( + new Promise<void>((resolve, reject) => { + stream.on("finish", resolve) + stream.on("error", reject) + writeStream.on("error", reject) + }) + ) } + await Promise.all(writePromises) return writePath }
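A sketch of calling the reworked retrieveDirectory (not part of the patch; bucket and path are hypothetical). Each object is now piped straight to disk, so large directories are no longer buffered whole in memory:

import { retrieveDirectory } from "./objectStore"

async function downloadAppAssets(): Promise<string> {
  // resolves to a temp directory containing the downloaded objects
  return await retrieveDirectory("prod-budi-app-assets", "app_123")
}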
diff --git a/packages/backend-core/src/objectStore/utils.ts b/packages/backend-core/src/objectStore/utils.ts index dba5f3d1c2..4c3a84ba91 100644 --- a/packages/backend-core/src/objectStore/utils.ts +++ b/packages/backend-core/src/objectStore/utils.ts @@ -18,8 +18,12 @@ export const ObjectStoreBuckets = { } const bbTmp = join(tmpdir(), ".budibase") -if (!fs.existsSync(bbTmp)) { +try { fs.mkdirSync(bbTmp) +} catch (e: any) { + if (e.code !== "EEXIST") { + throw e + } } export function budibaseTempDir() { diff --git a/packages/backend-core/src/platform/tests/tenants.spec.ts b/packages/backend-core/src/platform/tests/tenants.spec.ts index b2ab75c954..e22003fd45 100644 --- a/packages/backend-core/src/platform/tests/tenants.spec.ts +++ b/packages/backend-core/src/platform/tests/tenants.spec.ts @@ -3,7 +3,7 @@ import { DBTestConfiguration } from "../../../tests/extra" import * as tenants from "../tenants" describe("tenants", () => { - const config = new DBTestConfiguration() + new DBTestConfiguration() describe("addTenant", () => { it("concurrently adds multiple tenants safely", async () => { diff --git a/packages/backend-core/src/platform/users.ts b/packages/backend-core/src/platform/users.ts index 6f030afb7c..ccaad76b19 100644 --- a/packages/backend-core/src/platform/users.ts +++ b/packages/backend-core/src/platform/users.ts @@ -20,7 +20,7 @@ export async function lookupTenantId(userId: string) { return user.tenantId } -async function getUserDoc(emailOrId: string): Promise<PlatformUser> { +export async function getUserDoc(emailOrId: string): Promise<PlatformUser> { const db = getPlatformDB() return db.get(emailOrId) } @@ -79,6 +79,17 @@ async function addUserDoc(emailOrId: string, newDocFn: () => PlatformUser) { } } +export async function addSsoUser( + ssoId: string, + email: string, + userId: string, + tenantId: string +) { + return addUserDoc(ssoId, () => + newUserSsoIdDoc(ssoId, email, userId, tenantId) + ) +} + export async function addUser( tenantId: string, userId: string, @@ -91,9 +102,7 @@ ] if (ssoId) { - promises.push( - addUserDoc(ssoId, () => newUserSsoIdDoc(ssoId, email, userId, tenantId)) - ) + promises.push(addSsoUser(ssoId, email, userId, tenantId)) } await Promise.all(promises)
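A sketch of the newly exported helper (not part of the patch; the IDs are hypothetical):

import { addSsoUser } from "./users"

// Previously only reachable via addUser - now an SSO id mapping can be
// created on its own, e.g. when linking SSO to an existing account.
async function linkSsoId() {
  await addSsoUser("sso_abc", "user@example.com", "us_123", "tenant_1")
}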
diff --git a/packages/backend-core/src/queue/constants.ts b/packages/backend-core/src/queue/constants.ts index e1ffcfee36..a095c6c769 100644 --- a/packages/backend-core/src/queue/constants.ts +++ b/packages/backend-core/src/queue/constants.ts @@ -3,4 +3,6 @@ export enum JobQueue { APP_BACKUP = "appBackupQueue", AUDIT_LOG = "auditLogQueue", SYSTEM_EVENT_QUEUE = "systemEventQueue", + APP_MIGRATION = "appMigration", + DOC_WRITETHROUGH_QUEUE = "docWritethroughQueue", } diff --git a/packages/backend-core/src/queue/inMemoryQueue.ts b/packages/backend-core/src/queue/inMemoryQueue.ts index a8add7ecb6..87e43b324d 100644 --- a/packages/backend-core/src/queue/inMemoryQueue.ts +++ b/packages/backend-core/src/queue/inMemoryQueue.ts @@ -1,5 +1,14 @@ import events from "events" -import { timeout } from "../utils" +import { newid, timeout } from "../utils" +import { Queue, QueueOptions, JobOptions } from "./queue" + +interface JobMessage { + id: string + timestamp: number + queue: string + data: any + opts?: JobOptions +} /** * Bull works with a Job wrapper around all messages that contains a lot more information about * @returns A new job which can now be put onto the queue, this is mostly an * internal structure so that an in memory queue can be easily swapped for a Bull queue. */ -function newJob(queue: string, message: any) { +function newJob(queue: string, message: any, opts?: JobOptions): JobMessage { return { + id: newid(), timestamp: Date.now(), queue: queue, data: message, + opts, } } @@ -23,26 +34,29 @@ * It is relatively simple, using an event emitter internally to register when messages are available * to the consumers - in can support many inputs and many consumers. */ -class InMemoryQueue { +class InMemoryQueue implements Partial<Queue> { _name: string - _opts?: any - _messages: any[] - _emitter: EventEmitter + _opts?: QueueOptions + _messages: JobMessage[] + _queuedJobIds: Set<string> + _emitter: NodeJS.EventEmitter _runCount: number _addCount: number + /** * The constructor the queue, exactly the same as that of Bulls. * @param name The name of the queue which is being configured. * @param opts This is not used by the in memory queue as there is no real use * case when in memory, but is the same API as Bull */ - constructor(name: string, opts?: any) { + constructor(name: string, opts?: QueueOptions) { this._name = name this._opts = opts this._messages = [] this._emitter = new events.EventEmitter() this._runCount = 0 this._addCount = 0 + this._queuedJobIds = new Set() } /** @@ -54,20 +68,44 @@ * note this is incredibly limited compared to Bull as in reality the Job would contain * a lot more information about the queue and current status of Bull cluster. */ - process(func: any) { + async process(func: any) { this._emitter.on("message", async () => { if (this._messages.length <= 0) { return } let msg = this._messages.shift() + let resp = func(msg) + + async function retryFunc(fnc: any) { + try { + await fnc + } catch (e: any) { + await new Promise<void>(r => setTimeout(() => r(), 50)) + + await retryFunc(func(msg)) + } + } + if (resp.then != null) { - await resp + try { + await retryFunc(resp) + } catch (e: any) { + console.error(e) + } } this._runCount++ + const jobId = msg?.opts?.jobId?.toString() + if (jobId && msg?.opts?.removeOnComplete) { + this._queuedJobIds.delete(jobId) + } }) } + async isReady() { + return this as any + } + // simply puts a message to the queue and emits to the queue for processing /** * Simple function to replicate the add message functionality of Bull, putting
*/ // eslint-disable-next-line no-unused-vars - add(msg: any, repeat: boolean) { - if (typeof msg !== "object") { + async add(data: any, opts?: JobOptions) { + const jobId = opts?.jobId?.toString() + if (jobId && this._queuedJobIds.has(jobId)) { + console.log(`Ignoring already queued job ${jobId}`) + return + } + + if (typeof data !== "object") { throw "Queue only supports carrying JSON." } - this._messages.push(newJob(this._name, msg)) - this._addCount++ - this._emitter.emit("message") + if (jobId) { + this._queuedJobIds.add(jobId) + } + + const pushMessage = () => { + this._messages.push(newJob(this._name, data, opts)) + this._addCount++ + this._emitter.emit("message") + } + + const delay = opts?.delay + if (delay) { + setTimeout(pushMessage, delay) + } else { + pushMessage() + } + return {} as any } /** * replicating the close function from bull, which waits for jobs to finish. */ - async close() { - return [] - } + async close() {} /** * This removes a cron which has been implemented, this is part of Bull API. * @param cronJobId The cron which is to be removed. */ - removeRepeatableByKey(cronJobId: string) { + async removeRepeatableByKey(cronJobId: string) { // TODO: implement for testing console.log(cronJobId) } @@ -106,12 +162,12 @@ class InMemoryQueue { /** * Implemented for tests */ - getRepeatableJobs() { + async getRepeatableJobs() { return [] } - // eslint-disable-next-line no-unused-vars - removeJobs(pattern: string) { + // eslint-disable-next-line @typescript-eslint/no-unused-vars + async removeJobs(pattern: string) { // no-op } @@ -123,18 +179,22 @@ } async getJob() { - return {} + return null } on() { // do nothing - return this + return this as any } async waitForCompletion() { do { await timeout(50) - } while (this._addCount < this._runCount) + } while (this.hasRunningJobs()) + } + + hasRunningJobs() { + return this._addCount > this._runCount } }
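A sketch of exercising the upgraded in-memory queue in a test (not part of the patch; the payload is hypothetical):

import { JobQueue } from "./constants"
import InMemoryQueue from "./inMemoryQueue"

async function example() {
  const queue = new InMemoryQueue(JobQueue.APP_MIGRATION)
  await queue.process(async (job: any) => {
    console.log(job.data)
  })
  // the second add is ignored while the same jobId is still queued
  await queue.add({ appId: "app_123" }, { jobId: "app_123" })
  await queue.add({ appId: "app_123" }, { jobId: "app_123" })
  await queue.waitForCompletion()
}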
diff --git a/packages/backend-core/src/queue/listeners.ts b/packages/backend-core/src/queue/listeners.ts index 42e3172364..cd25ff2254 100644 --- a/packages/backend-core/src/queue/listeners.ts +++ b/packages/backend-core/src/queue/listeners.ts @@ -87,6 +87,8 @@ enum QueueEventType { APP_BACKUP_EVENT = "app-backup-event", AUDIT_LOG_EVENT = "audit-log-event", SYSTEM_EVENT = "system-event", + APP_MIGRATION = "app-migration", + DOC_WRITETHROUGH = "doc-writethrough", } const EventTypeMap: { [key in JobQueue]: QueueEventType } = { @@ -94,6 +96,8 @@ [JobQueue.APP_BACKUP]: QueueEventType.APP_BACKUP_EVENT, [JobQueue.AUDIT_LOG]: QueueEventType.AUDIT_LOG_EVENT, [JobQueue.SYSTEM_EVENT_QUEUE]: QueueEventType.SYSTEM_EVENT, + [JobQueue.APP_MIGRATION]: QueueEventType.APP_MIGRATION, + [JobQueue.DOC_WRITETHROUGH_QUEUE]: QueueEventType.DOC_WRITETHROUGH, } function logging(queue: Queue, jobQueue: JobQueue) { @@ -128,7 +132,7 @@ // A Job is waiting to be processed as soon as a worker is idling. console.info(...getLogParams(eventType, BullEvent.WAITING, { jobId })) }) - .on(BullEvent.ACTIVE, async (job: Job, jobPromise: any) => { + .on(BullEvent.ACTIVE, async (job: Job) => { // A job has started. You can use `jobPromise.cancel()` to abort it. await doInJobContext(job, () => { console.info(...getLogParams(eventType, BullEvent.ACTIVE, { job })) diff --git a/packages/backend-core/src/queue/queue.ts b/packages/backend-core/src/queue/queue.ts index c0d1861de3..1838eed92f 100644 --- a/packages/backend-core/src/queue/queue.ts +++ b/packages/backend-core/src/queue/queue.ts @@ -2,11 +2,12 @@ import env from "../environment" import { getRedisOptions } from "../redis/utils" import { JobQueue } from "./constants" import InMemoryQueue from "./inMemoryQueue" -import BullQueue, { QueueOptions } from "bull" +import BullQueue, { QueueOptions, JobOptions } from "bull" import { addListeners, StalledFn } from "./listeners" import { Duration } from "../utils" import * as timers from "../timers" -import * as Redis from "ioredis" + +export { QueueOptions, Queue, JobOptions } from "bull" // the queue lock is held for 5 minutes const QUEUE_LOCK_MS = Duration.fromMinutes(5).toMs() @@ -25,17 +26,24 @@ async function cleanup() { export function createQueue<T>( jobQueue: JobQueue, - opts: { removeStalledCb?: StalledFn } = {} + opts: { + removeStalledCb?: StalledFn + maxStalledCount?: number + jobOptions?: JobOptions + } = {} ): BullQueue.Queue<T> { - const { opts: redisOpts, redisProtocolUrl } = getRedisOptions() + const redisOpts = getRedisOptions() const queueConfig: QueueOptions = { - redis: redisProtocolUrl! || (redisOpts as Redis.RedisOptions), + redis: redisOpts, settings: { - maxStalledCount: 0, + maxStalledCount: opts.maxStalledCount ? opts.maxStalledCount : 0, lockDuration: QUEUE_LOCK_MS, lockRenewTime: QUEUE_LOCK_RENEW_INTERNAL_MS, }, } + if (opts.jobOptions) { + queueConfig.defaultJobOptions = opts.jobOptions + } let queue: any if (!env.isTest()) { queue = new BullQueue(jobQueue, queueConfig) @@ -48,7 +56,7 @@ cleanupInterval = timers.set(cleanup, CLEANUP_PERIOD_MS) // fire off an initial cleanup cleanup().catch(err => { - console.error(`Unable to cleanup automation queue initially - ${err}`) + console.error(`Unable to cleanup ${jobQueue} initially - ${err}`) }) } return queue }
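A sketch of creating a queue with the new options (not part of the patch; the payload type is hypothetical, and the job options are standard Bull JobOptions):

import { JobQueue } from "./constants"
import { createQueue } from "./queue"

interface AppMigrationJob {
  appId: string
}

// defaultJobOptions for every job on the queue now flow through opts.jobOptions
const appMigrationQueue = createQueue<AppMigrationJob>(JobQueue.APP_MIGRATION, {
  jobOptions: { attempts: 3, removeOnComplete: true, removeOnFail: true },
})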
diff --git a/packages/backend-core/src/redis/init.ts b/packages/backend-core/src/redis/init.ts index 55ffe3dd12..44ba28a83c 100644 --- a/packages/backend-core/src/redis/init.ts +++ b/packages/backend-core/src/redis/init.ts @@ -7,19 +7,27 @@ let userClient: Client, cacheClient: Client, writethroughClient: Client, lockClient: Client, - socketClient: Client + socketClient: Client, + inviteClient: Client, + passwordResetClient: Client, + docWritethroughClient: Client -async function init() { +export async function init() { userClient = await new Client(utils.Databases.USER_CACHE).init() sessionClient = await new Client(utils.Databases.SESSIONS).init() appClient = await new Client(utils.Databases.APP_METADATA).init() cacheClient = await new Client(utils.Databases.GENERIC_CACHE).init() lockClient = await new Client(utils.Databases.LOCKS).init() writethroughClient = await new Client(utils.Databases.WRITE_THROUGH).init() + inviteClient = await new Client(utils.Databases.INVITATIONS).init() + passwordResetClient = await new Client(utils.Databases.PW_RESETS).init() socketClient = await new Client( utils.Databases.SOCKET_IO, utils.SelectableDatabase.SOCKET_IO ).init() + docWritethroughClient = await new Client( + utils.Databases.DOC_WRITE_THROUGH + ).init() } export async function shutdown() { @@ -29,7 +37,10 @@ if (cacheClient) await cacheClient.finish() if (writethroughClient) await writethroughClient.finish() if (lockClient) await lockClient.finish() + if (inviteClient) await inviteClient.finish() + if (passwordResetClient) await passwordResetClient.finish() if (socketClient) await socketClient.finish() + if (docWritethroughClient) await docWritethroughClient.finish() } process.on("exit", async () => { @@ -84,3 +95,24 @@ export async function getSocketClient() { } return socketClient } + +export async function getInviteClient() { + if (!inviteClient) { + await init() + } + return inviteClient +} + +export async function getPasswordResetClient() { + if (!passwordResetClient) { + await init() + } + return passwordResetClient +} + +export async function getDocWritethroughClient() { + if (!docWritethroughClient) { + await init() + } + return docWritethroughClient +} diff --git a/packages/backend-core/src/redis/redis.ts b/packages/backend-core/src/redis/redis.ts index d1e2d8989e..79f75421d3 100644 --- a/packages/backend-core/src/redis/redis.ts +++ b/packages/backend-core/src/redis/redis.ts @@ -1,5 +1,5 @@ import env from "../environment" -import Redis from "ioredis" +import Redis, { Cluster } from "ioredis" // mock-redis doesn't have any typing let MockRedis: any | undefined if (env.MOCK_REDIS) { @@ -16,7 +16,9 @@ import { getRedisOptions, SEPARATOR, SelectableDatabase, + getRedisConnectionDetails, } from "./utils" +import { logAlert } from "../logging" import * as timers from "../timers" const RETRY_PERIOD_MS = 2000 @@ -26,8 +28,7 @@ const DEFAULT_SELECT_DB = SelectableDatabase.DEFAULT // for testing just generate the client once let CLOSED = false -let CLIENTS: { [key: number]: any } = {} -0 +const CLIENTS: Record<number, any> = {} let CONNECTED = false // mock redis always connected @@ -35,25 +36,20 @@ if (env.MOCK_REDIS) { CONNECTED = true } -function pickClient(selectDb: number): any { +function pickClient(selectDb: number) { return CLIENTS[selectDb] } -function connectionError( - selectDb: number, - timeout: NodeJS.Timeout, - err: Error | string -) { +function connectionError(timeout: NodeJS.Timeout, err: Error | string) { // manually shut down, ignore errors if (CLOSED) { return } - pickClient(selectDb).disconnect() CLOSED = true // always clear this on error clearTimeout(timeout) CONNECTED = false - console.error("Redis connection failed - " + err) + logAlert("Redis connection failed", err) setTimeout(() => { init() }, RETRY_PERIOD_MS) @@ -79,11 +75,7 @@ function init(selectDb = DEFAULT_SELECT_DB) { // start the timer - only allowed 5 seconds to connect timeout = setTimeout(() => { if (!CONNECTED) { - connectionError( - selectDb, - timeout, - "Did not successfully connect in timeout" - ) + connectionError(timeout, "Did not successfully connect in timeout") } }, STARTUP_TIMEOUT_MS) @@ -91,12 +83,11 @@ if (client) { client.disconnect() } - const { redisProtocolUrl, opts, host, port } = getRedisOptions() + const { host, port } = getRedisConnectionDetails() + const opts = getRedisOptions() if (CLUSTERED) { client = new RedisCore.Cluster([{ host, port }], opts) - } else if (redisProtocolUrl) { - client = new RedisCore(redisProtocolUrl) } else { client = new RedisCore(opts) } @@ -107,12 +98,13 @@ // allow the process to exit return } - connectionError(selectDb, timeout, err) + connectionError(timeout, err) }) client.on("error", (err: Error) => { - connectionError(selectDb, timeout, err) + connectionError(timeout, err) }) client.on("connect", () => { + console.log(`Connected to Redis DB: ${selectDb}`) clearTimeout(timeout)
CONNECTED = true }) @@ -209,12 +201,15 @@ class RedisWrapper { key = `${db}${SEPARATOR}${key}` let stream if (CLUSTERED) { - let node = this.getClient().nodes("master") + let node = (this.getClient() as never as Cluster).nodes("master") stream = node[0].scanStream({ match: key + "*", count: 100 }) } else { - stream = this.getClient().scanStream({ match: key + "*", count: 100 }) + stream = (this.getClient() as Redis).scanStream({ + match: key + "*", + count: 100, + }) } - return promisifyStream(stream, this.getClient()) + return promisifyStream(stream, this.getClient() as any) } async keys(pattern: string) { @@ -229,14 +224,16 @@ async get(key: string) { const db = this._db - let response = await this.getClient().get(addDbPrefix(db, key)) + const response = await this.getClient().get(addDbPrefix(db, key)) // overwrite the prefixed key + // @ts-ignore if (response != null && response.key) { + // @ts-ignore response.key = key } // if its not an object just return the response try { - return JSON.parse(response) + return JSON.parse(response!) } catch (err) { return response } @@ -282,13 +279,37 @@ } } + async bulkStore( + data: Record<string, any>, + expirySeconds: number | null = null + ) { + const client = this.getClient() + + const dataToStore = Object.entries(data).reduce((acc, [key, value]) => { + acc[addDbPrefix(this._db, key)] = + typeof value === "object" ? JSON.stringify(value) : value + return acc + }, {} as Record<string, any>) + + const pipeline = client.pipeline() + pipeline.mset(dataToStore) + + if (expirySeconds !== null) { + for (const key of Object.keys(dataToStore)) { + pipeline.expire(key, expirySeconds) + } + } + + await pipeline.exec() + } + async getTTL(key: string) { const db = this._db const prefixedKey = addDbPrefix(db, key) return this.getClient().ttl(prefixedKey) } - async setExpiry(key: string, expirySeconds: number | null) { + async setExpiry(key: string, expirySeconds: number) { const db = this._db const prefixedKey = addDbPrefix(db, key) await this.getClient().expire(prefixedKey, expirySeconds) @@ -299,10 +320,35 @@ await this.getClient().del(addDbPrefix(db, key)) } + async bulkDelete(keys: string[]) { + const db = this._db + await this.getClient().del(keys.map(key => addDbPrefix(db, key))) + } + async clear() { let items = await this.scan() await Promise.all(items.map((obj: any) => this.delete(obj.key))) } + + async increment(key: string) { + const result = await this.getClient().incr(addDbPrefix(this._db, key)) + if (isNaN(result)) { + throw new Error(`Redis ${key} does not contain a number`) + } + return result + } + + async deleteIfValue(key: string, value: any) { + const client = this.getClient() + + const luaScript = ` + if redis.call('GET', KEYS[1]) == ARGV[1] then + redis.call('DEL', KEYS[1]) + end + ` + + await client.eval(luaScript, 1, addDbPrefix(this._db, key), value) + } } export default RedisWrapper
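A sketch of the new RedisWrapper helpers (not part of the patch; keys and values are hypothetical):

import RedisWrapper from "./redis"
import { Databases } from "./utils"

async function example() {
  const redis = new RedisWrapper(Databases.DOC_WRITE_THROUGH)
  await redis.init()

  // one pipeline: mset plus a per-key TTL of 60 seconds
  await redis.bulkStore({ "doc:1": { rev: 1 }, "doc:2": { rev: 2 } }, 60)

  const writes = await redis.increment("doc:1:writes") // 1, 2, 3... atomically
  console.log(writes)

  // compare-and-delete, atomic thanks to the Lua script
  await redis.deleteIfValue("owner:doc:1", "instance-a")
}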
diff --git a/packages/backend-core/src/redis/redlockImpl.ts b/packages/backend-core/src/redis/redlockImpl.ts index 7fe61a409e..adeb5b12ec 100644 --- a/packages/backend-core/src/redis/redlockImpl.ts +++ b/packages/backend-core/src/redis/redlockImpl.ts @@ -2,7 +2,8 @@ import Redlock from "redlock" import { getLockClient } from "./init" import { LockOptions, LockType } from "@budibase/types" import * as context from "../context" -import env from "../environment" +import { utils } from "@budibase/shared-core" +import { Duration } from "../utils" async function getClient( type: LockType, @@ -11,9 +12,7 @@ async function getClient( if (type === LockType.CUSTOM) { return newRedlock(opts) } - if (env.isTest() && type !== LockType.TRY_ONCE) { - return newRedlock(OPTIONS.TEST) - } + switch (type) { case LockType.TRY_ONCE: { return newRedlock(OPTIONS.TRY_ONCE) @@ -27,13 +26,16 @@ case LockType.DELAY_500: { return newRedlock(OPTIONS.DELAY_500) } + case LockType.AUTO_EXTEND: { + return newRedlock(OPTIONS.AUTO_EXTEND) + } default: { - throw new Error(`Could not get redlock client: ${type}`) + throw utils.unreachable(type) } } } -const OPTIONS = { +const OPTIONS: Record<LockType, Redlock.Options> = { TRY_ONCE: { // immediately throws an error if the lock is already held retryCount: 0, }, TRY_TWICE: { retryCount: 1, }, - TEST: { - // higher retry count in unit tests - // due to high contention. - retryCount: 100, - }, DEFAULT: { // the expected clock drift; for more details // see http://redis.io/topics/distlock @@ -66,12 +63,16 @@ DELAY_500: { retryDelay: 500, }, + CUSTOM: {}, + AUTO_EXTEND: { + retryCount: -1, + }, } export async function newRedlock(opts: Redlock.Options = {}) { - let options = { ...OPTIONS.DEFAULT, ...opts } + const options = { ...OPTIONS.DEFAULT, ...opts } const redisWrapper = await getLockClient() - const client = redisWrapper.getClient() + const client = redisWrapper.getClient() as any return new Redlock([client], options) } @@ -99,24 +100,42 @@ function getLockName(opts: LockOptions) { return name } +export const AUTO_EXTEND_POLLING_MS = Duration.fromSeconds(10).toMs() + export async function doWithLock<T>( opts: LockOptions, task: () => Promise<T> ): Promise<RedlockExecution<T>> { const redlock = await getClient(opts.type, opts.customOptions) - let lock + let lock: Redlock.Lock | undefined + let timeout try { const name = getLockName(opts) + const ttl = + opts.type === LockType.AUTO_EXTEND ? AUTO_EXTEND_POLLING_MS : opts.ttl + // create the lock - lock = await redlock.lock(name, opts.ttl) + lock = await redlock.lock(name, ttl) + + if (opts.type === LockType.AUTO_EXTEND) { + // We keep extending the lock while the task is running + const extendInIntervals = (): void => { + timeout = setTimeout(async () => { + lock = await lock!.extend(ttl, () => opts.onExtend && opts.onExtend()) + + extendInIntervals() + }, ttl / 2) + } + + extendInIntervals() + } // perform locked task // need to await to ensure completion before unlocking const result = await task() return { executed: true, result } } catch (e: any) { - console.warn("lock error") // lock limit exceeded if (e.name === "LockError") { if (opts.type === LockType.TRY_ONCE) { @@ -124,16 +143,13 @@ // due to retry count (0) exceeded return { executed: false } } else { - console.error(e) throw e } } else { - console.error(e) throw e } } finally { - if (lock) { - await lock.unlock() - } + clearTimeout(timeout) + await lock?.unlock() } }
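A sketch of taking an auto-extending lock (not part of the patch; LockName.PERSIST_WRITETHROUGH is borrowed from the spec below, and the task body is hypothetical):

import { LockName, LockType } from "@budibase/types"
import { doWithLock } from "./redlockImpl"

async function migrateWithLock() {
  // no ttl needed for AUTO_EXTEND: the lock is taken for
  // AUTO_EXTEND_POLLING_MS and re-extended on a timer until the task settles
  const outcome = await doWithLock(
    {
      name: LockName.PERSIST_WRITETHROUGH,
      type: LockType.AUTO_EXTEND,
      onExtend: () => console.log("lock extended"),
    },
    async () => "migrated"
  )
  return outcome.executed ? outcome.result : undefined
}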
diff --git a/packages/backend-core/src/redis/tests/redis.spec.ts b/packages/backend-core/src/redis/tests/redis.spec.ts new file mode 100644 index 0000000000..4d11caf220 --- /dev/null +++ b/packages/backend-core/src/redis/tests/redis.spec.ts @@ -0,0 +1,203 @@ +import { GenericContainer, StartedTestContainer } from "testcontainers" +import { generator, structures } from "../../../tests" +import RedisWrapper from "../redis" +import { env } from "../.." + +jest.setTimeout(30000) + +describe("redis", () => { + let redis: RedisWrapper + let container: StartedTestContainer + + beforeAll(async () => { + container = await new GenericContainer("redis") + .withExposedPorts(6379) + .start() + + env._set( + "REDIS_URL", + `${container.getHost()}:${container.getMappedPort(6379)}` + ) + env._set("MOCK_REDIS", 0) + env._set("REDIS_PASSWORD", 0) + }) + + afterAll(() => container?.stop()) + + beforeEach(async () => { + redis = new RedisWrapper(structures.db.id()) + await redis.init() + }) + + describe("store", () => { + it("a basic value can be persisted", async () => { + const key = structures.uuid() + const value = generator.word() + + await redis.store(key, value) + + expect(await redis.get(key)).toEqual(value) + }) + + it("objects can be persisted", async () => { + const key = structures.uuid() + const value = { [generator.word()]: generator.word() } + + await redis.store(key, value) + + expect(await redis.get(key)).toEqual(value) + }) + }) + + describe("bulkStore", () => { + function createRandomObject( + keyLength: number, + valueGenerator: () => any = () => generator.word() + ) { + return generator + .unique(() => generator.word(), keyLength) + .reduce((acc, key) => { + acc[key] = valueGenerator() + return acc + }, {} as Record<string, any>) + } + + it("a basic object can be persisted", async () => { + const data = createRandomObject(10) + + await redis.bulkStore(data) + + for (const [key, value] of Object.entries(data)) { + expect(await redis.get(key)).toEqual(value) + } + + expect(await redis.keys("*")).toHaveLength(10) + }) + + it("a complex object can be persisted", async () => { + const data = { + ...createRandomObject(10, () => createRandomObject(5)), + ...createRandomObject(5), + } + + await redis.bulkStore(data) + + for (const [key, value] of Object.entries(data)) { + expect(await redis.get(key)).toEqual(value) + } + + expect(await redis.keys("*")).toHaveLength(15) + }) + + it("no TTL is set by default", async () => { + const data = createRandomObject(10) + + await redis.bulkStore(data) + + for (const [key, value] of Object.entries(data)) { + expect(await redis.get(key)).toEqual(value) + expect(await redis.getTTL(key)).toEqual(-1) + } + }) + + it("a bulk store can be persisted with TTL", async () => { + const ttl = 500 + const data = createRandomObject(8) + + await redis.bulkStore(data, ttl) + + for (const [key, value] of Object.entries(data)) { + expect(await redis.get(key)).toEqual(value) + expect(await redis.getTTL(key)).toEqual(ttl) + } + + expect(await redis.keys("*")).toHaveLength(8) + }) + + it("setting a TTL of -1 will not persist the key", async () => { + const ttl = -1 + const data = createRandomObject(5) + + await redis.bulkStore(data, ttl) + + for (const key of Object.keys(data)) { + expect(await redis.get(key)).toBe(null) + } + + expect(await redis.keys("*")).toHaveLength(0) + }) + }) + + describe("increment", () => { + it("can increment on a new key", async () => { + const key = structures.uuid() + const result = await redis.increment(key) + expect(result).toBe(1) + }) + + it("can increment multiple times", async () => { + const key = structures.uuid() + const results = [ + await redis.increment(key), + await redis.increment(key), + await redis.increment(key), + await redis.increment(key), + await redis.increment(key), + ] + expect(results).toEqual([1, 2, 3, 4, 5]) + }) + + it("can increment multiple times in parallel", async () => { + const key = structures.uuid() + const results = await Promise.all( + Array.from({ length: 100
}).map(() => redis.increment(key)) + ) + expect(results).toHaveLength(100) + expect(results).toEqual(Array.from({ length: 100 }).map((_, i) => i + 1)) + }) + + it("can increment existing set keys", async () => { + const key = structures.uuid() + await redis.store(key, 70) + await redis.increment(key) + + const result = await redis.increment(key) + expect(result).toBe(72) + }) + + it.each([ + generator.word(), + generator.bool(), + { [generator.word()]: generator.word() }, + ])("cannot increment if the store value is not a number", async value => { + const key = structures.uuid() + await redis.store(key, value) + + await expect(redis.increment(key)).rejects.toThrow( + "ERR value is not an integer or out of range" + ) + }) + }) + + describe("deleteIfValue", () => { + it("can delete if the value matches", async () => { + const key = structures.uuid() + const value = generator.word() + await redis.store(key, value) + + await redis.deleteIfValue(key, value) + + expect(await redis.get(key)).toBeNull() + }) + + it("will not delete if the value does not match", async () => { + const key = structures.uuid() + const value = generator.word() + await redis.store(key, value) + + await redis.deleteIfValue(key, generator.word()) + + expect(await redis.get(key)).toEqual(value) + }) + }) +}) diff --git a/packages/backend-core/src/redis/tests/redlockImpl.spec.ts b/packages/backend-core/src/redis/tests/redlockImpl.spec.ts new file mode 100644 index 0000000000..e647b63bf5 --- /dev/null +++ b/packages/backend-core/src/redis/tests/redlockImpl.spec.ts @@ -0,0 +1,105 @@ +import { LockName, LockType, LockOptions } from "@budibase/types" +import { AUTO_EXTEND_POLLING_MS, doWithLock } from "../redlockImpl" +import { DBTestConfiguration, generator } from "../../../tests" + +describe("redlockImpl", () => { + beforeEach(() => { + jest.useFakeTimers() + }) + + describe("doWithLock", () => { + const config = new DBTestConfiguration() + const lockTtl = AUTO_EXTEND_POLLING_MS + + function runLockWithExecutionTime<T>({ + opts, + task, + executionTimeMs, + }: { + opts: LockOptions + task: () => Promise<T> + executionTimeMs: number + }) { + return config.doInTenant(() => + doWithLock(opts, async () => { + // Run in multiple intervals until hitting the expected time + const interval = lockTtl / 10 + for (let i = executionTimeMs; i > 0; i -= interval) { + await jest.advanceTimersByTimeAsync(interval) + } + return task() + }) + ) + } + + it.each(Object.values(LockType))( + "should return the task value and release the lock", + async (lockType: LockType) => { + const expectedResult = generator.guid() + const mockTask = jest.fn().mockResolvedValue(expectedResult) + + const opts: LockOptions = { + name: LockName.PERSIST_WRITETHROUGH, + type: lockType, + ttl: lockTtl, + } + + const result = await runLockWithExecutionTime({ + opts, + task: mockTask, + executionTimeMs: 0, + }) + + expect(result.executed).toBe(true) + expect(result.executed &&
result.result).toBe(expectedResult) + expect(mockTask).toHaveBeenCalledTimes(1) + expect(mockOnExtend).toHaveBeenCalledTimes(5) + }) + + it.each(Object.values(LockType).filter(t => t !== LockType.AUTO_EXTEND))( + "should timeout when type is %s", + async (lockType: LockType) => { + const mockTask = jest.fn().mockResolvedValue("mockResult") + + const opts: LockOptions = { + name: LockName.PERSIST_WRITETHROUGH, + type: lockType, + ttl: lockTtl, + } + + await expect( + runLockWithExecutionTime({ + opts, + task: mockTask, + executionTimeMs: lockTtl * 2, + }) + ).rejects.toThrow( + `Unable to fully release the lock on resource "lock:${config.tenantId}_persist_writethrough".` + ) + } + ) + }) +}) diff --git a/packages/backend-core/src/redis/utils.ts b/packages/backend-core/src/redis/utils.ts index 34b7275a2b..7f84f11467 100644 --- a/packages/backend-core/src/redis/utils.ts +++ b/packages/backend-core/src/redis/utils.ts @@ -1,4 +1,5 @@ import env from "../environment" +import * as Redis from "ioredis" const SLOT_REFRESH_MS = 2000 const CONNECT_TIMEOUT_MS = 10000 @@ -28,6 +29,8 @@ export enum Databases { WRITE_THROUGH = "writeThrough", LOCKS = "locks", SOCKET_IO = "socket_io", + BPM_EVENTS = "bpmEvents", + DOC_WRITE_THROUGH = "docWriteThrough", } /** @@ -42,7 +45,7 @@ export enum Databases { export enum SelectableDatabase { DEFAULT = 0, SOCKET_IO = 1, - UNUSED_1 = 2, + RATE_LIMITING = 2, UNUSED_2 = 3, UNUSED_3 = 4, UNUSED_4 = 5, @@ -58,7 +61,7 @@ export enum SelectableDatabase { UNUSED_14 = 15, } -export function getRedisOptions() { +export function getRedisConnectionDetails() { let password = env.REDIS_PASSWORD let url: string[] | string = env.REDIS_URL.split("//") // get rid of the protocol @@ -74,28 +77,36 @@ export function getRedisOptions() { } const [host, port] = url.split(":") - let redisProtocolUrl - - // fully qualified redis URL - if (/rediss?:\/\//.test(env.REDIS_URL)) { - redisProtocolUrl = env.REDIS_URL + const portNumber = parseInt(port) + return { + host, + password, + // assume default port for redis if invalid found + port: isNaN(portNumber) ? 
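+    // (6379 being the IANA-registered Redis default)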
6379 : portNumber, } +} - const opts: any = { +export function getRedisOptions() { + const { host, password, port } = getRedisConnectionDetails() + let redisOpts: Redis.RedisOptions = { connectTimeout: CONNECT_TIMEOUT_MS, + port: port, + host, + password, } + let opts: Redis.ClusterOptions | Redis.RedisOptions = redisOpts if (env.REDIS_CLUSTERED) { - opts.redisOptions = {} - opts.redisOptions.tls = {} - opts.redisOptions.password = password - opts.slotsRefreshTimeout = SLOT_REFRESH_MS - opts.dnsLookup = (address: string, callback: any) => callback(null, address) - } else { - opts.host = host - opts.port = port - opts.password = password + opts = { + connectTimeout: CONNECT_TIMEOUT_MS, + redisOptions: { + ...redisOpts, + tls: {}, + }, + slotsRefreshTimeout: SLOT_REFRESH_MS, + dnsLookup: (address: string, callback: any) => callback(null, address), + } as Redis.ClusterOptions } - return { opts, host, port: parseInt(port), redisProtocolUrl } + return opts } export function addDbPrefix(db: string, key: string) { diff --git a/packages/backend-core/src/security/auth.ts b/packages/backend-core/src/security/auth.ts new file mode 100644 index 0000000000..1cce35a0af --- /dev/null +++ b/packages/backend-core/src/security/auth.ts @@ -0,0 +1,24 @@ +import env from "../environment" + +export const PASSWORD_MIN_LENGTH = +(env.PASSWORD_MIN_LENGTH || 8) +export const PASSWORD_MAX_LENGTH = +(env.PASSWORD_MAX_LENGTH || 512) + +export function validatePassword( + password: string +): { valid: true } | { valid: false; error: string } { + if (!password || password.length < PASSWORD_MIN_LENGTH) { + return { + valid: false, + error: `Password invalid. Minimum ${PASSWORD_MIN_LENGTH} characters.`, + } + } + + if (password.length > PASSWORD_MAX_LENGTH) { + return { + valid: false, + error: `Password invalid. 
Maximum ${PASSWORD_MAX_LENGTH} characters.`,
+    }
+  }
+
+  return { valid: true }
+}
diff --git a/packages/backend-core/src/security/encryption.ts b/packages/backend-core/src/security/encryption.ts
index 7a8cfaf04a..45ed566a92 100644
--- a/packages/backend-core/src/security/encryption.ts
+++ b/packages/backend-core/src/security/encryption.ts
@@ -73,6 +73,9 @@ export async function encryptFile(
   const outputFileName = `${filename}.enc`
 
   const filePath = join(dir, filename)
+  if (fs.lstatSync(filePath).isDirectory()) {
+    throw new Error("Unable to encrypt directory")
+  }
   const inputFile = fs.createReadStream(filePath)
   const outputFile = fs.createWriteStream(join(dir, outputFileName))
 
@@ -110,6 +113,9 @@ export async function decryptFile(
   outputPath: string,
   secret: string
 ) {
+  if (fs.lstatSync(inputPath).isDirectory()) {
+    throw new Error("Unable to decrypt directory")
+  }
   const { salt, iv } = await getSaltAndIV(inputPath)
   const inputFile = fs.createReadStream(inputPath, {
     start: SALT_LENGTH + IV_LENGTH,
diff --git a/packages/backend-core/src/security/index.ts b/packages/backend-core/src/security/index.ts
new file mode 100644
index 0000000000..306751af96
--- /dev/null
+++ b/packages/backend-core/src/security/index.ts
@@ -0,0 +1 @@
+export * from "./auth"
diff --git a/packages/backend-core/src/security/permissions.ts b/packages/backend-core/src/security/permissions.ts
index fe4095d210..98704f16c6 100644
--- a/packages/backend-core/src/security/permissions.ts
+++ b/packages/backend-core/src/security/permissions.ts
@@ -160,4 +160,5 @@ export function isPermissionLevelHigherThanRead(level: PermissionLevel) {
 
 // utility as a lot of things need simply the builder permission
 export const BUILDER = PermissionType.BUILDER
+export const CREATOR = PermissionType.CREATOR
 export const GLOBAL_BUILDER = PermissionType.GLOBAL_BUILDER
diff --git a/packages/backend-core/src/security/roles.ts b/packages/backend-core/src/security/roles.ts
index 0d33031de5..a64be6b319 100644
--- a/packages/backend-core/src/security/roles.ts
+++ b/packages/backend-core/src/security/roles.ts
@@ -1,7 +1,12 @@
 import { BuiltinPermissionID, PermissionLevel } from "./permissions"
-import { prefixRoleID, getRoleParams, DocumentType, SEPARATOR } from "../db"
+import {
+  prefixRoleID,
+  getRoleParams,
+  DocumentType,
+  SEPARATOR,
+  doWithDB,
+} from "../db"
 import { getAppDB } from "../context"
-import { doWithDB } from "../db"
 import { Screen, Role as RoleDoc } from "@budibase/types"
 import cloneDeep from "lodash/fp/cloneDeep"
 
@@ -79,25 +84,24 @@ export function getBuiltinRoles(): { [key: string]: RoleDoc } {
   return cloneDeep(BUILTIN_ROLES)
 }
 
-export const BUILTIN_ROLE_ID_ARRAY = Object.values(BUILTIN_ROLES).map(
-  role => role._id
-)
+export function isBuiltin(role: string) {
+  return getBuiltinRole(role) !== undefined
+}
 
-export const BUILTIN_ROLE_NAME_ARRAY = Object.values(BUILTIN_ROLES).map(
-  role => role.name
-)
-
-export function isBuiltin(role?: string) {
-  return BUILTIN_ROLE_ID_ARRAY.some(builtin => role?.includes(builtin))
+export function getBuiltinRole(roleId: string): Role | undefined {
+  const role = Object.values(BUILTIN_ROLES).find(role =>
+    roleId.includes(role._id)
+  )
+  if (!role) {
+    return undefined
+  }
+  return cloneDeep(role)
 }
 
 /**
  * Works through the inheritance ranks to see how far up the builtin stack this ID is.
  */
-export function builtinRoleToNumber(id?: string) {
-  if (!id) {
-    return 0
-  }
+export function builtinRoleToNumber(id: string) {
   const builtins = getBuiltinRoles()
   const MAX = Object.values(builtins).length + 1
   if (id === BUILTIN_IDS.ADMIN || id === BUILTIN_IDS.BUILDER) {
@@ -118,7 +122,7 @@
 /**
  * Converts any role to a number, but has to be async to get the roles from db.
  */
-export async function roleToNumber(id?: string) {
+export async function roleToNumber(id: string) {
   if (isBuiltin(id)) {
     return builtinRoleToNumber(id)
   }
@@ -126,7 +130,7 @@
     defaultPublic: true,
   })) as RoleDoc[]
   for (let role of hierarchy) {
-    if (isBuiltin(role?.inherits)) {
+    if (role?.inherits && isBuiltin(role.inherits)) {
       return builtinRoleToNumber(role.inherits) + 1
     }
   }
@@ -156,35 +160,28 @@ export function lowerBuiltinRoleID(roleId1?: string, roleId2?: string): string {
  * @returns The role object, which may contain an "inherits" property.
  */
 export async function getRole(
-  roleId?: string,
+  roleId: string,
   opts?: {
     defaultPublic?: boolean
   }
-): Promise<RoleDoc | undefined> {
-  if (!roleId) {
-    return undefined
-  }
-  let role: any = {}
+): Promise<RoleDoc> {
   // built in roles mostly come from the in-code implementation,
   // but can be extended by a doc stored about them (e.g. permissions)
-  if (isBuiltin(roleId)) {
-    role = cloneDeep(
-      Object.values(BUILTIN_ROLES).find(role => role._id === roleId)
-    )
-  } else {
+  let role: RoleDoc | undefined = getBuiltinRole(roleId)
+  if (!role) {
     // make sure has the prefix (if it has it then it won't be added)
     roleId = prefixRoleID(roleId)
   }
   try {
     const db = getAppDB()
-    const dbRole = await db.get(getDBRoleID(roleId))
-    role = Object.assign(role, dbRole)
+    const dbRole = await db.get<RoleDoc>(getDBRoleID(roleId))
+    role = Object.assign(role || {}, dbRole)
     // finalise the ID
-    role._id = getExternalRoleID(role._id, role.version)
+    role._id = getExternalRoleID(role._id!, role.version)
   } catch (err) {
     if (!isBuiltin(roleId) && opts?.defaultPublic) {
       return cloneDeep(BUILTIN_ROLES.PUBLIC)
     }
     // only throw an error if there is no role at all
-    if (Object.keys(role).length === 0) {
+    if (!role || Object.keys(role).length === 0) {
       throw err
     }
   }
@@ -195,7 +192,7 @@
  * Simple function to get all the roles based on the top level user role ID.
 */
 async function getAllUserRoles(
-  userRoleId?: string,
+  userRoleId: string,
   opts?: { defaultPublic?: boolean }
 ): Promise<RoleDoc[]> {
   // admins have access to all roles
@@ -221,7 +218,7 @@
 }
 
 export async function getUserRoleIdHierarchy(
-  userRoleId?: string
+  userRoleId: string
 ): Promise<string[]> {
   const roles = await getUserRoleHierarchy(userRoleId)
   return roles.map(role => role._id!)
 }
@@ -236,7 +233,7 @@
 * highest level of access and the last being the lowest level.
 */
 export async function getUserRoleHierarchy(
-  userRoleId?: string,
+  userRoleId: string,
   opts?: { defaultPublic?: boolean }
 ) {
   // special case, if they don't have a role then they are a public user
@@ -260,9 +257,9 @@ export function checkForRoleResourceArray(
   return rolePerms
 }
 
-export async function getAllRoleIds(appId?: string) {
+export async function getAllRoleIds(appId: string): Promise<string[]> {
   const roles = await getAllRoles(appId)
-  return roles.map(role => role._id)
+  return roles.map(role => role._id!)
} /** diff --git a/packages/backend-core/src/security/sessions.ts b/packages/backend-core/src/security/sessions.ts index 5a535c0c46..8d7b43d5b6 100644 --- a/packages/backend-core/src/security/sessions.ts +++ b/packages/backend-core/src/security/sessions.ts @@ -1,7 +1,8 @@ -const redis = require("../redis/init") -const { v4: uuidv4 } = require("uuid") -const { logWarn } = require("../logging") +import * as redis from "../redis/init" +import { v4 as uuidv4 } from "uuid" +import { logWarn } from "../logging" import env from "../environment" +import { Duration } from "../utils" import { Session, ScannedSession, @@ -9,8 +10,10 @@ import { CreateSession, } from "@budibase/types" -// a week in seconds -const EXPIRY_SECONDS = 86400 * 7 +// a week expiry is the default +const EXPIRY_SECONDS = env.SESSION_EXPIRY_SECONDS + ? parseInt(env.SESSION_EXPIRY_SECONDS) + : Duration.fromDays(7).toSeconds() function makeSessionID(userId: string, sessionId: string) { return `${userId}/${sessionId}` diff --git a/packages/backend-core/src/security/tests/auth.spec.ts b/packages/backend-core/src/security/tests/auth.spec.ts new file mode 100644 index 0000000000..b1835fdfb3 --- /dev/null +++ b/packages/backend-core/src/security/tests/auth.spec.ts @@ -0,0 +1,45 @@ +import { generator } from "../../../tests" +import { PASSWORD_MAX_LENGTH, validatePassword } from "../auth" + +describe("auth", () => { + describe("validatePassword", () => { + it("a valid password returns successful", () => { + expect(validatePassword("password")).toEqual({ valid: true }) + }) + + it.each([ + ["undefined", undefined], + ["null", null], + ["empty", ""], + ])("%s returns unsuccessful", (_, password) => { + expect(validatePassword(password as string)).toEqual({ + valid: false, + error: "Password invalid. Minimum 8 characters.", + }) + }) + + it.each([ + generator.word({ length: PASSWORD_MAX_LENGTH }), + generator.paragraph().substring(0, PASSWORD_MAX_LENGTH), + ])(`can use passwords up to 512 characters in length`, password => { + expect(validatePassword(password)).toEqual({ + valid: true, + }) + }) + + it.each([ + generator.word({ length: PASSWORD_MAX_LENGTH + 1 }), + generator + .paragraph({ sentences: 50 }) + .substring(0, PASSWORD_MAX_LENGTH + 1), + ])( + `passwords cannot have more than ${PASSWORD_MAX_LENGTH} characters`, + password => { + expect(validatePassword(password)).toEqual({ + valid: false, + error: "Password invalid. Maximum 512 characters.", + }) + } + ) + }) +}) diff --git a/packages/backend-core/src/tenancy/tenancy.ts b/packages/backend-core/src/tenancy/tenancy.ts index 7b17bdbe18..8835960ca5 100644 --- a/packages/backend-core/src/tenancy/tenancy.ts +++ b/packages/backend-core/src/tenancy/tenancy.ts @@ -39,7 +39,7 @@ const ALL_STRATEGIES = Object.values(TenantResolutionStrategy) export const getTenantIDFromCtx = ( ctx: BBContext, opts: GetTenantIdOptions -): string | null => { +): string | undefined => { // exit early if not multi-tenant if (!isMultiTenant()) { return DEFAULT_TENANT_ID @@ -93,11 +93,19 @@ export const getTenantIDFromCtx = ( // subdomain if (isAllowed(TenantResolutionStrategy.SUBDOMAIN)) { // e.g. budibase.app or local.com:10000 - const platformHost = new URL(getPlatformURL()).host.split(":")[0] + let platformHost + try { + platformHost = new URL(getPlatformURL()).host.split(":")[0] + } catch (err: any) { + // if invalid URL, just don't try to process subdomain + if (err.code !== "ERR_INVALID_URL") { + throw err + } + } // e.g. 
tenant.budibase.app or tenant.local.com const requestHost = ctx.host // parse the tenant id from the difference - if (requestHost.includes(platformHost)) { + if (platformHost && requestHost.includes(platformHost)) { const tenantId = requestHost.substring( 0, requestHost.indexOf(`.${platformHost}`) @@ -136,5 +144,5 @@ export const getTenantIDFromCtx = ( ctx.throw(403, "Tenant id not set") } - return null + return undefined } diff --git a/packages/backend-core/src/tenancy/tests/tenancy.spec.ts b/packages/backend-core/src/tenancy/tests/tenancy.spec.ts index ebeaca074c..34e9f87064 100644 --- a/packages/backend-core/src/tenancy/tests/tenancy.spec.ts +++ b/packages/backend-core/src/tenancy/tests/tenancy.spec.ts @@ -157,12 +157,12 @@ describe("getTenantIDFromCtx", () => { TenantResolutionStrategy.PATH, ], } - expect(getTenantIDFromCtx(ctx, mockOpts)).toBeNull() - expect(ctx.throw).toBeCalledTimes(1) - expect(ctx.throw).toBeCalledWith(403, "Tenant id not set") + expect(getTenantIDFromCtx(ctx, mockOpts)).toBeUndefined() + expect(ctx.throw).toHaveBeenCalledTimes(1) + expect(ctx.throw).toHaveBeenCalledWith(403, "Tenant id not set") }) - it("returns null if allowNoTenant is true", () => { + it("returns undefined if allowNoTenant is true", () => { const ctx = createCtx({}) mockOpts = { allowNoTenant: true, @@ -172,7 +172,7 @@ describe("getTenantIDFromCtx", () => { TenantResolutionStrategy.PATH, ], } - expect(getTenantIDFromCtx(ctx, mockOpts)).toBeNull() + expect(getTenantIDFromCtx(ctx, mockOpts)).toBeUndefined() }) }) diff --git a/packages/backend-core/src/users/db.ts b/packages/backend-core/src/users/db.ts index c071064713..04d3264e6f 100644 --- a/packages/backend-core/src/users/db.ts +++ b/packages/backend-core/src/users/db.ts @@ -1,6 +1,5 @@ import env from "../environment" import * as eventHelpers from "./events" -import * as accounts from "../accounts" import * as accountSdk from "../accounts" import * as cache from "../cache" import { getGlobalDB, getIdentity, getTenantId } from "../context" @@ -11,12 +10,10 @@ import * as sessions from "../security/sessions" import * as usersCore from "./users" import { Account, - AllDocsResponse, BulkUserCreated, BulkUserDeleted, isSSOAccount, isSSOUser, - RowResponse, SaveUserOpts, User, UserStatus, @@ -30,6 +27,7 @@ import { } from "./utils" import { searchExistingEmails } from "./lookup" import { hash } from "../utils" +import { validatePassword } from "../security" type QuotaUpdateFn = ( change: number, @@ -46,6 +44,12 @@ type GroupFns = { getBulk: GroupGetFn getGroupBuilderAppIds: GroupBuildersFn } +type CreateAdminUserOpts = { + ssoId?: string + hashPassword?: boolean + requirePassword?: boolean + skipPasswordValidation?: boolean +} type FeatureFns = { isSSOEnforced: FeatureFn; isAppBuildersEnabled: FeatureFn } const bulkDeleteProcessing = async (dbUser: User) => { @@ -113,6 +117,14 @@ export class UserDB { if (await UserDB.isPreventPasswordActions(user, account)) { throw new HTTPError("Password change is disabled for this user", 400) } + + if (!opts.skipPasswordValidation) { + const passwordValidation = validatePassword(password) + if (!passwordValidation.valid) { + throw new HTTPError(passwordValidation.error, 400) + } + } + hashedPassword = opts.hashPassword ? 
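      // (when opts.hashPassword is false the caller is expected to supply an
      // already-hashed value, which is stored as-is)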
await hash(password) : password } else if (dbUser) { hashedPassword = dbUser.password @@ -149,12 +161,12 @@ export class UserDB { static async allUsers() { const db = getGlobalDB() - const response = await db.allDocs( + const response = await db.allDocs( dbUtils.getGlobalUserParams(null, { include_docs: true, }) ) - return response.rows.map((row: any) => row.doc) + return response.rows.map(row => row.doc!) } static async countUsersByApp(appId: string) { @@ -212,13 +224,6 @@ export class UserDB { throw new Error("_id or email is required") } - if ( - user.builder?.apps?.length && - !(await UserDB.features.isAppBuildersEnabled()) - ) { - throw new Error("Unable to update app builders, please check license") - } - let dbUser: User | undefined if (_id) { // try to get existing user from db @@ -246,7 +251,8 @@ export class UserDB { } const change = dbUser ? 0 : 1 // no change if there is existing user - const creatorsChange = isCreator(dbUser) !== isCreator(user) ? 1 : 0 + const creatorsChange = + (await isCreator(dbUser)) !== (await isCreator(user)) ? 1 : 0 return UserDB.quotas.addUsers(change, creatorsChange, async () => { await validateUniqueUser(email, tenantId) @@ -303,7 +309,7 @@ export class UserDB { static async bulkCreate( newUsersRequested: User[], - groups: string[] + groups?: string[] ): Promise { const tenantId = getTenantId() @@ -328,9 +334,9 @@ export class UserDB { }) continue } - newUser.userGroups = groups + newUser.userGroups = groups || [] newUsers.push(newUser) - if (isCreator(newUser)) { + if (await isCreator(newUser)) { newCreators.push(newUser) } } @@ -413,15 +419,13 @@ export class UserDB { } // Get users and delete - const allDocsResponse: AllDocsResponse = await db.allDocs({ + const allDocsResponse = await db.allDocs({ include_docs: true, keys: userIds, }) - const usersToDelete: User[] = allDocsResponse.rows.map( - (user: RowResponse) => { - return user.doc - } - ) + const usersToDelete = allDocsResponse.rows.map(user => { + return user.doc! + }) // Delete from DB const toDelete = usersToDelete.map(user => ({ @@ -429,12 +433,16 @@ export class UserDB { _deleted: true, })) const dbResponse = await usersCore.bulkUpdateGlobalUsers(toDelete) - const creatorsToDelete = usersToDelete.filter(isCreator) + + const creatorsEval = await Promise.all(usersToDelete.map(isCreator)) + const creatorsToDeleteCount = creatorsEval.filter( + creator => !!creator + ).length for (let user of usersToDelete) { await bulkDeleteProcessing(user) } - await UserDB.quotas.removeUsers(toDelete.length, creatorsToDelete.length) + await UserDB.quotas.removeUsers(toDelete.length, creatorsToDeleteCount) // Build Response // index users by id @@ -469,7 +477,7 @@ export class UserDB { if (!env.SELF_HOSTED && !env.DISABLE_ACCOUNT_PORTAL) { // root account holder can't be deleted from inside budibase const email = dbUser.email - const account = await accounts.getAccount(email) + const account = await accountSdk.getAccount(email) if (account) { if (dbUser.userId === getIdentity()!._id) { throw new HTTPError('Please visit "Account" to delete this user', 400) @@ -483,13 +491,45 @@ export class UserDB { await db.remove(userId, dbUser._rev) - const creatorsToDelete = isCreator(dbUser) ? 1 : 0 + const creatorsToDelete = (await isCreator(dbUser)) ? 
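+    // (creator seats are tracked separately from plain user seats in the quota system)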
1 : 0 await UserDB.quotas.removeUsers(1, creatorsToDelete) await eventHelpers.handleDeleteEvents(dbUser) await cache.user.invalidateUser(userId) await sessions.invalidateSessions(userId, { reason: "deletion" }) } + static async createAdminUser( + email: string, + tenantId: string, + password?: string, + opts?: CreateAdminUserOpts + ) { + const user: User = { + email: email, + password, + createdAt: Date.now(), + roles: {}, + builder: { + global: true, + }, + admin: { + global: true, + }, + tenantId, + } + if (opts?.ssoId) { + user.ssoId = opts.ssoId + } + // always bust checklist beforehand, if an error occurs but can proceed, don't get + // stuck in a cycle + await cache.bustCache(cache.CacheKey.CHECKLIST) + return await UserDB.save(user, { + hashPassword: opts?.hashPassword, + requirePassword: opts?.requirePassword, + skipPasswordValidation: opts?.skipPasswordValidation, + }) + } + static async getGroups(groupIds: string[]) { return await this.groups.getBulk(groupIds) } diff --git a/packages/backend-core/src/users/lookup.ts b/packages/backend-core/src/users/lookup.ts index 17d0e91d88..355be74dab 100644 --- a/packages/backend-core/src/users/lookup.ts +++ b/packages/backend-core/src/users/lookup.ts @@ -6,6 +6,7 @@ import { } from "@budibase/types" import * as dbUtils from "../db" import { ViewName } from "../constants" +import { getExistingInvites } from "../cache/invite" /** * Apply a system-wide search on emails: @@ -26,6 +27,9 @@ export async function searchExistingEmails(emails: string[]) { const existingAccounts = await getExistingAccounts(emails) matchedEmails.push(...existingAccounts.map(account => account.email)) + const invitedEmails = await getExistingInvites(emails) + matchedEmails.push(...invitedEmails.map(invite => invite.email)) + return [...new Set(matchedEmails.map(email => email.toLowerCase()))] } diff --git a/packages/backend-core/src/users/test/utils.spec.ts b/packages/backend-core/src/users/test/utils.spec.ts new file mode 100644 index 0000000000..cb98b8972b --- /dev/null +++ b/packages/backend-core/src/users/test/utils.spec.ts @@ -0,0 +1,67 @@ +import { User, UserGroup } from "@budibase/types" +import { generator, structures } from "../../../tests" +import { DBTestConfiguration } from "../../../tests/extra" +import { getGlobalDB } from "../../context" +import { isCreator } from "../utils" + +const config = new DBTestConfiguration() + +describe("Users", () => { + it("User is a creator if it is configured as a global builder", async () => { + const user: User = structures.users.user({ builder: { global: true } }) + expect(await isCreator(user)).toBe(true) + }) + + it("User is a creator if it is configured as a global admin", async () => { + const user: User = structures.users.user({ admin: { global: true } }) + expect(await isCreator(user)).toBe(true) + }) + + it("User is a creator if it is configured with creator permission", async () => { + const user: User = structures.users.user({ builder: { creator: true } }) + expect(await isCreator(user)).toBe(true) + }) + + it("User is a creator if it is a builder in some application", async () => { + const user: User = structures.users.user({ builder: { apps: ["app1"] } }) + expect(await isCreator(user)).toBe(true) + }) + + it("User is a creator if it has CREATOR permission in some application", async () => { + const user: User = structures.users.user({ roles: { app1: "CREATOR" } }) + expect(await isCreator(user)).toBe(true) + }) + + it("User is a creator if it has ADMIN permission in some application", async () => { + const 
user: User = structures.users.user({ roles: { app1: "ADMIN" } }) + expect(await isCreator(user)).toBe(true) + }) + + it("User is a creator if it remains to a group with ADMIN permissions", async () => { + const usersInGroup = 10 + const groupId = "gr_17abffe89e0b40268e755b952f101a59" + const group: UserGroup = { + ...structures.userGroups.userGroup(), + ...{ _id: groupId, roles: { app1: "ADMIN" } }, + } + const users: User[] = [] + for (let i = 0; i < usersInGroup; i++) { + const userId = `us_${generator.guid()}` + const user: User = structures.users.user({ + _id: userId, + userGroups: [groupId], + }) + users.push(user) + } + + await config.doInTenant(async () => { + const db = getGlobalDB() + await db.put(group) + for (let user of users) { + await db.put(user) + const creator = await isCreator(user) + expect(creator).toBe(true) + } + }) + }) +}) diff --git a/packages/backend-core/src/users/users.ts b/packages/backend-core/src/users/users.ts index 6dc8750b62..638da4a5b1 100644 --- a/packages/backend-core/src/users/users.ts +++ b/packages/backend-core/src/users/users.ts @@ -25,6 +25,7 @@ import { import { getGlobalDB } from "../context" import * as context from "../context" import { isCreator } from "./utils" +import { UserDB } from "./db" type GetOpts = { cleanup?: boolean } @@ -43,7 +44,7 @@ function removeUserPassword(users: User | User[]) { return users } -export const isSupportedUserSearch = (query: SearchQuery) => { +export function isSupportedUserSearch(query: SearchQuery) { const allowed = [ { op: SearchQueryOperators.STRING, key: "email" }, { op: SearchQueryOperators.EQUAL, key: "_id" }, @@ -68,10 +69,10 @@ export const isSupportedUserSearch = (query: SearchQuery) => { return true } -export const bulkGetGlobalUsersById = async ( +export async function bulkGetGlobalUsersById( userIds: string[], opts?: GetOpts -) => { +) { const db = getGlobalDB() let users = ( await db.allDocs({ @@ -85,7 +86,7 @@ export const bulkGetGlobalUsersById = async ( return users } -export const getAllUserIds = async () => { +export async function getAllUserIds() { const db = getGlobalDB() const startKey = `${DocumentType.USER}${SEPARATOR}` const response = await db.allDocs({ @@ -95,7 +96,7 @@ export const getAllUserIds = async () => { return response.rows.map(row => row.id) } -export const bulkUpdateGlobalUsers = async (users: User[]) => { +export async function bulkUpdateGlobalUsers(users: User[]) { const db = getGlobalDB() return (await db.bulkDocs(users)) as BulkDocsResponse } @@ -113,10 +114,10 @@ export async function getById(id: string, opts?: GetOpts): Promise { * Given an email address this will use a view to search through * all the users to find one with this email address. 
*/ -export const getGlobalUserByEmail = async ( +export async function getGlobalUserByEmail( email: String, opts?: GetOpts -): Promise => { +): Promise { if (email == null) { throw "Must supply an email address to view" } @@ -139,11 +140,23 @@ export const getGlobalUserByEmail = async ( return user } -export const searchGlobalUsersByApp = async ( +export async function doesUserExist(email: string) { + try { + const user = await getGlobalUserByEmail(email) + if (Array.isArray(user) || user != null) { + return true + } + } catch (err) { + return false + } + return false +} + +export async function searchGlobalUsersByApp( appId: any, opts: DatabaseQueryOpts, getOpts?: GetOpts -) => { +) { if (typeof appId !== "string") { throw new Error("Must provide a string based app ID") } @@ -151,7 +164,7 @@ export const searchGlobalUsersByApp = async ( include_docs: true, }) params.startkey = opts && opts.startkey ? opts.startkey : params.startkey - let response = await queryGlobalView(ViewName.USER_BY_APP, params) + let response = await queryGlobalView(ViewName.USER_BY_APP, params) if (!response) { response = [] @@ -167,10 +180,10 @@ export const searchGlobalUsersByApp = async ( Return any user who potentially has access to the application Admins, developers and app users with the explicitly role. */ -export const searchGlobalUsersByAppAccess = async ( +export async function searchGlobalUsersByAppAccess( appId: any, opts?: { limit?: number } -) => { +) { const roleSelector = `roles.${appId}` let orQuery: any[] = [ @@ -205,7 +218,7 @@ export const searchGlobalUsersByAppAccess = async ( return resp.rows } -export const getGlobalUserByAppPage = (appId: string, user: User) => { +export function getGlobalUserByAppPage(appId: string, user: User) { if (!user) { return } @@ -215,11 +228,11 @@ export const getGlobalUserByAppPage = (appId: string, user: User) => { /** * Performs a starts with search on the global email view. */ -export const searchGlobalUsersByEmail = async ( +export async function searchGlobalUsersByEmail( email: string | unknown, opts: any, getOpts?: GetOpts -) => { +) { if (typeof email !== "string") { throw new Error("Must provide a string to search by") } @@ -242,12 +255,12 @@ export const searchGlobalUsersByEmail = async ( } const PAGE_LIMIT = 8 -export const paginatedUsers = async ({ +export async function paginatedUsers({ bookmark, query, appId, limit, -}: SearchUsersRequest = {}) => { +}: SearchUsersRequest = {}) { const db = getGlobalDB() const pageSize = limit ?? 
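  // (falls back to the default page size of 8 when the caller passes no limit)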
PAGE_LIMIT const pageLimit = pageSize + 1 @@ -296,7 +309,8 @@ export async function getCreatorCount() { let creators = 0 async function iterate(startPage?: string) { const page = await paginatedUsers({ bookmark: startPage }) - creators += page.data.filter(isCreator).length + const creatorsEval = await Promise.all(page.data.map(isCreator)) + creators += creatorsEval.filter(creator => !!creator).length if (page.hasNextPage) { await iterate(page.nextPage) } @@ -324,3 +338,20 @@ export function cleanseUserObject(user: User | ContextUser, base?: User) { } return user } + +export async function addAppBuilder(user: User, appId: string) { + const prodAppId = getProdAppID(appId) + user.builder ??= {} + user.builder.creator = true + user.builder.apps ??= [] + user.builder.apps.push(prodAppId) + await UserDB.save(user, { hashPassword: false }) +} + +export async function removeAppBuilder(user: User, appId: string) { + const prodAppId = getProdAppID(appId) + if (user.builder && user.builder.apps?.includes(prodAppId)) { + user.builder.apps = user.builder.apps.filter(id => id !== prodAppId) + } + await UserDB.save(user, { hashPassword: false }) +} diff --git a/packages/backend-core/src/users/utils.ts b/packages/backend-core/src/users/utils.ts index 0ef4b77998..348ad1532f 100644 --- a/packages/backend-core/src/users/utils.ts +++ b/packages/backend-core/src/users/utils.ts @@ -1,4 +1,4 @@ -import { CloudAccount } from "@budibase/types" +import { CloudAccount, ContextUser, User, UserGroup } from "@budibase/types" import * as accountSdk from "../accounts" import env from "../environment" import { getPlatformUser } from "./lookup" @@ -6,17 +6,48 @@ import { EmailUnavailableError } from "../errors" import { getTenantId } from "../context" import { sdk } from "@budibase/shared-core" import { getAccountByTenantId } from "../accounts" +import { BUILTIN_ROLE_IDS } from "../security/roles" +import * as context from "../context" // extract from shared-core to make easily accessible from backend-core export const isBuilder = sdk.users.isBuilder export const isAdmin = sdk.users.isAdmin -export const isCreator = sdk.users.isCreator export const isGlobalBuilder = sdk.users.isGlobalBuilder export const isAdminOrBuilder = sdk.users.isAdminOrBuilder export const hasAdminPermissions = sdk.users.hasAdminPermissions export const hasBuilderPermissions = sdk.users.hasBuilderPermissions export const hasAppBuilderPermissions = sdk.users.hasAppBuilderPermissions +export async function isCreator(user?: User | ContextUser) { + const isCreatorByUserDefinition = sdk.users.isCreator(user) + if (!isCreatorByUserDefinition && user) { + return await isCreatorByGroupMembership(user) + } + return isCreatorByUserDefinition +} + +async function isCreatorByGroupMembership(user?: User | ContextUser) { + const userGroups = user?.userGroups || [] + if (userGroups.length > 0) { + const db = context.getGlobalDB() + const groups: UserGroup[] = [] + for (let groupId of userGroups) { + try { + const group = await db.get(groupId) + groups.push(group) + } catch (e: any) { + if (e.error !== "not_found") { + throw e + } + } + } + return groups.some(group => + Object.values(group.roles || {}).includes(BUILTIN_ROLE_IDS.ADMIN) + ) + } + return false +} + export async function validateUniqueUser(email: string, tenantId: string) { // check budibase users in other tenants if (env.MULTI_TENANCY) { diff --git a/packages/backend-core/src/utils/Duration.ts b/packages/backend-core/src/utils/Duration.ts index f376c2f7c7..730b59d1dc 100644 --- 
a/packages/backend-core/src/utils/Duration.ts +++ b/packages/backend-core/src/utils/Duration.ts @@ -28,6 +28,9 @@ export class Duration { toMs: () => { return Duration.convert(from, DurationType.MILLISECONDS, duration) }, + toSeconds: () => { + return Duration.convert(from, DurationType.SECONDS, duration) + }, } } @@ -46,4 +49,8 @@ export class Duration { static fromDays(duration: number) { return Duration.from(DurationType.DAYS, duration) } + + static fromMilliseconds(duration: number) { + return Duration.from(DurationType.MILLISECONDS, duration) + } } diff --git a/packages/backend-core/src/utils/hashing.ts b/packages/backend-core/src/utils/hashing.ts index aba11f38e6..54d7de4aba 100644 --- a/packages/backend-core/src/utils/hashing.ts +++ b/packages/backend-core/src/utils/hashing.ts @@ -1,4 +1,5 @@ import env from "../environment" + export * from "../docIds/newid" const bcrypt = env.JS_BCRYPT ? require("bcryptjs") : require("bcrypt") diff --git a/packages/backend-core/src/utils/tests/utils.spec.ts b/packages/backend-core/src/utils/tests/utils.spec.ts index 5a0ac4f283..4dc3855c35 100644 --- a/packages/backend-core/src/utils/tests/utils.spec.ts +++ b/packages/backend-core/src/utils/tests/utils.spec.ts @@ -44,11 +44,11 @@ describe("utils", () => { it("gets appId from url", async () => { await config.doInTenant(async () => { - const url = "http://test.com" + const url = "http://example.com" env._set("PLATFORM_URL", url) const ctx = structures.koa.newContext() - ctx.host = `${config.tenantId}.test.com` + ctx.host = `${config.tenantId}.example.com` const expected = db.generateAppID(config.tenantId) const app = structures.apps.app(expected) @@ -89,7 +89,7 @@ describe("utils", () => { const ctx = structures.koa.newContext() const expected = db.generateAppID() ctx.request.headers = { - referer: `http://test.com/builder/app/${expected}/design/screen_123/screens`, + referer: `http://example.com/builder/app/${expected}/design/screen_123/screens`, } const actual = await utils.getAppIdFromCtx(ctx) @@ -100,7 +100,7 @@ describe("utils", () => { const ctx = structures.koa.newContext() const appId = db.generateAppID() ctx.request.headers = { - referer: `http://test.com/foo/app/${appId}/bar`, + referer: `http://example.com/foo/app/${appId}/bar`, } const actual = await utils.getAppIdFromCtx(ctx) @@ -188,4 +188,17 @@ describe("utils", () => { expectResult(false) }) }) + + describe("hasCircularStructure", () => { + it("should detect a circular structure", () => { + const a: any = { b: "b" } + const b = { a } + a.b = b + expect(utils.hasCircularStructure(b)).toBe(true) + }) + + it("should allow none circular structures", () => { + expect(utils.hasCircularStructure({ a: "b" })).toBe(false) + }) + }) }) diff --git a/packages/backend-core/src/utils/utils.ts b/packages/backend-core/src/utils/utils.ts index b92471a7a4..30cf55b149 100644 --- a/packages/backend-core/src/utils/utils.ts +++ b/packages/backend-core/src/utils/utils.ts @@ -11,7 +11,7 @@ import { TenantResolutionStrategy, } from "@budibase/types" import type { SetOption } from "cookies" -const jwt = require("jsonwebtoken") +import jwt, { Secret } from "jsonwebtoken" const APP_PREFIX = DocumentType.APP + SEPARATOR const PROD_APP_PREFIX = "/app/" @@ -31,8 +31,8 @@ export async function resolveAppUrl(ctx: Ctx) { const appUrl = ctx.path.split("/")[2] let possibleAppUrl = `/${appUrl.toLowerCase()}` - let tenantId: string | null = context.getTenantId() - if (env.MULTI_TENANCY) { + let tenantId: string | undefined = context.getTenantId() + if (!env.isDev() && 
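+    // (presumably skipped in dev so localhost URLs aren't treated as tenant subdomains)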
env.MULTI_TENANCY) {
     // always use the tenant id from the subdomain in multi tenancy
     // this ensures the logged-in user tenant id doesn't overwrite
     // e.g. in the case of viewing a public app while already logged-in to another tenant
@@ -41,7 +41,7 @@
     })
   }
 
-  // search prod apps for a url that matches
+  // search prod apps for a URL that matches
   const apps: App[] = await context.doInTenant(
     tenantId,
     () => getAllApps({ dev: false }) as Promise<App[]>
   )
@@ -59,10 +59,7 @@
     return true
   }
   // prod app
-  if (ctx.path.startsWith(PROD_APP_PREFIX)) {
-    return true
-  }
-  return false
+  return ctx.path.startsWith(PROD_APP_PREFIX)
 }
 
 export function isServingBuilder(ctx: Ctx): boolean {
@@ -99,7 +96,7 @@
   }
 
   // look in the path
-  const pathId = parseAppIdFromUrl(ctx.path)
+  const pathId = parseAppIdFromUrlPath(ctx.path)
   if (!appId && pathId) {
     appId = confirmAppId(pathId)
   }
@@ -119,34 +116,37 @@
   // referer header is present from a builder redirect
   const referer = ctx.request.headers.referer
   if (!appId && referer?.includes(BUILDER_APP_PREFIX)) {
-    const refererId = parseAppIdFromUrl(ctx.request.headers.referer)
+    const refererId = parseAppIdFromUrlPath(ctx.request.headers.referer)
     appId = confirmAppId(refererId)
   }
   return appId
 }
 
-function parseAppIdFromUrl(url?: string) {
+function parseAppIdFromUrlPath(url?: string) {
   if (!url) {
     return
   }
-  return url.split("/").find(subPath => subPath.startsWith(APP_PREFIX))
+  return url
+    .split("?")[0] // Remove any possible query string
+    .split("/")
+    .find(subPath => subPath.startsWith(APP_PREFIX))
 }
 
 /**
 * opens the contents of the specified encrypted JWT.
 * @return the contents of the token.
 */
-export function openJwt(token: string) {
+export function openJwt<T>(token?: string): T | undefined {
   if (!token) {
-    return token
+    return undefined
   }
   try {
-    return jwt.verify(token, env.JWT_SECRET)
+    return jwt.verify(token, env.JWT_SECRET as Secret) as T
   } catch (e) {
     if (env.JWT_SECRET_FALLBACK) {
       // fallback to enable rotation
-      return jwt.verify(token, env.JWT_SECRET_FALLBACK)
+      return jwt.verify(token, env.JWT_SECRET_FALLBACK) as T
     } else {
       throw e
     }
@@ -158,13 +158,9 @@ export function isValidInternalAPIKey(apiKey: string) {
     return true
   }
   // fallback to enable rotation
-  if (
-    env.INTERNAL_API_KEY_FALLBACK &&
-    env.INTERNAL_API_KEY_FALLBACK === apiKey
-  ) {
-    return true
-  }
-  return false
+  return !!(
+    env.INTERNAL_API_KEY_FALLBACK && env.INTERNAL_API_KEY_FALLBACK === apiKey
+  )
 }
 
 /**
@@ -172,14 +168,14 @@
 * @param ctx The request which is to be manipulated.
 * @param name The name of the cookie to get.
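 * (the cookie value is a signed JWT, so reading it also verifies the signature)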
*/ -export function getCookie(ctx: Ctx, name: string) { +export function getCookie(ctx: Ctx, name: string) { const cookie = ctx.cookies.get(name) if (!cookie) { - return cookie + return undefined } - return openJwt(cookie) + return openJwt(cookie) } /** @@ -196,7 +192,7 @@ export function setCookie( opts = { sign: true } ) { if (value && opts && opts.sign) { - value = jwt.sign(value, env.JWT_SECRET) + value = jwt.sign(value, env.JWT_SECRET as Secret) } const config: SetOption = { @@ -237,3 +233,17 @@ export function timeout(timeMs: number) { export function isAudited(event: Event) { return !!AuditedEventFriendlyName[event] } + +export function hasCircularStructure(json: any) { + if (typeof json !== "object") { + return false + } + try { + JSON.stringify(json) + } catch (err) { + if (err instanceof Error && err?.message.includes("circular structure")) { + return true + } + } + return false +} diff --git a/packages/backend-core/tests/core/users/users.spec.js b/packages/backend-core/tests/core/users/users.spec.js index ae7109344a..f08c435b95 100644 --- a/packages/backend-core/tests/core/users/users.spec.js +++ b/packages/backend-core/tests/core/users/users.spec.js @@ -1,5 +1,5 @@ -const _ = require('lodash/fp') -const {structures} = require("../../../tests") +const _ = require("lodash/fp") +const { structures } = require("../../../tests") jest.mock("../../../src/context") jest.mock("../../../src/db") @@ -7,10 +7,9 @@ jest.mock("../../../src/db") const context = require("../../../src/context") const db = require("../../../src/db") -const {getCreatorCount} = require('../../../src/users/users') +const { getCreatorCount } = require("../../../src/users/users") describe("Users", () => { - let getGlobalDBMock let getGlobalUserParamsMock let paginationMock @@ -26,26 +25,26 @@ describe("Users", () => { it("Retrieves the number of creators", async () => { const getUsers = (offset, limit, creators = false) => { const range = _.range(offset, limit) - const opts = creators ? {builder: {global: true}} : undefined + const opts = creators ? 
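      // (a global builder flag is one of the properties that marks a user as a creator)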
{ builder: { global: true } } : undefined return range.map(() => structures.users.user(opts)) } const page1Data = getUsers(0, 8) const page2Data = getUsers(8, 12, true) getGlobalDBMock.mockImplementation(() => ({ - name : "fake-db", + name: "fake-db", allDocs: () => ({ - rows: [...page1Data, ...page2Data] - }) + rows: [...page1Data, ...page2Data], + }), })) paginationMock.mockImplementationOnce(() => ({ data: page1Data, hasNextPage: true, - nextPage: "1" + nextPage: "1", })) paginationMock.mockImplementation(() => ({ data: page2Data, hasNextPage: false, - nextPage: undefined + nextPage: undefined, })) const creatorsCount = await getCreatorCount() expect(creatorsCount).toBe(4) diff --git a/packages/backend-core/tests/core/utilities/mocks/alerts.ts b/packages/backend-core/tests/core/utilities/mocks/alerts.ts index 90c9759c92..0b26e98363 100644 --- a/packages/backend-core/tests/core/utilities/mocks/alerts.ts +++ b/packages/backend-core/tests/core/utilities/mocks/alerts.ts @@ -1,3 +1,4 @@ jest.mock("../../../../src/logging/alerts") import * as _alerts from "../../../../src/logging/alerts" + export const alerts = jest.mocked(_alerts) diff --git a/packages/backend-core/tests/core/utilities/mocks/events.ts b/packages/backend-core/tests/core/utilities/mocks/events.ts index fef730768a..96f351de10 100644 --- a/packages/backend-core/tests/core/utilities/mocks/events.ts +++ b/packages/backend-core/tests/core/utilities/mocks/events.ts @@ -15,6 +15,7 @@ beforeAll(async () => { jest.spyOn(events.app, "created") jest.spyOn(events.app, "updated") + jest.spyOn(events.app, "duplicated") jest.spyOn(events.app, "deleted") jest.spyOn(events.app, "published") jest.spyOn(events.app, "unpublished") diff --git a/packages/backend-core/tests/core/utilities/mocks/index.ts b/packages/backend-core/tests/core/utilities/mocks/index.ts index 9a72b38ef5..8705e563cb 100644 --- a/packages/backend-core/tests/core/utilities/mocks/index.ts +++ b/packages/backend-core/tests/core/utilities/mocks/index.ts @@ -1,5 +1,6 @@ jest.mock("../../../../src/accounts") import * as _accounts from "../../../../src/accounts" + export const accounts = jest.mocked(_accounts) export * as date from "./date" diff --git a/packages/backend-core/tests/core/utilities/mocks/licenses.ts b/packages/backend-core/tests/core/utilities/mocks/licenses.ts index 758fd6bf9a..1cbc282575 100644 --- a/packages/backend-core/tests/core/utilities/mocks/licenses.ts +++ b/packages/backend-core/tests/core/utilities/mocks/licenses.ts @@ -58,7 +58,7 @@ export const useCloudFree = () => { // FEATURES const useFeature = (feature: Feature) => { - const license = cloneDeep(UNLIMITED_LICENSE) + const license = cloneDeep(getCachedLicense() || UNLIMITED_LICENSE) const opts: UseLicenseOpts = { features: [feature], } diff --git a/packages/backend-core/tests/core/utilities/structures/accounts.ts b/packages/backend-core/tests/core/utilities/structures/accounts.ts index 515f94db1e..7dcc2de116 100644 --- a/packages/backend-core/tests/core/utilities/structures/accounts.ts +++ b/packages/backend-core/tests/core/utilities/structures/accounts.ts @@ -18,7 +18,7 @@ export const account = (partial: Partial = {}): Account => { return { accountId: uuid(), tenantId: generator.word(), - email: generator.email(), + email: generator.email({ domain: "example.com" }), tenantName: generator.word(), hosting: Hosting.SELF, createdAt: Date.now(), diff --git a/packages/backend-core/tests/core/utilities/structures/common.ts b/packages/backend-core/tests/core/utilities/structures/common.ts index 
05b879f36b..9b1b178f0b 100644 --- a/packages/backend-core/tests/core/utilities/structures/common.ts +++ b/packages/backend-core/tests/core/utilities/structures/common.ts @@ -3,5 +3,5 @@ import { v4 as uuid } from "uuid" export { v4 as uuid } from "uuid" export const email = () => { - return `${uuid()}@test.com` + return `${uuid()}@example.com` } diff --git a/packages/backend-core/tests/core/utilities/structures/generator.ts b/packages/backend-core/tests/core/utilities/structures/generator.ts index ed4dac8255..64eb5ecc97 100644 --- a/packages/backend-core/tests/core/utilities/structures/generator.ts +++ b/packages/backend-core/tests/core/utilities/structures/generator.ts @@ -1,2 +1,3 @@ import Chance from "./Chance" + export const generator = new Chance() diff --git a/packages/backend-core/tests/core/utilities/structures/scim.ts b/packages/backend-core/tests/core/utilities/structures/scim.ts index 80f41c605d..f424b2881a 100644 --- a/packages/backend-core/tests/core/utilities/structures/scim.ts +++ b/packages/backend-core/tests/core/utilities/structures/scim.ts @@ -13,7 +13,7 @@ interface CreateUserRequestFields { export function createUserRequest(userData?: Partial) { const defaultValues = { externalId: uuid(), - email: generator.email(), + email: `${uuid()}@example.com`, firstName: generator.first(), lastName: generator.last(), username: generator.name(), diff --git a/packages/backend-core/tests/core/utilities/structures/sso.ts b/packages/backend-core/tests/core/utilities/structures/sso.ts index 2e3af712a9..6492283e6a 100644 --- a/packages/backend-core/tests/core/utilities/structures/sso.ts +++ b/packages/backend-core/tests/core/utilities/structures/sso.ts @@ -61,7 +61,7 @@ export function ssoProfile(user?: User): SSOProfile { }, _json: { email: user.email, - picture: "http://test.com", + picture: "http://example.com", }, provider: generator.string(), } diff --git a/packages/backend-core/tests/core/utilities/structures/userGroups.ts b/packages/backend-core/tests/core/utilities/structures/userGroups.ts index 4dc870a00a..4af3f72e51 100644 --- a/packages/backend-core/tests/core/utilities/structures/userGroups.ts +++ b/packages/backend-core/tests/core/utilities/structures/userGroups.ts @@ -3,7 +3,7 @@ import { generator } from "./generator" export function userGroup(): UserGroup { return { - name: generator.word(), + name: generator.guid(), icon: generator.word(), color: generator.word(), } diff --git a/packages/backend-core/tests/core/utilities/structures/users.ts b/packages/backend-core/tests/core/utilities/structures/users.ts index 66d23696e0..db90887af2 100644 --- a/packages/backend-core/tests/core/utilities/structures/users.ts +++ b/packages/backend-core/tests/core/utilities/structures/users.ts @@ -12,7 +12,7 @@ import { generator } from "./generator" import { tenant } from "." 
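+// RFC 2606 reserves example.com for documentation, so generated test addresses can never reach a real mailbox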
export const newEmail = () => { - return `${uuid()}@test.com` + return `${uuid()}@example.com` } export const user = (userProps?: Partial>): User => { @@ -21,11 +21,11 @@ export const user = (userProps?: Partial>): User => { _id: userId, userId, email: newEmail(), - password: "test", + password: "password", roles: { app_test: "admin" }, firstName: generator.first(), lastName: generator.last(), - pictureUrl: "http://test.com", + pictureUrl: "http://example.com", tenantId: tenant.id(), ...userProps, } diff --git a/packages/backend-core/tests/core/utilities/testContainerUtils.ts b/packages/backend-core/tests/core/utilities/testContainerUtils.ts index 7da6cbc777..5d4f5a3c11 100644 --- a/packages/backend-core/tests/core/utilities/testContainerUtils.ts +++ b/packages/backend-core/tests/core/utilities/testContainerUtils.ts @@ -1,80 +1,58 @@ +import { DatabaseImpl } from "../../../src/db" import { execSync } from "child_process" -let dockerPsResult: string | undefined - -function formatDockerPsResult(serverName: string, port: number) { - const lines = dockerPsResult?.split("\n") - let first = true - if (!lines) { - return null - } - for (let line of lines) { - if (first) { - first = false - continue - } - let toLookFor = serverName.split("-service")[0] - if (!line.includes(toLookFor)) { - continue - } - const regex = new RegExp(`0.0.0.0:([0-9]*)->${port}`, "g") - const found = line.match(regex) - if (found) { - return found[0].split(":")[1].split("->")[0] - } - } - return null +interface ContainerInfo { + Command: string + CreatedAt: string + ID: string + Image: string + Labels: string + LocalVolumes: string + Mounts: string + Names: string + Networks: string + Ports: string + RunningFor: string + Size: string + State: string + Status: string } -function getTestContainerSettings( - serverName: string, - key: string -): string | null { - const entry = Object.entries(global).find( - ([k]) => - k.includes(`${serverName.toUpperCase()}`) && - k.includes(`${key.toUpperCase()}`) - ) - if (!entry) { - return null - } - return entry[1] +function getTestcontainers(): ContainerInfo[] { + return execSync("docker ps --format json") + .toString() + .split("\n") + .filter(x => x.length > 0) + .map(x => JSON.parse(x) as ContainerInfo) + .filter(x => x.Labels.includes("org.testcontainers=true")) } -function getContainerInfo(containerName: string, port: number) { - let assignedPort = getTestContainerSettings( - containerName.toUpperCase(), - `PORT_${port}` - ) - if (!dockerPsResult) { - try { - const outputBuffer = execSync("docker ps") - dockerPsResult = outputBuffer.toString("utf8") - } catch (err) { - //no-op - } - } - const possiblePort = formatDockerPsResult(containerName, port) - if (possiblePort) { - assignedPort = possiblePort - } - const host = getTestContainerSettings(containerName.toUpperCase(), "IP") - return { - port: assignedPort, - host, - url: host && assignedPort && `http://${host}:${assignedPort}`, - } +function getContainerByImage(image: string) { + return getTestcontainers().find(x => x.Image.startsWith(image)) } -function getCouchConfig() { - return getContainerInfo("couchdb", 5984) +function getExposedPort(container: ContainerInfo, port: number) { + const match = container.Ports.match(new RegExp(`0.0.0.0:(\\d+)->${port}/tcp`)) + if (!match) { + return undefined + } + return parseInt(match[1]) } export function setupEnv(...envs: any[]) { - const couch = getCouchConfig() + const couch = getContainerByImage("budibase/couchdb") + if (!couch) { + throw new Error("CouchDB container not found") + } + 
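+  // (the ports below are resolved from the running testcontainer rather than hardcoded)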
+ const couchPort = getExposedPort(couch, 5984) + if (!couchPort) { + throw new Error("CouchDB port not found") + } + const configs = [ - { key: "COUCH_DB_PORT", value: couch.port }, - { key: "COUCH_DB_URL", value: couch.url }, + { key: "COUCH_DB_PORT", value: `${couchPort}` }, + { key: "COUCH_DB_URL", value: `http://localhost:${couchPort}` }, ] for (const config of configs.filter(x => !!x.value)) { @@ -82,4 +60,7 @@ export function setupEnv(...envs: any[]) { env._set(config.key, config.value) } } + + // @ts-expect-error + DatabaseImpl.nano = undefined } diff --git a/packages/backend-core/tests/jestEnv.ts b/packages/backend-core/tests/jestEnv.ts index c2047118ec..2c797c9fff 100644 --- a/packages/backend-core/tests/jestEnv.ts +++ b/packages/backend-core/tests/jestEnv.ts @@ -4,3 +4,7 @@ process.env.NODE_ENV = "jest" process.env.MOCK_REDIS = "1" process.env.LOG_LEVEL = process.env.LOG_LEVEL || "error" process.env.REDIS_PASSWORD = "budibase" +process.env.COUCH_DB_PASSWORD = "budibase" +process.env.COUCH_DB_USER = "budibase" +process.env.API_ENCRYPTION_KEY = "testsecret" +process.env.JWT_SECRET = "testsecret" diff --git a/packages/backend-core/tests/jestSetup.ts b/packages/backend-core/tests/jestSetup.ts index 42a24ce733..e5d144290b 100644 --- a/packages/backend-core/tests/jestSetup.ts +++ b/packages/backend-core/tests/jestSetup.ts @@ -9,6 +9,7 @@ mocks.fetch.enable() // mock all dates to 2020-01-01T00:00:00.000Z // use tk.reset() to use real dates in individual tests import tk from "timekeeper" + tk.freeze(mocks.date.MOCK_DATE) if (!process.env.DEBUG) { diff --git a/packages/bbui/package.json b/packages/bbui/package.json index 3601af74e9..b4a9a3969c 100644 --- a/packages/bbui/package.json +++ b/packages/bbui/package.json @@ -24,8 +24,7 @@ "rollup": "^2.45.2", "rollup-plugin-postcss": "^4.0.0", "rollup-plugin-svelte": "^7.1.0", - "rollup-plugin-terser": "^7.0.2", - "svelte": "3.49.0" + "rollup-plugin-terser": "^7.0.2" }, "keywords": [ "svelte" diff --git a/packages/bbui/rollup.config.js b/packages/bbui/rollup.config.js index e285d548d6..da274e0ba5 100644 --- a/packages/bbui/rollup.config.js +++ b/packages/bbui/rollup.config.js @@ -12,6 +12,13 @@ export default { format: "esm", file: "dist/bbui.es.js", }, + onwarn(warning, warn) { + // suppress eval warnings + if (warning.code === "EVAL") { + return + } + warn(warning) + }, plugins: [ resolve(), commonjs(), diff --git a/packages/bbui/src/ActionButton/ActionButton.svelte b/packages/bbui/src/ActionButton/ActionButton.svelte index 427a98f888..c346e34d54 100644 --- a/packages/bbui/src/ActionButton/ActionButton.svelte +++ b/packages/bbui/src/ActionButton/ActionButton.svelte @@ -41,6 +41,7 @@ } + (showTooltip = true)} @@ -130,5 +131,6 @@ max-width: 150px; transform: translateX(-50%); text-align: center; + z-index: 1; } diff --git a/packages/bbui/src/ActionGroup/ActionGroup.svelte b/packages/bbui/src/ActionGroup/ActionGroup.svelte index 43d8cd8de5..978e920c42 100644 --- a/packages/bbui/src/ActionGroup/ActionGroup.svelte +++ b/packages/bbui/src/ActionGroup/ActionGroup.svelte @@ -1,5 +1,6 @@ + +
- + diff --git a/packages/bbui/src/Actions/click_outside.js b/packages/bbui/src/Actions/click_outside.js index e2cf38953c..305ae65ac8 100644 --- a/packages/bbui/src/Actions/click_outside.js +++ b/packages/bbui/src/Actions/click_outside.js @@ -35,19 +35,30 @@ const handleClick = event => { return } + // Ignore clicks for drawers, unless the handler is registered from a drawer + const sourceInDrawer = handler.anchor.closest(".drawer-wrapper") != null + const clickInDrawer = event.target.closest(".drawer-wrapper") != null + if (clickInDrawer && !sourceInDrawer) { + return + } + + if (handler.allowedType && event.type !== handler.allowedType) { + return + } + handler.callback?.(event) }) } document.documentElement.addEventListener("click", handleClick, true) -document.documentElement.addEventListener("contextmenu", handleClick, true) +document.documentElement.addEventListener("mousedown", handleClick, true) /** * Adds or updates a click handler */ -const updateHandler = (id, element, anchor, callback) => { +const updateHandler = (id, element, anchor, callback, allowedType) => { let existingHandler = clickHandlers.find(x => x.id === id) if (!existingHandler) { - clickHandlers.push({ id, element, anchor, callback }) + clickHandlers.push({ id, element, anchor, callback, allowedType }) } else { existingHandler.callback = callback } @@ -71,9 +82,11 @@ const removeHandler = id => { export default (element, opts) => { const id = Math.random() const update = newOpts => { - const callback = newOpts?.callback || newOpts + const callback = + newOpts?.callback || (typeof newOpts === "function" ? newOpts : null) const anchor = newOpts?.anchor || element - updateHandler(id, element, anchor, callback) + const allowedType = newOpts?.allowedType || "click" + updateHandler(id, element, anchor, callback, allowedType) } update(opts) return { diff --git a/packages/bbui/src/Actions/position_dropdown.js b/packages/bbui/src/Actions/position_dropdown.js index 7c4f7e8a2a..35ff4933d7 100644 --- a/packages/bbui/src/Actions/position_dropdown.js +++ b/packages/bbui/src/Actions/position_dropdown.js @@ -15,10 +15,10 @@ export default function positionDropdown(element, opts) { align, maxHeight, maxWidth, + minWidth, useAnchorWidth, offset = 5, customUpdate, - offsetBelow, } = opts if (!anchor) { return @@ -29,20 +29,28 @@ export default function positionDropdown(element, opts) { const elementBounds = element.getBoundingClientRect() let styles = { maxHeight: null, - minWidth: null, + minWidth, maxWidth, left: null, top: null, } if (typeof customUpdate === "function") { - styles = customUpdate(anchorBounds, elementBounds, styles) + styles = customUpdate(anchorBounds, elementBounds, { + ...styles, + offset: opts.offset, + }) } else { // Determine vertical styles const topSpace = anchorBounds.top const bottomSpace = window.innerHeight - anchorBounds.bottom - if (align === "right-outside") { - styles.top = anchorBounds.top + if (align === "right-outside" || align === "left-outside") { + styles.top = + anchorBounds.top + anchorBounds.height / 2 - elementBounds.height / 2 + styles.maxHeight = maxHeight + if (styles.top + elementBounds.height > window.innerHeight) { + styles.top = window.innerHeight - elementBounds.height + } } else if ( window.innerHeight - anchorBounds.bottom < (maxHeight || 100) && topSpace - bottomSpace > 100 @@ -50,7 +58,7 @@ export default function positionDropdown(element, opts) { styles.top = anchorBounds.top - elementBounds.height - offset styles.maxHeight = maxHeight || 240 } else { - styles.top = 
anchorBounds.bottom + (offsetBelow || offset) + styles.top = anchorBounds.bottom + offset styles.maxHeight = maxHeight || window.innerHeight - anchorBounds.bottom - 20 } diff --git a/packages/bbui/src/Avatar/Avatar.svelte b/packages/bbui/src/Avatar/Avatar.svelte index 0faf50f55a..e1d912ffa4 100644 --- a/packages/bbui/src/Avatar/Avatar.svelte +++ b/packages/bbui/src/Avatar/Avatar.svelte @@ -1,5 +1,6 @@ + + import "@spectrum-css/buttongroup/dist/index-vars.css" + export let vertical = false - export let gap = "" + export let gap = "M" $: gapStyle = gap === "L" diff --git a/packages/bbui/src/ColorPicker/ColorPicker.svelte b/packages/bbui/src/ColorPicker/ColorPicker.svelte index 2ba5309860..eb235ad153 100644 --- a/packages/bbui/src/ColorPicker/ColorPicker.svelte +++ b/packages/bbui/src/ColorPicker/ColorPicker.svelte @@ -123,6 +123,8 @@ } + +
+ +
diff --git a/packages/bbui/src/DetailSummary/DetailSummary.svelte b/packages/bbui/src/DetailSummary/DetailSummary.svelte index daa9f3f5ca..cbfdcbec9b 100644 --- a/packages/bbui/src/DetailSummary/DetailSummary.svelte +++ b/packages/bbui/src/DetailSummary/DetailSummary.svelte @@ -1,23 +1,22 @@ + +
{#if name}
@@ -81,7 +80,7 @@ var(--spacing-xl); } .property-panel.no-title { - padding: var(--spacing-xl); + padding-top: var(--spacing-xl); } .show { diff --git a/packages/bbui/src/Divider/Divider.svelte b/packages/bbui/src/Divider/Divider.svelte index e4f0f2fb61..f6837ca97c 100644 --- a/packages/bbui/src/Divider/Divider.svelte +++ b/packages/bbui/src/Divider/Divider.svelte @@ -1,5 +1,6 @@ + {#if visible} - -
- {#if !headless} + + +
+
+
0} + class:modal={$modal} + transition:drawerSlide|local + {style} + >
-
- {title} - - - -
+ {#if $$slots.title} + + {:else} +
{title || "Bindings"}
+ {/if}
+ {#if $resizable} + modal.set(!$modal)} + > + + + {/if}
- {/if} - -
+ +
+
+
{/if} diff --git a/packages/bbui/src/Drawer/DrawerContent.svelte b/packages/bbui/src/Drawer/DrawerContent.svelte index 944a3f4313..f7345afb11 100644 --- a/packages/bbui/src/Drawer/DrawerContent.svelte +++ b/packages/bbui/src/Drawer/DrawerContent.svelte @@ -1,4 +1,8 @@ -
+ + +
{#if $$slots.sidebar}