diff --git a/.all-contributorsrc b/.all-contributorsrc deleted file mode 100644 index 3a416f917e..0000000000 --- a/.all-contributorsrc +++ /dev/null @@ -1,194 +0,0 @@ -{ - "files": [ - "README.md" - ], - "imageSize": 100, - "commit": false, - "contributors": [ - { - "login": "shogunpurple", - "name": "Martin McKeaveney", - "avatar_url": "https://avatars1.githubusercontent.com/u/11256663?v=4", - "profile": "http://martinmck.com", - "contributions": [ - "code", - "doc", - "test", - "infra" - ] - }, - { - "login": "mike12345567", - "name": "Michael Drury", - "avatar_url": "https://avatars2.githubusercontent.com/u/4407001?v=4", - "profile": "http://www.michaeldrury.co.uk/", - "contributions": [ - "doc", - "code", - "test", - "infra" - ] - }, - { - "login": "aptkingston", - "name": "Andrew Kingston", - "avatar_url": "https://avatars3.githubusercontent.com/u/9075550?v=4", - "profile": "https://github.com/aptkingston", - "contributions": [ - "doc", - "code", - "test", - "design" - ] - }, - { - "login": "mjashanks", - "name": "Michael Shanks", - "avatar_url": "https://avatars3.githubusercontent.com/u/3524181?v=4", - "profile": "https://budibase.com/", - "contributions": [ - "doc", - "code", - "test" - ] - }, - { - "login": "kevmodrome", - "name": "Kevin Åberg Kultalahti", - "avatar_url": "https://avatars3.githubusercontent.com/u/534488?v=4", - "profile": "https://github.com/kevmodrome", - "contributions": [ - "doc", - "code", - "test" - ] - }, - { - "login": "joebudi", - "name": "Joe", - "avatar_url": "https://avatars2.githubusercontent.com/u/49767913?v=4", - "profile": "https://www.budibase.com/", - "contributions": [ - "doc", - "code", - "content", - "design" - ] - }, - { - "login": "Rory-Powell", - "name": "Rory Powell", - "avatar_url": "https://avatars.githubusercontent.com/u/8755148?v=4", - "profile": "https://github.com/Rory-Powell", - "contributions": [ - "code", - "doc", - "test" - ] - }, - { - "login": "PClmnt", - "name": "Peter Clement", - "avatar_url": "https://avatars.githubusercontent.com/u/5665926?v=4", - "profile": "https://github.com/PClmnt", - "contributions": [ - "code", - "doc", - "test" - ] - }, - { - "login": "Conor-Mack", - "name": "Conor_Mack", - "avatar_url": "https://avatars1.githubusercontent.com/u/36074859?v=4", - "profile": "https://github.com/Conor-Mack", - "contributions": [ - "code", - "test" - ] - }, - { - "login": "pngwn", - "name": "pngwn", - "avatar_url": "https://avatars1.githubusercontent.com/u/12937446?v=4", - "profile": "https://github.com/pngwn", - "contributions": [ - "code", - "test" - ] - }, - { - "login": "HugoLd", - "name": "HugoLd", - "avatar_url": "https://avatars0.githubusercontent.com/u/26521848?v=4", - "profile": "https://github.com/HugoLd", - "contributions": [ - "code" - ] - }, - { - "login": "victoriasloan", - "name": "victoriasloan", - "avatar_url": "https://avatars.githubusercontent.com/u/9913651?v=4", - "profile": "https://github.com/victoriasloan", - "contributions": [ - "code" - ] - }, - { - "login": "yashank09", - "name": "yashank09", - "avatar_url": "https://avatars.githubusercontent.com/u/37672190?v=4", - "profile": "https://github.com/yashank09", - "contributions": [ - "code" - ] - }, - { - "login": "SOVLOOKUP", - "name": "SOVLOOKUP", - "avatar_url": "https://avatars.githubusercontent.com/u/53158137?v=4", - "profile": "https://github.com/SOVLOOKUP", - "contributions": [ - "code" - ] - }, - { - "login": "seoulaja", - "name": "seoulaja", - "avatar_url": "https://avatars.githubusercontent.com/u/15101654?v=4", - "profile": 
"https://github.com/seoulaja", - "contributions": [ - "translation" - ] - }, - { - "login": "mslourens", - "name": "Maurits Lourens", - "avatar_url": "https://avatars.githubusercontent.com/u/1907152?v=4", - "profile": "https://github.com/mslourens", - "contributions": [ - "test", - "code" - ] - }, - { - "login": "Rory-Powell", - "name": "Rory Powell", - "avatar_url": "https://avatars.githubusercontent.com/u/8755148?v=4", - "profile": "https://github.com/Rory-Powell", - "contributions": [ - "infra", - "test", - "code" - ] - } - ], - "contributorsPerLine": 7, - "projectName": "budibase", - "projectOwner": "Budibase", - "repoType": "github", - "repoHost": "https://github.com", - "skipCi": true, - "commitConvention": "none" -} diff --git a/.eslintignore b/.eslintignore index 021fe8e367..a79f9e2879 100644 --- a/.eslintignore +++ b/.eslintignore @@ -8,3 +8,6 @@ packages/backend-core/coverage packages/server/client packages/builder/.routify packages/sdk/sdk +packages/account-portal/packages/server/build +packages/account-portal/packages/ui/.routify +packages/account-portal/packages/ui/build \ No newline at end of file diff --git a/.eslintrc.json b/.eslintrc.json index 75584b8163..79f7e56712 100644 --- a/.eslintrc.json +++ b/.eslintrc.json @@ -19,6 +19,7 @@ "bundle.js" ], "extends": ["eslint:recommended"], + "plugins": ["import", "eslint-plugin-local-rules"], "overrides": [ { "files": ["**/*.svelte"], @@ -30,7 +31,6 @@ "sourceType": "module", "allowImportExportEverywhere": true } - }, { "files": ["**/*.ts"], @@ -42,13 +42,25 @@ "no-case-declarations": "off", "no-useless-escape": "off", "no-undef": "off", - "no-prototype-builtins": "off" + "no-prototype-builtins": "off", + "local-rules/no-budibase-imports": "error" } } ], "rules": { "no-self-assign": "off", - "no-unused-vars": ["error", { "varsIgnorePattern": "^_", "argsIgnorePattern": "^_", "destructuredArrayIgnorePattern": "^_" }] + "no-unused-vars": [ + "error", + { + "varsIgnorePattern": "^_", + "argsIgnorePattern": "^_", + "destructuredArrayIgnorePattern": "^_" + } + ], + "import/no-relative-packages": "error", + "import/export": "error", + "import/no-duplicates": "error", + "import/newline-after-import": "error" }, "globals": { "GeolocationPositionError": true diff --git a/.github/workflows/README.md b/.github/workflows/README.md index 9b75a2e73a..029dd5af42 100644 --- a/.github/workflows/README.md +++ b/.github/workflows/README.md @@ -1,139 +1,45 @@ # Budibase CI Pipelines -Welcome to the budibase CI pipelines directory. This document details what each of the CI pipelines are for, and come common combinations. +Welcome to the Budibase CI pipelines directory. This document details what each of the CI pipelines are for, and come common combinations. ## All CI Pipelines -### Note - -- When running workflow dispatch jobs, ensure you always run them off the `master` branch. It defaults to `develop`, so double check before running any jobs. The exception to this case is the `deploy-release` job which requires the develop branch. - ### Standard CI Build Job (budibase_ci.yml) Triggers: -- PR or push to develop - PR or push to master -The standard CI Build job is what runs when you raise a PR to develop or master. +The standard CI Build job is what runs when you raise a PR to master. 
- Installs all dependencies, - builds the project - run the unit tests - Generate test coverage metrics with codecov - Run the integration tests +- Check that the pro and account portal submodules are pointing to the latest master head -### Release Develop Job (release-develop.yml) +### Release Job (tag-release.yml) Triggers: -- Push to develop +- Manually triggered -The job responsible for building, tagging and pushing docker images out to the test and release environments. +This job is responsible for building and pushing all the production services, packages and images. This is done via [budibase-deploys](https://github.com/Budibase/budibase-deploys/actions/workflows/release.yml). -- Installs all dependencies -- builds the project -- run the unit tests -- publish the budibase JS packages under a prerelease tag to NPM -- build, tag and push docker images under the `develop` tag to docker hub +An input is required, indicating if the new version will be a `patch`, `minor` or `major` bump. -These images will then be pulled by the test and release environments, updating the latest automatically. Discord notifications are sent to the #infra channel when this occurs. - -### Release Job (release.yml) - -Triggers: - -- Push to master - -This job is responsible for building and pushing the latest code to NPM and docker hub, so that it can be deployed. - -- Installs all dependencies -- builds the project -- run the unit tests -- publish the budibase JS packages under a release tag to NPM (always incremented by patch versions) -- build, tag and push docker images under the `v.x.x.x` (the tag of the NPM release) tag to docker hub - -### Release Selfhost Job (release-selfhost.yml) - -Triggers: - -- Manual Workflow Dispatch Trigger - -This job is responsible for delivering the latest version of budibase to those that are self-hosting. - -This job relies on the release job to have run first, so the latest image is pushed to dockerhub. This job then will pull the latest version from `lerna.json` and try to find an image in dockerhub corresponding to that version. For example, if the version in `lerna.json` is `1.0.0`: - -- Pull the images for all budibase services tagged `v1.0.0` from dockerhub - -- Tag these images as `latest` - -- Push them back to dockerhub. This now means anyone who pulls `latest` (self hosters using docker-compose) will get the latest version. - -- Build and release the budibase helm chart for kubernetes users - -- Perform a github release with the latest version. You can see previous releases here (https://github.com/Budibase/budibase/releases) - -### Deploy Release (deploy-release.yml) - -Triggers: - -- Manual Workflow Dispatch Trigger - -This job is responsible for deploying to our release, cloud kubernetes environment. You must run the release job first, to ensure that the latest images have been built and pushed to docker hub. After kicking off this job, the following will occur: - -- Checks out the release branch -- Pulls the latest `values.yaml` from budibase infra, a private repo containing budibases infrastructure configuration -- Gets the latest budibase version from `lerna.json`, if it hasn't been specified in the workflow when you kicked it off -- Configures AWS Credentials -- Deploys the helm chart in the budibase repo to our preproduction EKS cluster, injecting the `values.yaml` we pulled from budibase-infra -- Fires off a discord webhook in the #infra channel to show that the deployment completely successfully. 
- -### Deploy Preprod (deploy-preprod.yml) - -Triggers: - -- Manual Workflow Dispatch Trigger - -This job is responsible for deploying to our preprod, cloud kubernetes environment. You must run the release job first, to ensure that the latest images have been built and pushed to docker hub. After kicking off this job, the following will occur: - -- Checks out the master branch -- Pulls the latest `values.yaml` from budibase infra, a private repo containing budibases infrastructure configuration -- Gets the latest budibase version from `lerna.json`, if it hasn't been specified in the workflow when you kicked it off -- Configures AWS Credentials -- Deploys the helm chart in the budibase repo to our preprod EKS cluster, injecting the `values.yaml` we pulled from budibase-infra -- Fires off a discord webhook in the #infra channel to show that the deployment completely successfully. - -### Deploy Production (deploy-cloud.yml) - -Triggers: - -- Manual Workflow Dispatch Trigger - -This job is responsible for deploying to our production, cloud kubernetes environment. You must run the release job first, to ensure that the latest images have been built and pushed to docker hub. You can also manually enter a version number for this job, so you can perform rollbacks or upgrade to a specific version. After kicking off this job, the following will occur: - -- Checks out the master branch -- Pulls the latest `values.yaml` from budibase infra, a private repo containing budibases infrastructure configuration -- Gets the latest budibase version from `lerna.json`, if it hasn't been specified in the workflow when you kicked it off -- Configures AWS Credentials -- Deploys the helm chart in the budibase repo to our production EKS cluster, injecting the `values.yaml` we pulled from budibase-infra -- Fires off a discord webhook in the #infra channel to show that the deployment completely successfully. +More documentation can be found here: https://budibase.atlassian.net/wiki/spaces/DEVOPS/pages/347930625/Production+release ## Common Workflows ### Deploy Changes to Production (Release) -- Merge `develop` into `master` -- Wait for budibase CI job and release job to run -- Run cloud deploy job -- Run release selfhost job -### Deploy Changes to Production (Hotfix) - -- Branch off `master` -- Perform your hotfix -- Merge back into `master` -- Wait for budibase CI job and release job to run -- Run cloud deploy job -- Run release selfhost job +- Merge your changes into `master` +- Run `tag-release.yml` +- Check the progress in [budibase-deploys](https://github.com/Budibase/budibase-deploys/actions/workflows/release.yml) ### Rollback A Bad Cloud Deployment -- Kick off cloud deploy job -- Ensure you are running off master -- Enter the version number of the last known good version of budibase. For example `1.0.0` +Rollback documentation can be found here: 
+https://budibase.atlassian.net/wiki/spaces/DEVOPS/pages/347930625/Production+release#Rollback diff --git a/.github/workflows/budibase_ci.yml b/.github/workflows/budibase_ci.yml index 77867c8617..d6bbf19940 100644 --- a/.github/workflows/budibase_ci.yml +++ b/.github/workflows/budibase_ci.yml @@ -12,6 +12,13 @@ on: - master pull_request: workflow_dispatch: + workflow_call: + inputs: + run_as_oss: + type: boolean + required: false + description: Force running checks as if it was an OSS contributor + default: false env: BRANCH: ${{ github.event.pull_request.head.ref }} @@ -19,50 +26,41 @@ env: PERSONAL_ACCESS_TOKEN: ${{ secrets.PERSONAL_ACCESS_TOKEN }} NX_BASE_BRANCH: origin/${{ github.base_ref }} USE_NX_AFFECTED: ${{ github.event_name == 'pull_request' }} + IS_OSS_CONTRIBUTOR: ${{ inputs.run_as_oss == true || (github.event_name == 'pull_request' && github.event.pull_request.head.repo.full_name != 'Budibase/budibase') }} jobs: lint: runs-on: ubuntu-latest steps: - - name: Checkout repo and submodules + - name: Checkout repo uses: actions/checkout@v3 - if: github.event_name != 'pull_request' || github.event.pull_request.head.repo.full_name == 'Budibase/budibase' with: - submodules: true + submodules: ${{ env.IS_OSS_CONTRIBUTOR == 'false' }} token: ${{ secrets.PERSONAL_ACCESS_TOKEN || github.token }} - - name: Checkout repo only - uses: actions/checkout@v3 - if: github.event_name == 'pull_request' && github.event.pull_request.head.repo.full_name != 'Budibase/budibase' - - name: Use Node.js 18.x + - name: Use Node.js 20.x uses: actions/setup-node@v3 with: - node-version: 18.x - cache: "yarn" + node-version: 20.x + cache: yarn - run: yarn --frozen-lockfile - run: yarn lint build: runs-on: ubuntu-latest steps: - - name: Checkout repo and submodules + - name: Checkout repo uses: actions/checkout@v3 - if: github.event_name != 'pull_request' || github.event.pull_request.head.repo.full_name == 'Budibase/budibase' with: - submodules: true + submodules: ${{ env.IS_OSS_CONTRIBUTOR == 'false' }} token: ${{ secrets.PERSONAL_ACCESS_TOKEN || github.token }} fetch-depth: 0 - - name: Checkout repo only - uses: actions/checkout@v3 - if: github.event_name == 'pull_request' && github.event.pull_request.head.repo.full_name != 'Budibase/budibase' - with: - fetch-depth: 0 - - name: Use Node.js 18.x + - name: Use Node.js 20.x uses: actions/setup-node@v3 with: - node-version: 18.x - cache: "yarn" + node-version: 20.x + cache: yarn - run: yarn --frozen-lockfile # Run build all the projects @@ -78,27 +76,33 @@ jobs: yarn check:types fi - test-libraries: + helm-lint: runs-on: ubuntu-latest steps: - - name: Checkout repo and submodules + - name: Checkout repo uses: actions/checkout@v3 - if: github.event_name != 'pull_request' || github.event.pull_request.head.repo.full_name == 'Budibase/budibase' - with: - submodules: true - token: ${{ secrets.PERSONAL_ACCESS_TOKEN || github.token }} - fetch-depth: 0 - - name: Checkout repo only - uses: actions/checkout@v3 - if: github.event_name == 'pull_request' && github.event.pull_request.head.repo.full_name != 'Budibase/budibase' with: fetch-depth: 0 - - name: Use Node.js 18.x + - name: Install Helm + uses: azure/setup-helm@v3 + - run: cd charts/budibase && helm lint . 
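The new `helm-lint` job above rounds out the static checks. The other notable change in this workflow is the `IS_OSS_CONTRIBUTOR` flag, which collapses the old pair of mutually exclusive checkout steps into a single step per job. A condensed sketch of that pattern, using the expressions from the diff above (the `example` job name is illustrative only):

```yaml
env:
  # True for PRs from forks, or when a caller passes run_as_oss; in other
  # words, whenever the private submodules (pro, account-portal) cannot
  # be fetched.
  IS_OSS_CONTRIBUTOR: ${{ inputs.run_as_oss == true || (github.event_name == 'pull_request' && github.event.pull_request.head.repo.full_name != 'Budibase/budibase') }}

jobs:
  example: # hypothetical job, for illustration
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v3
        with:
          # env values are strings inside expressions, hence the comparison
          # against 'false' rather than a bare boolean.
          submodules: ${{ env.IS_OSS_CONTRIBUTOR == 'false' }}
          token: ${{ secrets.PERSONAL_ACCESS_TOKEN || github.token }}
```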
+ + test-libraries: + runs-on: ubuntu-latest + steps: + - name: Checkout repo + uses: actions/checkout@v3 + with: + submodules: ${{ env.IS_OSS_CONTRIBUTOR == 'false' }} + token: ${{ secrets.PERSONAL_ACCESS_TOKEN || github.token }} + fetch-depth: 0 + + - name: Use Node.js 20.x uses: actions/setup-node@v3 with: - node-version: 18.x - cache: "yarn" + node-version: 20.x + cache: yarn - run: yarn --frozen-lockfile - name: Test run: | @@ -107,33 +111,22 @@ jobs: else yarn test --ignore=@budibase/worker --ignore=@budibase/server --ignore=@budibase/pro fi - - uses: codecov/codecov-action@v3 - with: - token: ${{ secrets.CODECOV_TOKEN }} # not required for public repos - name: codecov-umbrella - verbose: true test-worker: runs-on: ubuntu-latest steps: - - name: Checkout repo and submodules + - name: Checkout repo uses: actions/checkout@v3 - if: github.event_name != 'pull_request' || github.event.pull_request.head.repo.full_name == 'Budibase/budibase' with: - submodules: true + submodules: ${{ env.IS_OSS_CONTRIBUTOR == 'false' }} token: ${{ secrets.PERSONAL_ACCESS_TOKEN || github.token }} fetch-depth: 0 - - name: Checkout repo only - uses: actions/checkout@v3 - if: github.event_name == 'pull_request' && github.event.pull_request.head.repo.full_name != 'Budibase/budibase' - with: - fetch-depth: 0 - - name: Use Node.js 18.x + - name: Use Node.js 20.x uses: actions/setup-node@v3 with: - node-version: 18.x - cache: "yarn" + node-version: 20.x + cache: yarn - run: yarn --frozen-lockfile - name: Test worker run: | @@ -143,33 +136,21 @@ jobs: yarn test --scope=@budibase/worker fi - - uses: codecov/codecov-action@v3 - with: - token: ${{ secrets.CODECOV_TOKEN || github.token }} # not required for public repos - name: codecov-umbrella - verbose: true - test-server: runs-on: ubuntu-latest steps: - - name: Checkout repo and submodules + - name: Checkout repo uses: actions/checkout@v3 - if: github.event_name != 'pull_request' || github.event.pull_request.head.repo.full_name == 'Budibase/budibase' with: - submodules: true + submodules: ${{ env.IS_OSS_CONTRIBUTOR == 'false' }} token: ${{ secrets.PERSONAL_ACCESS_TOKEN || github.token }} fetch-depth: 0 - - name: Checkout repo only - uses: actions/checkout@v3 - if: github.event_name == 'pull_request' && github.event.pull_request.head.repo.full_name != 'Budibase/budibase' - with: - fetch-depth: 0 - - name: Use Node.js 18.x + - name: Use Node.js 20.x uses: actions/setup-node@v3 with: - node-version: 18.x - cache: "yarn" + node-version: 20.x + cache: yarn - run: yarn --frozen-lockfile - name: Test server run: | @@ -179,12 +160,6 @@ jobs: yarn test --scope=@budibase/server fi - - uses: codecov/codecov-action@v3 - with: - token: ${{ secrets.CODECOV_TOKEN || github.token }} # not required for public repos - name: codecov-umbrella - verbose: true - test-pro: runs-on: ubuntu-latest if: github.event_name != 'pull_request' || github.event.pull_request.head.repo.full_name == 'Budibase/budibase' @@ -196,11 +171,11 @@ jobs: token: ${{ secrets.PERSONAL_ACCESS_TOKEN || github.token }} fetch-depth: 0 - - name: Use Node.js 18.x + - name: Use Node.js 20.x uses: actions/setup-node@v3 with: - node-version: 18.x - cache: "yarn" + node-version: 20.x + cache: yarn - run: yarn --frozen-lockfile - name: Test run: | @@ -213,24 +188,23 @@ jobs: integration-test: runs-on: ubuntu-latest steps: - - name: Checkout repo and submodules + - name: Checkout repo uses: actions/checkout@v3 - if: github.event_name != 'pull_request' || github.event.pull_request.head.repo.full_name == 'Budibase/budibase' 
with: - submodules: true + submodules: ${{ env.IS_OSS_CONTRIBUTOR == 'false' }} token: ${{ secrets.PERSONAL_ACCESS_TOKEN || github.token }} - - name: Checkout repo only - uses: actions/checkout@v3 - if: github.event_name == 'pull_request' && github.event.pull_request.head.repo.full_name != 'Budibase/budibase' - - name: Use Node.js 18.x + - name: Use Node.js 20.x uses: actions/setup-node@v3 with: - node-version: 18.x - cache: "yarn" + node-version: 20.x + cache: yarn - run: yarn --frozen-lockfile - name: Build packages run: yarn build --scope @budibase/server --scope @budibase/worker + - name: Build backend-core for OSS contributor (required for pro) + if: ${{ env.IS_OSS_CONTRIBUTOR == 'true' }} + run: yarn build --scope @budibase/backend-core - name: Run tests run: | cd qa-core @@ -242,7 +216,7 @@ jobs: check-pro-submodule: runs-on: ubuntu-latest - if: github.event_name != 'pull_request' || github.event.pull_request.head.repo.full_name == 'Budibase/budibase' + if: inputs.run_as_oss != true && (github.event_name != 'pull_request' || github.event.pull_request.head.repo.full_name == 'Budibase/budibase') steps: - name: Checkout repo and submodules uses: actions/checkout@v3 @@ -284,7 +258,57 @@ jobs: if (submoduleCommit !== baseCommit) { console.error('Submodule commit does not match the latest commit on the "${{ steps.get_pro_commits.outputs.target_branch }}" branch.'); - console.error('Refer to the pro repo to merge your changes: https://github.com/Budibase/budibase-pro/blob/develop/docs/getting_started.md') + console.error('Refer to the pro repo to merge your changes: https://github.com/Budibase/budibase-pro/blob/master/docs/getting_started.md') + process.exit(1); + } else { + console.log('All good, the submodule had been merged and setup correctly!') + } + + check-accountportal-submodule: + runs-on: ubuntu-latest + if: inputs.run_as_oss != true && (github.event_name != 'pull_request' || github.event.pull_request.head.repo.full_name == 'Budibase/budibase') + steps: + - name: Checkout repo and submodules + uses: actions/checkout@v3 + with: + submodules: true + token: ${{ secrets.PERSONAL_ACCESS_TOKEN || github.token }} + fetch-depth: 0 + + - name: Check account portal commit + id: get_accountportal_commits + run: | + cd packages/account-portal + accountportal_commit=$(git rev-parse HEAD) + + branch="${{ github.base_ref || github.ref_name }}" + echo "Running on branch '$branch' (base_ref=${{ github.base_ref }}, ref_name=${{ github.head_ref }})" + + base_commit=$(git rev-parse origin/master) + + if [[ ! -z $base_commit ]]; then + echo "target_branch=$branch" + echo "target_branch=$branch" >> "$GITHUB_OUTPUT" + echo "accountportal_commit=$accountportal_commit" + echo "accountportal_commit=$accountportal_commit" >> "$GITHUB_OUTPUT" + echo "base_commit=$base_commit" + echo "base_commit=$base_commit" >> "$GITHUB_OUTPUT" + else + echo "Nothing to do - branch to branch merge." 
fi + + - name: Check submodule merged to base branch + if: ${{ steps.get_accountportal_commits.outputs.base_commit != '' }} + uses: actions/github-script@v4 + with: + github-token: ${{ secrets.GITHUB_TOKEN }} + script: | + const submoduleCommit = '${{ steps.get_accountportal_commits.outputs.accountportal_commit }}'; + const baseCommit = '${{ steps.get_accountportal_commits.outputs.base_commit }}'; + + if (submoduleCommit !== baseCommit) { + console.error('Submodule commit does not match the latest commit on the "${{ steps.get_accountportal_commits.outputs.target_branch }}" branch.'); + console.error('Refer to the account portal repo to merge your changes: https://github.com/Budibase/account-portal/blob/master/docs/index.md') process.exit(1); } else { console.log('All good, the submodule had been merged and setup correctly!') diff --git a/.github/workflows/check-oss-contributor.yml b/.github/workflows/check-oss-contributor.yml new file mode 100644 index 0000000000..398f07a130 --- /dev/null +++ b/.github/workflows/check-oss-contributor.yml @@ -0,0 +1,35 @@ +name: OSS contributor checks +on: + workflow_dispatch: + schedule: + - cron: "0 8,16 * * 1-5" # on weekdays at 8am and 4pm + +jobs: + run-checks: + name: Run CI checks as an OSS contributor + uses: ./.github/workflows/budibase_ci.yml + with: + run_as_oss: true + secrets: inherit + + notify-error: + needs: ["run-checks"] + if: ${{ failure() }} + name: Notify error + runs-on: ubuntu-latest + steps: + - name: Checkout repository + uses: actions/checkout@v4 + + - name: Set commit SHA + id: set_sha + run: echo "::set-output name=sha::$(git rev-parse --short ${{ github.sha }})" + + - name: Notify error + uses: tsickert/discord-webhook@v5.3.0 + with: + webhook-url: ${{ secrets.OSS_CHECKS_WEBHOOK_URL }} + embed-title: 🚨 OSS checks failed in master + embed-url: https://github.com/${{ github.repository }}/actions/runs/${{ github.run_id }} + embed-description: | + Git sha: `${{ steps.set_sha.outputs.sha }}` diff --git a/.github/workflows/close-featurebranch.yml b/.github/workflows/close-featurebranch.yml index 46cb781730..5da3eb52cd 100644 --- a/.github/workflows/close-featurebranch.yml +++ b/.github/workflows/close-featurebranch.yml @@ -2,9 +2,7 @@ name: close-featurebranch on: pull_request: - types: [closed] - branches: - - master + types: [closed, unlabeled] workflow_dispatch: inputs: BRANCH: @@ -14,6 +12,9 @@ on: jobs: release: + if: | + (github.event.action == 'closed' && contains(github.event.pull_request.labels.*.name, 'feature-branch')) || + github.event.label.name == 'feature-branch' runs-on: ubuntu-latest steps: - uses: actions/checkout@v3 diff --git a/.github/workflows/deploy-cloud.yaml b/.github/workflows/deploy-cloud.yaml deleted file mode 100644 index 389b10f7d3..0000000000 --- a/.github/workflows/deploy-cloud.yaml +++ /dev/null @@ -1,48 +0,0 @@ -name: Budibase Deploy Production - -on: - workflow_dispatch: - inputs: - version: - description: Budibase release version. For example - 1.0.0 - required: false - -jobs: - release: - runs-on: ubuntu-latest - - steps: - - name: Fail if not a tag - run: | - if [[ $GITHUB_REF != refs/tags/* ]]; then - echo "Workflow Dispatch can only be run on tags" - exit 1 - fi - - uses: actions/checkout@v2 - with: - fetch-depth: 0 - - - name: Fail if tag is not in master - run: | - if ! git merge-base --is-ancestor ${{ github.sha }} origin/master; then - echo "Tag is not in master. 
This pipeline can only execute tags that are present on the master branch" - exit 1 - fi - - - name: Get the latest budibase release version - id: version - run: | - if [ -z "${{ github.event.inputs.version }}" ]; then - release_version=$(cat lerna.json | jq -r '.version') - else - release_version=${{ github.event.inputs.version }} - fi - echo "RELEASE_VERSION=$release_version" >> $GITHUB_ENV - - - uses: passeidireto/trigger-external-workflow-action@main - env: - PAYLOAD_VERSION: ${{ env.RELEASE_VERSION }} - with: - repository: budibase/budibase-deploys - event: budicloud-prod-deploy - github_pat: ${{ secrets.GH_ACCESS_TOKEN }} diff --git a/.github/workflows/deploy-featurebranch.yml b/.github/workflows/deploy-featurebranch.yml index f1fb12c087..a5636fe912 100644 --- a/.github/workflows/deploy-featurebranch.yml +++ b/.github/workflows/deploy-featurebranch.yml @@ -2,11 +2,19 @@ name: deploy-featurebranch on: pull_request: - branches: - - master + types: [ + labeled, + # default types below (https://docs.github.com/en/actions/using-workflows/events-that-trigger-workflows#pull_request) + opened, + synchronize, + reopened, + ] jobs: release: + if: | + (github.event_name != 'pull_request' || github.event.pull_request.head.repo.full_name == 'Budibase/budibase') && + contains(github.event.pull_request.labels.*.name, 'feature-branch') runs-on: ubuntu-latest steps: - uses: actions/checkout@v3 diff --git a/.github/workflows/force-release.yml b/.github/workflows/force-release.yml new file mode 100644 index 0000000000..8a9d444f51 --- /dev/null +++ b/.github/workflows/force-release.yml @@ -0,0 +1,46 @@ +name: Forced release +concurrency: + group: tag-release + cancel-in-progress: false + +on: + workflow_dispatch: + +jobs: + ensure-is-master-tag: + name: Ensure this is a master tag + runs-on: qa-arc-runner-set + steps: + - name: Checkout monorepo + uses: actions/checkout@v4 + with: + token: ${{ secrets.PERSONAL_ACCESS_TOKEN || github.token }} + fetch-tags: true + fetch-depth: 0 + + - name: Fail if ref is not a tag + run: | + if ! git show-ref -q --verify "refs/tags/${{ github.ref_name }}" 2>/dev/null; then + echo "'${{ github.ref_name }}' is not a valid tag." + exit 1 + fi + - name: Fail if tag is not in master + run: | + if ! git merge-base --is-ancestor ${{ github.ref_name }} origin/master; then + echo "Tag is not in master. 
Release can only execute tags that are present on the master branch" + exit 1 + fi + + trigger-release: + needs: [ensure-is-master-tag] + runs-on: ubuntu-latest + steps: + - uses: peter-evans/repository-dispatch@v2 + with: + repository: budibase/budibase-deploys + event-type: release-prod + token: ${{ secrets.GH_ACCESS_TOKEN }} + client-payload: |- + { + "TAG": "${{ github.ref_name }}" + } diff --git a/.github/workflows/release-master.yml b/.github/workflows/release-master.yml deleted file mode 100644 index 2edb470405..0000000000 --- a/.github/workflows/release-master.yml +++ /dev/null @@ -1,178 +0,0 @@ -name: Budibase Release -concurrency: - group: release - cancel-in-progress: false - -on: - push: - tags: - - "[0-9]+.[0-9]+.[0-9]+" - # Exclude all pre-releases - - "!*[0-9]+.[0-9]+.[0-9]+-*" - -env: - # Posthog token used by ui at build time - POSTHOG_TOKEN: phc_bIjZL7oh2GEUd2vqvTBH8WvrX0fWTFQMs6H5KQxiUxU - INTERCOM_TOKEN: ${{ secrets.INTERCOM_TOKEN }} - PERSONAL_ACCESS_TOKEN: ${{ secrets.PERSONAL_ACCESS_TOKEN }} - -jobs: - release-images: - runs-on: ubuntu-latest - steps: - - uses: actions/checkout@v2 - with: - submodules: true - token: ${{ secrets.PERSONAL_ACCESS_TOKEN }} - fetch-depth: 0 - - - name: Fail if tag is not in master - run: | - if ! git merge-base --is-ancestor ${{ github.sha }} origin/master; then - echo "Tag is not in master. This pipeline can only execute tags that are present on the master branch" - exit 1 - fi - - - uses: actions/setup-node@v1 - with: - node-version: 18.x - cache: yarn - - - run: yarn install --frozen-lockfile - - name: Update versions - run: ./scripts/updateVersions.sh - - run: yarn lint - - run: yarn build - - run: yarn build:sdk - - - name: Publish budibase packages to NPM - env: - NPM_TOKEN: ${{ secrets.NPM_TOKEN }} - run: | - # setup the username and email. I tend to use 'GitHub Actions Bot' with no email by default - git config --global user.name "Budibase Release Bot" - git config --global user.email "<>" - git submodule foreach git commit -a -m 'Release process' - git commit -a -m 'Release process' - echo //registry.npmjs.org/:_authToken=${NPM_TOKEN} >> .npmrc - yarn release - - - name: "Get Current tag" - id: currenttag - run: | - version=$(./scripts/getCurrentVersion.sh) - echo "Using tag $version" - echo "version=$version" >> "$GITHUB_OUTPUT" - - - name: Setup Docker Buildx - id: buildx - uses: docker/setup-buildx-action@v1 - - - name: Docker login - run: | - docker login -u $DOCKER_USER -p $DOCKER_PASSWORD - env: - DOCKER_USER: ${{ secrets.DOCKER_USERNAME }} - DOCKER_PASSWORD: ${{ secrets.DOCKER_API_KEY }} - - - name: Build worker docker - uses: docker/build-push-action@v5 - with: - context: . - push: true - platforms: linux/amd64,linux/arm64 - build-args: | - BUDIBASE_VERSION=${{ env.BUDIBASE_VERSION }} - tags: ${{ env.IMAGE_NAME }}:${{ env.IMAGE_TAG }} - file: ./packages/worker/Dockerfile.v2 - cache-from: type=registry,ref=${{ env.IMAGE_NAME }}:latest - cache-to: type=inline - env: - IMAGE_NAME: budibase/worker - IMAGE_TAG: ${{ steps.currenttag.outputs.version }} - BUDIBASE_VERSION: ${{ steps.currenttag.outputs.version }} - - - name: Build server docker - uses: docker/build-push-action@v5 - with: - context: . 
- push: true - platforms: linux/amd64,linux/arm64 - build-args: | - BUDIBASE_VERSION=${{ env.BUDIBASE_VERSION }} - tags: ${{ env.IMAGE_NAME }}:${{ env.IMAGE_TAG }} - file: ./packages/server/Dockerfile.v2 - cache-from: type=registry,ref=${{ env.IMAGE_NAME }}:latest - cache-to: type=inline - env: - IMAGE_NAME: budibase/apps - IMAGE_TAG: ${{ steps.currenttag.outputs.version }} - BUDIBASE_VERSION: ${{ steps.currenttag.outputs.version }} - - - name: Build proxy docker - uses: docker/build-push-action@v5 - with: - context: ./hosting/proxy - push: true - platforms: linux/amd64,linux/arm64 - tags: ${{ env.IMAGE_NAME }}:${{ env.IMAGE_TAG }} - file: ./hosting/proxy/Dockerfile - cache-from: type=registry,ref=${{ env.IMAGE_NAME }}:latest - cache-to: type=inline - env: - IMAGE_NAME: budibase/proxy - IMAGE_TAG: ${{ steps.currenttag.outputs.version }} - - release-helm-chart: - needs: [release-images] - runs-on: ubuntu-latest - steps: - - uses: actions/checkout@v2 - - name: Setup Helm - uses: azure/setup-helm@v1 - id: helm-install - - - name: Get the latest budibase release version - id: version - run: | - release_version=$(cat lerna.json | jq -r '.version') - echo "RELEASE_VERSION=$release_version" >> $GITHUB_ENV - - # due to helm repo index issue: https://github.com/helm/helm/issues/7363 - # we need to create new package in a different dir, merge the index and move the package back - - name: Build and release helm chart - run: | - git config user.name "Budibase Helm Bot" - git config user.email "<>" - git reset --hard - git fetch - mkdir sync - echo "Packaging chart to sync dir" - helm package charts/budibase --version 0.0.0-master --app-version "$RELEASE_VERSION" --destination sync - echo "Packaging successful" - git checkout gh-pages - echo "Indexing helm repo" - helm repo index --merge docs/index.yaml sync - mv -f sync/* docs - rm -rf sync - echo "Pushing new helm release" - git add -A - git commit -m "Helm Release: ${{ env.RELEASE_VERSION }}" - git push - - trigger-deploy-to-qa-env: - needs: [release-helm-chart] - runs-on: ubuntu-latest - steps: - - uses: actions/checkout@v2 - - - uses: peter-evans/repository-dispatch@v2 - with: - repository: budibase/budibase-deploys - event-type: budicloud-qa-deploy - token: ${{ secrets.GH_ACCESS_TOKEN }} - client-payload: |- - { - "VERSION": "${{ github.ref_name }}", - "REF_NAME": "${{ github.ref_name}}" - } diff --git a/.github/workflows/release-selfhost.yml b/.github/workflows/release-selfhost.yml deleted file mode 100644 index d2689a0ea0..0000000000 --- a/.github/workflows/release-selfhost.yml +++ /dev/null @@ -1,125 +0,0 @@ -name: Budibase Release Selfhost - -on: - workflow_dispatch: - -jobs: - release: - runs-on: ubuntu-latest - - steps: - - name: Fail if not a tag - run: | - if [[ $GITHUB_REF != refs/tags/* ]]; then - echo "Workflow Dispatch can only be run on tags" - exit 1 - fi - - - uses: actions/checkout@v2 - with: - submodules: true - token: ${{ secrets.PERSONAL_ACCESS_TOKEN }} - fetch-depth: 0 - - - name: Fail if tag is not in master - run: | - if ! git merge-base --is-ancestor ${{ github.sha }} origin/master; then - echo "Tag is not in master. 
This pipeline can only execute tags that are present on the master branch" - exit 1 - fi - - - name: Use Node.js 18.x - uses: actions/setup-node@v1 - with: - node-version: 18.x - - - name: Get the latest budibase release version - id: version - run: | - release_version=$(cat lerna.json | jq -r '.version') - echo "RELEASE_VERSION=$release_version" >> $GITHUB_ENV - - - name: Tag and release Docker images (Self Host) - run: | - docker login -u $DOCKER_USER -p $DOCKER_PASSWORD - - release_tag=${{ env.RELEASE_VERSION }} - - # Pull apps and worker images - docker pull budibase/apps:$release_tag - docker pull budibase/worker:$release_tag - docker pull budibase/proxy:$release_tag - - # Tag apps and worker images - docker tag budibase/apps:$release_tag budibase/apps:$SELFHOST_TAG - docker tag budibase/worker:$release_tag budibase/worker:$SELFHOST_TAG - docker tag budibase/proxy:$release_tag budibase/proxy:$SELFHOST_TAG - - # Push images - docker push budibase/apps:$SELFHOST_TAG - docker push budibase/worker:$SELFHOST_TAG - docker push budibase/proxy:$SELFHOST_TAG - env: - DOCKER_USER: ${{ secrets.DOCKER_USERNAME }} - DOCKER_PASSWORD: ${{ secrets.DOCKER_API_KEY }} - SELFHOST_TAG: latest - - - name: Bootstrap and build (CLI) - run: | - yarn - yarn build - - - name: Build OpenAPI spec - run: | - pushd packages/server - yarn - yarn specs - popd - - - name: Setup Helm - uses: azure/setup-helm@v1 - id: helm-install - - # due to helm repo index issue: https://github.com/helm/helm/issues/7363 - # we need to create new package in a different dir, merge the index and move the package back - - name: Build and release helm chart - run: | - git config user.name "Budibase Helm Bot" - git config user.email "<>" - git reset --hard - git fetch - mkdir sync - echo "Packaging chart to sync dir" - helm package charts/budibase --version "$RELEASE_VERSION" --app-version "$RELEASE_VERSION" --destination sync - echo "Packaging successful" - git checkout gh-pages - echo "Indexing helm repo" - helm repo index --merge docs/index.yaml sync - mv -f sync/* docs - rm -rf sync - echo "Pushing new helm release" - git add -A - git commit -m "Helm Release: ${{ env.RELEASE_VERSION }}" - git push - env: - GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} - - - name: Perform Github Release - uses: softprops/action-gh-release@v1 - with: - name: ${{ env.RELEASE_VERSION }} - tag_name: ${{ env.RELEASE_VERSION }} - generate_release_notes: true - files: | - packages/cli/build/cli-win.exe - packages/cli/build/cli-linux - packages/cli/build/cli-macos - packages/server/specs/openapi.yaml - packages/server/specs/openapi.json - - - name: Discord Webhook Action - uses: tsickert/discord-webhook@v4.0.0 - with: - webhook-url: ${{ secrets.PROD_DEPLOY_WEBHOOK_URL }} - content: "Self Host Deployment Complete: ${{ env.RELEASE_VERSION }} deployed to Self Host." 
- embed-title: ${{ env.RELEASE_VERSION }} diff --git a/.github/workflows/release-singleimage.yml b/.github/workflows/release-singleimage.yml deleted file mode 100644 index 16b1da186a..0000000000 --- a/.github/workflows/release-singleimage.yml +++ /dev/null @@ -1,86 +0,0 @@ -name: Deploy Budibase Single Container Image to DockerHub - -on: - workflow_dispatch: - -env: - CI: true - PERSONAL_ACCESS_TOKEN: ${{ secrets.PERSONAL_ACCESS_TOKEN }} - REGISTRY_URL: registry.hub.docker.com -jobs: - build: - name: "build" - runs-on: ubuntu-latest - strategy: - matrix: - node-version: [18.x] - steps: - - name: Maximize build space - uses: easimon/maximize-build-space@master - with: - root-reserve-mb: 30000 - swap-size-mb: 1024 - remove-android: "true" - remove-dotnet: "true" - - name: Fail if not a tag - run: | - if [[ $GITHUB_REF != refs/tags/* ]]; then - echo "Workflow Dispatch can only be run on tags" - exit 1 - fi - - name: "Checkout" - uses: actions/checkout@v2 - with: - submodules: true - token: ${{ secrets.PERSONAL_ACCESS_TOKEN }} - - - name: Use Node.js ${{ matrix.node-version }} - uses: actions/setup-node@v1 - with: - node-version: ${{ matrix.node-version }} - - name: Setup QEMU - uses: docker/setup-qemu-action@v1 - - name: Setup Docker Buildx - id: buildx - uses: docker/setup-buildx-action@v1 - - name: Run Yarn - run: yarn - - name: Update versions - run: ./scripts/updateVersions.sh - - name: Run Yarn Build - run: yarn build - - name: Login to Docker Hub - uses: docker/login-action@v2 - with: - username: ${{ secrets.DOCKER_USERNAME }} - password: ${{ secrets.DOCKER_API_KEY }} - - name: Get the latest release version - id: version - run: | - release_version=$(cat lerna.json | jq -r '.version') - echo $release_version - echo "RELEASE_VERSION=$release_version" >> $GITHUB_ENV - - name: Tag and release Budibase service docker image - uses: docker/build-push-action@v2 - with: - context: . - push: true - platforms: linux/amd64,linux/arm64 - build-args: BUDIBASE_VERSION=${{ env.BUDIBASE_VERSION }} - tags: budibase/budibase,budibase/budibase:${{ env.RELEASE_VERSION }} - file: ./hosting/single/Dockerfile.v2 - env: - BUDIBASE_VERSION: ${{ env.RELEASE_VERSION }} - - name: Tag and release Budibase Azure App Service docker image - uses: docker/build-push-action@v2 - with: - context: . 
- push: true - platforms: linux/amd64 - build-args: | - TARGETBUILD=aas - BUDIBASE_VERSION=${{ env.BUDIBASE_VERSION }} - tags: budibase/budibase-aas,budibase/budibase-aas:${{ env.RELEASE_VERSION }} - file: ./hosting/single/Dockerfile.v2 - env: - BUDIBASE_VERSION: ${{ env.RELEASE_VERSION }} diff --git a/.github/workflows/stale_bot.yml b/.github/workflows/stale_bot.yml index 49e3473e63..411a70a463 100644 --- a/.github/workflows/stale_bot.yml +++ b/.github/workflows/stale_bot.yml @@ -10,13 +10,14 @@ jobs: steps: - uses: actions/stale@v8 with: + days-before-stale: 330 operations-per-run: 1 # stale rules for PRs days-before-pr-stale: 7 stale-issue-label: stale exempt-pr-labels: pinned,security,roadmap - days-before-pr-close: 7 + days-before-issue-close: 30 - uses: actions/stale@v8 with: @@ -25,6 +26,7 @@ jobs: days-before-stale: 30 only-issue-labels: bug,High priority stale-issue-label: warn + days-before-close: 30 - uses: actions/stale@v8 with: @@ -33,6 +35,7 @@ jobs: days-before-stale: 90 only-issue-labels: bug,Medium priority stale-issue-label: warn + days-before-close: 30 - uses: actions/stale@v8 with: @@ -42,5 +45,4 @@ jobs: stale-issue-label: stale only-issue-labels: bug stale-issue-message: "This issue has been automatically marked as stale because it has not had any activity for six months." - days-before-close: 30 diff --git a/.gitignore b/.gitignore index 02e0ca300d..3eb705dbbf 100644 --- a/.gitignore +++ b/.gitignore @@ -1,4 +1,3 @@ -builder/* .data/ .temp/ packages/server/runtime_apps/ @@ -41,8 +40,11 @@ bower_components build/Release # Dependency directories -/node_modules/ jspm_packages/ +*.min.js +*.map +node_modules/ +dist/ # TypeScript v1 declaration files typings/ diff --git a/.gitmodules b/.gitmodules index 2dd6ea53f2..cb6d1c5dc8 100644 --- a/.gitmodules +++ b/.gitmodules @@ -1,3 +1,6 @@ [submodule "packages/pro"] path = packages/pro url = git@github.com:Budibase/budibase-pro.git +[submodule "packages/account-portal"] + path = packages/account-portal + url = git@github.com:Budibase/account-portal.git diff --git a/.nvmrc b/.nvmrc index 7950a44576..790e1105f2 100644 --- a/.nvmrc +++ b/.nvmrc @@ -1 +1 @@ -v18.17.0 +v20.10.0 diff --git a/.prettierignore b/.prettierignore index 64607d74ab..2444f0e753 100644 --- a/.prettierignore +++ b/.prettierignore @@ -1,13 +1,14 @@ node_modules dist -*.spec.js -packages/builder/src/components/design/AppPreview/CurrentItemPreview.svelte packages/server/builder packages/server/coverage -packages/worker/coverage -packages/backend-core/coverage packages/server/client packages/server/src/definitions/openapi.ts +packages/worker/coverage +packages/backend-core/coverage packages/builder/.routify packages/sdk/sdk -packages/pro/coverage \ No newline at end of file +packages/pro/coverage +packages/account-portal/packages/ui/build +packages/account-portal/packages/ui/.routify +packages/account-portal/packages/server/build \ No newline at end of file diff --git a/.tool-versions b/.tool-versions index a909d60941..946d5198ce 100644 --- a/.tool-versions +++ b/.tool-versions @@ -1,3 +1,3 @@ -nodejs 18.17.0 +nodejs 20.10.0 python 3.10.0 yarn 1.22.19 diff --git a/.vscode/settings.json b/.vscode/settings.json index ece537efac..e22d5a8866 100644 --- a/.vscode/settings.json +++ b/.vscode/settings.json @@ -1,7 +1,7 @@ { "editor.formatOnSave": true, "editor.codeActionsOnSave": { - "source.fixAll": true + "source.fixAll": "explicit" }, "editor.defaultFormatter": "esbenp.prettier-vscode", "[json]": { diff --git a/CODEOWNERS b/CODEOWNERS new file mode 100644 index 
0000000000..69d69ab7d0 --- /dev/null +++ b/CODEOWNERS @@ -0,0 +1,3 @@ +/packages/server @Budibase/backend +/packages/worker @Budibase/backend +/packages/backend-core @Budibase/backend diff --git a/LICENSE b/LICENSE index a017209adf..cbb55109f4 100644 --- a/LICENSE +++ b/LICENSE @@ -1,7 +1,9 @@ -Copyright 2019-2021, Budibase Ltd. +Copyright 2019-2023, Budibase Ltd. Each Budibase package has its own license, please check the license file in each package. You can consider Budibase to be GPLv3 licensed overall. The apps that you build with Budibase do not package any GPLv3 licensed code, thus do not fall under those restrictions. + +Budibase ships with Structured Query Server, by The Neighbourhoodie Software GmbH. The license for this can be found at ./SQS_LICENSE diff --git a/SQS_LICENSE b/SQS_LICENSE new file mode 100644 index 0000000000..0315ee9527 --- /dev/null +++ b/SQS_LICENSE @@ -0,0 +1,31 @@ +FORM OF CUSTOMER LICENCE + +Budibase hereby grants the Customer a worldwide, royalty free, non-exclusive, +perpetual (for the lifetime of the intellectual property rights contained in the Product) +right and title to utilise the binary code of the The Neighbourhoodie Software GmbH +Structured Query Server software product (Product) for its own internal business +purposes (the Purpose) only (the Licence). The Product has the function of bringing a +CouchDB database (NoSQL database) into an SQL database form (SQLite) and thereby +making it usable for complex queries - which originally could only be displayed in an +SQL database. By indexing in SQLite and a server that is tailored to it, the Product +enables the use of CouchDB with SQL queries. +The Licence shall not permit sub-licensing, resale or transfer of the Product to third +parties, other than sub-licensing to the Customer’s direct contractors for the purposes +of utilizing the Product as contemplated above. +The Licence shall not permit the adaptation, modification, decompilation, reverse +engineering or similar activities with respect to the Product. +This licence is granted to the Customer only, although Customer and its Affiliates’ +employees, servants and agents shall be entitled to utilize the Product within the scope +of the Licence for the Customer’s Purpose only. +Reproduction is not permitted to users, except for reproductions that are necessary for +the use of the product under the licence described above. These conditions apply to the +product regardless of the form in which we make the product available and on which +devices it is installed and/or with which devices it is ultimately used. Depending on the +product variant or intended use, certain technical requirements in the IT infrastructure +must be satisfied as a prerequisite for use. +The law of Northern Ireland applies exclusively to this licence, and the courts of +Northern Ireland shall have exclusive jurisdiction, save that we reserve a right to sue +you in the jurisdiction in which you are based. The application of the UN Sales +Convention (CISG) is excluded. +The invalidity of any part of this licence does not affect the validity of the remaining +regulations. 
diff --git a/charts/budibase/Chart.lock b/charts/budibase/Chart.lock index 75b9de07b5..3ee752a362 100644 --- a/charts/budibase/Chart.lock +++ b/charts/budibase/Chart.lock @@ -1,9 +1,6 @@ dependencies: - name: couchdb repository: https://apache.github.io/couchdb-helm - version: 3.3.4 -- name: ingress-nginx - repository: https://kubernetes.github.io/ingress-nginx - version: 4.0.13 -digest: sha256:20892705c2d8e64c98257d181063a514ac55013e2b43399a6e54868a97f97845 -generated: "2021-12-30T18:55:30.878411Z" + version: 4.3.0 +digest: sha256:94449a7f195b186f5af33ec5aa66d58b36bede240fae710f021ca87837b30606 +generated: "2023-11-20T17:43:02.777596Z" diff --git a/charts/budibase/Chart.yaml b/charts/budibase/Chart.yaml index 05b3f24dbd..e2c9378f2c 100644 --- a/charts/budibase/Chart.yaml +++ b/charts/budibase/Chart.yaml @@ -17,10 +17,6 @@ version: 0.0.0 appVersion: 0.0.0 dependencies: - name: couchdb - version: 3.3.4 + version: 4.3.0 repository: https://apache.github.io/couchdb-helm condition: services.couchdb.enabled - - name: ingress-nginx - version: 4.0.13 - repository: https://kubernetes.github.io/ingress-nginx - condition: ingress.nginx diff --git a/charts/budibase/README.md b/charts/budibase/README.md index efa78ba75c..342011bdb1 100644 --- a/charts/budibase/README.md +++ b/charts/budibase/README.md @@ -1,39 +1,228 @@ -# Budibase +# budibase -[Budibase](https://budibase.com/) Budibase is an open source low-code platform, helping thousands of teams build apps for their workplace in minutes. - -## TL;DR; -```console -$ cd chart -$ helm install budibase . -``` - -## Introduction - -This chart bootstraps a [Budibase](https://budibase.com/) deployment on a [Kubernetes](http://kubernetes.io) cluster using the [Helm](https://helm.sh) package manager. +Budibase is an open source low-code platform, helping thousands of teams build apps for their workplace in minutes. ## Prerequisites -- helm v3 or above +- `helm` v3 or above - Kubernetes 1.4+ -- PV provisioner support in the underlying infrastructure (with persistence storage enabled) +- A storage controller (if you want to use persistent storage) +- An ingress controller (if you want to define an `Ingress` resource) +- `metrics-server` (if you want to make use of horizontal pod autoscaling) -## Installing the Chart +## Chart dependencies -To install the chart with the release name `budi-release`: +This chart depends on the official Apache CouchDB chart. You can see its +documentation here: +<https://github.com/apache/couchdb-helm>. + +## Upgrading + +### `2.x` to `3.0.0` + +We made a number of breaking changes in this release to make the chart more +idiomatic and easier to use. + +1. We no longer bundle `ingress-nginx`. If you were relying on this to supply + an ingress controller to your cluster, you will now need to deploy that + separately. You'll find guidance for that here: + . +2. We've upgraded the version of the [CouchDB chart](https://github.com/apache/couchdb-helm) + we use from `3.3.4` to `4.3.0`. The primary motivation for this was to align + the CouchDB chart used with the CouchDB version used, which has also updated + from 3.1.1 to 3.2.1. Additionally, we're moving away from the official CouchDB + image to one we're building ourselves. +3. We've separated out the supplied AWS ALB ingress resource for those deploying + into EKS. Where previously you enabled this by setting `ingress.enabled: false` + and `ingress.aws: true`, you now set `awsAlbIngress.enabled: true` and all + configuration for it is under `awsAlbIngress`. +4. 
The `HorizontalPodAutoscaler` that was configured at `hpa.enabled: true` has + been split into 3 separate HPAs, one for each of `apps`, `worker`, and `proxy`. + They are configured at `services.{apps,worker,proxy}.autoscaling`. + +## Installing + +To install the chart from our repository: ```console -$ helm install budi-release . +$ helm repo add budibase https://budibase.github.io/budibase/ +$ helm repo update +$ helm install --create-namespace --namespace budibase budibase budibase/budibase ``` -The command deploys Budibase on the Kubernetes cluster in the default configuration. The [configuration](#configuration) section lists the parameters that can be configured during installation. - -> **Tip**: List all releases using `helm list` - -## Uninstalling the Chart - -To uninstall/delete the `my-release` deployment: +To install the chart from this repo: ```console -$ helm delete my-release +$ git clone git@github.com:budibase/budibase.git +$ cd budibase/charts/budibase +$ helm install --create-namespace --namespace budibase budibase . ``` + +## Example minimal configuration + +Here's an example `values.yaml` that would get a Budibase instance running in a home +cluster using an nginx ingress controller and NFS as cluster storage (basically one of our +staff's homelabs). + +
+ +```yaml +ingress: + enabled: true + className: "nginx" + hosts: + - host: budibase.local # set this to whatever DNS name you'd use + paths: + - backend: + service: + name: proxy-service + port: + number: 10000 + path: / + pathType: Prefix + +couchdb: + persistentVolume: + enabled: true + storageClass: "nfs-client" + adminPassword: admin + +services: + objectStore: + storageClass: "nfs-client" + redis: + storageClass: "nfs-client" +``` + +If you wanted to use this when bringing up Budibase in your own cluster, you could save it +to your hard disk and run the following: + +```console +$ helm install --create-namespace --namespace budibase budibase . -f values.yaml +``` + +
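The per-service autoscaling introduced in `3.0.0` (see upgrade note 4 above) slots into the same file. A minimal sketch for the apps service, assuming `metrics-server` is installed in the cluster; the autoscaling values mirror the chart defaults listed in the table below, while the resource requests are illustrative figures, included because CPU-based autoscaling needs requests set on the pods:

```yaml
services:
  apps:
    # HPA for the apps service; worker and proxy take the same keys under
    # services.worker.autoscaling and services.proxy.autoscaling.
    autoscaling:
      enabled: true
      minReplicas: 1
      maxReplicas: 10
      targetCPUUtilizationPercentage: 80
    # Illustrative requests; the HPA computes utilization against these.
    resources:
      requests:
        cpu: 250m
        memory: 512Mi
```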
+ +## Configuring + +| Key | Type | Default | Description | +|-----|------|---------|-------------| +| affinity | object | `{}` | Sets the affinity for all pods created by this chart. Should not ordinarily need to be changed. See for more information on affinity. | +| awsAlbIngress.certificateArn | string | `""` | If you're wanting to use HTTPS, you'll need to create an ACM certificate and specify the ARN here. | +| awsAlbIngress.enabled | bool | `false` | Whether to create an ALB Ingress resource pointing to the Budibase proxy. Requires the AWS ALB Ingress Controller. | +| couchdb.clusterSize | int | `1` | The number of replicas to run in the CouchDB cluster. We set this to 1 by default to make things simpler, but you can set it to 3 if you need a high-availability CouchDB cluster. | +| couchdb.couchdbConfig.couchdb.uuid | string | `"budibase-couchdb"` | Unique identifier for this CouchDB server instance. You shouldn't need to change this. | +| couchdb.image | object | `{}` | We use a custom CouchDB image for running Budibase and we don't support using any other CouchDB image. You shouldn't change this, and if you do we can't guarantee that Budibase will work. | +| globals.apiEncryptionKey | string | `""` | Used for encrypting API keys and environment variables when stored in the database. You don't need to set this if `createSecrets` is true. | +| globals.appVersion | string | `""` | The version of Budibase to deploy. Defaults to what's specified by {{ .Chart.AppVersion }}. Ends up being used as the image version tag for the apps, proxy, and worker images. | +| globals.automationMaxIterations | string | `"200"` | The maximum number of iterations allowed for an automation loop step. You can read more about looping here: . | +| globals.budibaseEnv | string | `"PRODUCTION"` | Sets the environment variable BUDIBASE_ENVIRONMENT for the apps and worker pods. Should not ordinarily need to be changed. | +| globals.cookieDomain | string | `""` | Sets the domain attribute of the cookie that Budibase uses to store session information. See for details on why you might want to set this. | +| globals.createSecrets | bool | `true` | Create an internal API key, JWT secret, object store access key and secret, and store them in a Kubernetes `Secret`. | +| globals.enableAnalytics | string | `"1"` | Whether to enable analytics or not. You can read more about our analytics here: . | +| globals.google | object | `{"clientId":"","secret":""}` | Google OAuth settings. These can also be set in the Budibase UI, see for details. | +| globals.google.clientId | string | `""` | Client ID of your Google OAuth app. | +| globals.google.secret | string | `""` | Client secret of your Google OAuth app. | +| globals.httpMigrations | string | `"0"` | Whether or not to enable doing data migrations over the HTTP API. If this is set to "0", migrations are run on startup. You shouldn't ordinarily need to change this. | +| globals.internalApiKey | string | `""` | API key used for internal Budibase API calls. You don't need to set this if `createSecrets` is true. | +| globals.internalApiKeyFallback | string | `""` | A fallback value for `internalApiKey`. If you're rotating your internal API key, you can set this to the old value for the duration of the rotation. | +| globals.jwtSecret | string | `""` | Secret used for signing JWTs. You don't need to set this if `createSecrets` is true. | +| globals.jwtSecretFallback | string | `""` | A fallback value for `jwtSecret`. 
If you're rotating your JWT secret, you can set this to the old value for the duration of the rotation. | +| globals.platformUrl | string | `""` | Set the `platformUrl` binding. You can also do this in Settings > Organisation if you are self-hosting. | +| globals.smtp.enabled | bool | `false` | Whether to enable SMTP or not. | +| globals.smtp.from | string | `""` | The email address to use in the "From:" field of emails sent by Budibase. | +| globals.smtp.host | string | `""` | The hostname of your SMTP server. | +| globals.smtp.password | string | `""` | The password to use when authenticating with your SMTP server. | +| globals.smtp.port | string | `"587"` | The port of your SMTP server. | +| globals.smtp.user | string | `""` | The username to use when authenticating with your SMTP server. | +| globals.tenantFeatureFlags | string | `"*:LICENSING,*:USER_GROUPS,*:ONBOARDING_TOUR"` | Sets what feature flags are enabled and for which tenants. Should not ordinarily need to be changed. | +| imagePullSecrets | list | `[]` | Passed to all pods created by this chart. Should not ordinarily need to be changed. | +| ingress.className | string | `""` | What ingress class to use. | +| ingress.enabled | bool | `true` | Whether to create an Ingress resource pointing to the Budibase proxy. | +| ingress.hosts | list | `[]` | Standard hosts block for the Ingress resource. Defaults to pointing to the Budibase proxy. | +| nameOverride | string | `""` | Override the name of the deployment. Defaults to {{ .Chart.Name }}. | +| service.port | int | `10000` | Port to expose on the service. | +| service.type | string | `"ClusterIP"` | Service type for the service that points to the main Budibase proxy pod. | +| serviceAccount.annotations | object | `{}` | Annotations to add to the service account | +| serviceAccount.create | bool | `true` | Specifies whether a service account should be created | +| serviceAccount.name | string | `""` | The name of the service account to use. If not set and create is true, a name is generated using the fullname template | +| services.apps.autoscaling.enabled | bool | `false` | Whether to enable horizontal pod autoscaling for the apps service. | +| services.apps.autoscaling.maxReplicas | int | `10` | | +| services.apps.autoscaling.minReplicas | int | `1` | | +| services.apps.autoscaling.targetCPUUtilizationPercentage | int | `80` | Target CPU utilization percentage for the apps service. Note that for autoscaling to work, you will need to have metrics-server configured, and resources set for the apps pods. | +| services.apps.httpLogging | int | `1` | Whether or not to log HTTP requests to the apps service. | +| services.apps.livenessProbe | object | HTTP health checks. | Liveness probe configuration for apps pods. You shouldn't need to change this, but if you want to you can find more information here: | +| services.apps.logLevel | string | `"info"` | The log level for the apps service. | +| services.apps.readinessProbe | object | HTTP health checks. | Readiness probe configuration for apps pods. You shouldn't need to change this, but if you want to you can find more information here: | +| services.apps.replicaCount | int | `1` | The number of apps replicas to run. | +| services.apps.resources | object | `{}` | The resources to use for apps pods. See for more information on how to set these. | +| services.apps.startupProbe | object | HTTP health checks. | Startup probe configuration for apps pods. 
+| services.automationWorkers.autoscaling.enabled | bool | `false` | Whether to enable horizontal pod autoscaling for the automation worker service. |
+| services.automationWorkers.autoscaling.maxReplicas | int | `10` | |
+| services.automationWorkers.autoscaling.minReplicas | int | `1` | |
+| services.automationWorkers.autoscaling.targetCPUUtilizationPercentage | int | `80` | Target CPU utilization percentage for the automation worker service. Note that for autoscaling to work, you will need to have metrics-server configured, and resources set for the automation worker pods. |
+| services.automationWorkers.enabled | bool | `true` | Whether or not to enable the automation worker service. If you disable this, automations will be processed by the apps service. |
+| services.automationWorkers.livenessProbe | object | HTTP health checks. | Liveness probe configuration for automation worker pods. You shouldn't need to change this; see the Kubernetes documentation on probes for more information. |
+| services.automationWorkers.logLevel | string | `"info"` | The log level for the automation worker service. |
+| services.automationWorkers.readinessProbe | object | HTTP health checks. | Readiness probe configuration for automation worker pods. You shouldn't need to change this; see the Kubernetes documentation on probes for more information. |
+| services.automationWorkers.replicaCount | int | `1` | The number of automation worker replicas to run. |
+| services.automationWorkers.resources | object | `{}` | The resources to use for automation worker pods. See the Kubernetes documentation on resource management for more information on how to set these. |
+| services.automationWorkers.startupProbe | object | HTTP health checks. | Startup probe configuration for automation worker pods. You shouldn't need to change this; see the Kubernetes documentation on probes for more information. |
+| services.couchdb.backup.enabled | bool | `false` | Whether or not to enable periodic CouchDB backups. This works by replicating to another CouchDB instance. |
+| services.couchdb.backup.interval | string | `""` | Backup interval in seconds. |
+| services.couchdb.backup.resources | object | `{}` | The resources to use for CouchDB backup pods. See the Kubernetes documentation on resource management for more information on how to set these. |
+| services.couchdb.backup.target | string | `""` | Target CouchDB instance to back up to, either a hostname or an IP address. |
+| services.couchdb.enabled | bool | `true` | Whether or not to spin up a CouchDB instance in your cluster. True by default; the configuration for the CouchDB instance is under the `couchdb` key at the root of this file. You can see what options are available to you by looking at the official CouchDB Helm chart (https://github.com/apache/couchdb-helm). |
+| services.couchdb.port | int | `5984` | |
+| services.dns | string | `"cluster.local"` | The DNS suffix to use for service discovery. You only need to change this if you've configured your cluster to use a different DNS suffix. |
+| services.objectStore.accessKey | string | `""` | AWS_ACCESS_KEY if using S3. |
+| services.objectStore.browser | bool | `true` | Whether to enable the Minio web console or not. If you're exposing Minio to the Internet (via a custom Ingress record, for example), you should set this to false. If you're only exposing Minio to your cluster, you can leave this as true. |
+| services.objectStore.cloudfront.cdn | string | `""` | Set the URL of a CloudFront distribution to enable CloudFront. |
+| services.objectStore.cloudfront.privateKey64 | string | `""` | Base64-encoded private key for the above public key. |
+| services.objectStore.cloudfront.publicKeyId | string | `""` | ID of the public key stored in CloudFront. |
+| services.objectStore.minio | bool | `true` | Set to false if using another object store, such as S3. You will need to set `services.objectStore.url` to point to your bucket if you do this. |
+| services.objectStore.region | string | `""` | AWS_REGION if using S3. |
+| services.objectStore.resources | object | `{}` | The resources to use for Minio pods. See the Kubernetes documentation on resource management for more information on how to set these. |
+| services.objectStore.secretKey | string | `""` | AWS_SECRET_ACCESS_KEY if using S3. |
+| services.objectStore.storage | string | `"100Mi"` | How much storage to give Minio in its PersistentVolumeClaim. |
+| services.objectStore.storageClass | string | `""` | The storage class to use for the Minio PersistentVolumeClaim. If set to "-", dynamic provisioning is disabled; if unset or null, no storageClassName is set and the default provisioner is used. |
+| services.objectStore.url | string | `"http://minio-service:9000"` | URL to use for object storage. Only change this if you're using an external object store, such as S3. Remember to set `minio: false` if you do this. |
+| services.proxy.autoscaling.enabled | bool | `false` | Whether to enable horizontal pod autoscaling for the proxy service. |
+| services.proxy.autoscaling.maxReplicas | int | `10` | |
+| services.proxy.autoscaling.minReplicas | int | `1` | |
+| services.proxy.autoscaling.targetCPUUtilizationPercentage | int | `80` | Target CPU utilization percentage for the proxy service. Note that for autoscaling to work, you will need to have metrics-server configured, and resources set for the proxy pods. |
+| services.proxy.livenessProbe | object | HTTP health checks. | Liveness probe configuration for proxy pods. You shouldn't need to change this; see the Kubernetes documentation on probes for more information. |
+| services.proxy.readinessProbe | object | HTTP health checks. | Readiness probe configuration for proxy pods. You shouldn't need to change this; see the Kubernetes documentation on probes for more information. |
+| services.proxy.replicaCount | int | `1` | The number of proxy replicas to run. |
+| services.proxy.resources | object | `{}` | The resources to use for proxy pods. See the Kubernetes documentation on resource management for more information on how to set these. |
+| services.proxy.startupProbe | object | HTTP health checks. | Startup probe configuration for proxy pods. You shouldn't need to change this; see the Kubernetes documentation on probes for more information. |
+| services.redis.enabled | bool | `true` | Whether or not to deploy a Redis pod into your cluster. |
+| services.redis.password | string | `"budibase"` | The password to use when connecting to Redis. It's recommended that you change this from the default if you're running Redis in-cluster. |
+| services.redis.port | int | `6379` | Port to expose Redis on. |
+| services.redis.resources | object | `{}` | The resources to use for Redis pods. See the Kubernetes documentation on resource management for more information on how to set these. |
+| services.redis.storage | string | `"100Mi"` | How much persistent storage to allocate to Redis. |
+| services.redis.storageClass | string | `""` | The storage class to use for the Redis PersistentVolumeClaim. If set to "-", dynamic provisioning is disabled; if unset or null, no storageClassName is set and the default provisioner is used. |
+| services.redis.url | string | `""` | If you choose to run Redis externally to this chart, you can specify the connection details here. |
+| services.worker.autoscaling.enabled | bool | `false` | Whether to enable horizontal pod autoscaling for the worker service. |
+| services.worker.autoscaling.maxReplicas | int | `10` | |
+| services.worker.autoscaling.minReplicas | int | `1` | |
+| services.worker.autoscaling.targetCPUUtilizationPercentage | int | `80` | Target CPU utilization percentage for the worker service. Note that for autoscaling to work, you will need to have metrics-server configured, and resources set for the worker pods. |
+| services.worker.httpLogging | int | `1` | Whether or not to log HTTP requests to the worker service. |
+| services.worker.livenessProbe | object | HTTP health checks. | Liveness probe configuration for worker pods. You shouldn't need to change this; see the Kubernetes documentation on probes for more information. |
+| services.worker.logLevel | string | `"info"` | The log level for the worker service. |
+| services.worker.readinessProbe | object | HTTP health checks. | Readiness probe configuration for worker pods. You shouldn't need to change this; see the Kubernetes documentation on probes for more information. |
+| services.worker.replicaCount | int | `1` | The number of worker replicas to run. |
+| services.worker.resources | object | `{}` | The resources to use for worker pods. See the Kubernetes documentation on resource management for more information on how to set these. |
+| services.worker.startupProbe | object | HTTP health checks. | Startup probe configuration for worker pods. You shouldn't need to change this; see the Kubernetes documentation on probes for more information. |
+| tolerations | list | `[]` | Sets the tolerations for all pods created by this chart. Should not ordinarily need to be changed. See the Kubernetes documentation for more information on tolerations. |
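+
+As a purely illustrative example, here's a sketch of a `values.yaml` overlay that enables SMTP,
+points Budibase at an external S3 bucket instead of the bundled Minio, and turns on autoscaling
+for the apps service. The hostnames, credentials, and URLs below are placeholders, not defaults;
+only the key names come from the table above:
+
+```yaml
+globals:
+  smtp:
+    enabled: true
+    host: "smtp.example.com"         # placeholder SMTP server
+    port: "587"
+    from: "budibase@example.com"     # placeholder sender address
+    user: "smtp-user"                # placeholder credentials
+    password: "smtp-password"
+
+services:
+  objectStore:
+    minio: false                     # disable the bundled Minio...
+    url: "https://s3.us-east-1.amazonaws.com"  # ...and point at your own store (placeholder URL)
+    region: "us-east-1"
+    accessKey: "placeholder-access-key"
+    secretKey: "placeholder-secret-key"
+  apps:
+    autoscaling:
+      enabled: true                  # needs metrics-server and resources set on the apps pods
+      minReplicas: 1
+      maxReplicas: 10
+      targetCPUUtilizationPercentage: 80
+```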
+
+## Uninstalling
+
+To uninstall the chart, assuming you named the release `budibase` (both commands in the installation section do so):
+
+```console
+$ helm uninstall --namespace budibase budibase
+```
+
+----------------------------------------------
+Autogenerated from chart metadata using [helm-docs v1.11.3](https://github.com/norwoodj/helm-docs/releases/v1.11.3)
diff --git a/charts/budibase/README.md.gotmpl b/charts/budibase/README.md.gotmpl
new file mode 100644
index 0000000000..e37c323837
--- /dev/null
+++ b/charts/budibase/README.md.gotmpl
@@ -0,0 +1,117 @@
+{{ template "chart.header" . }}
+{{ template "chart.description" . }}
+
+## Prerequisites
+
+- `helm` v3 or above
+- Kubernetes 1.4+
+- A storage controller (if you want to use persistent storage)
+- An ingress controller (if you want to define an `Ingress` resource)
+- `metrics-server` (if you want to make use of horizontal pod autoscaling)
+
+## Chart dependencies
+
+This chart depends on the official Apache CouchDB chart. You can see its
+documentation here: https://github.com/apache/couchdb-helm.
+
+## Upgrading
+
+### `2.x` to `3.0.0`
+
+We made a number of breaking changes in this release to make the chart more
+idiomatic and easier to use.
+
+1. We no longer bundle `ingress-nginx`. If you were relying on this to supply
+   an ingress controller to your cluster, you will now need to deploy that
+   separately. You'll find guidance for that in the ingress-nginx
+   documentation.
+2. We've upgraded the version of the [CouchDB chart](https://github.com/apache/couchdb-helm)
+   we use from `3.3.4` to `4.3.0`. The primary motivation for this was to align
+   the CouchDB chart version with the CouchDB version we ship, which has also been
+   updated from 3.1.1 to 3.2.1. Additionally, we're moving away from the official
+   CouchDB image to one we build ourselves.
+3. We've separated out the supplied AWS ALB ingress resource for those deploying
+   into EKS. Where previously you enabled this by setting `ingress.enabled: false`
+   and `ingress.aws: true`, you now set `awsAlbIngress.enabled: true`, and all
+   configuration for it is under `awsAlbIngress` (see the sketch after this list).
+4. The `HorizontalPodAutoscaler` that was configured at `hpa.enabled: true` has
+   been split into 3 separate HPAs, one for each of `apps`, `worker`, and `proxy`.
+   They are configured at `services.{apps,worker,proxy}.autoscaling`.
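+
+To make the ALB change in item 3 concrete, here's a before/after sketch of the relevant
+values (the certificate ARN is a placeholder):
+
+```yaml
+# 2.x (old):
+ingress:
+  enabled: false
+  aws: true
+  certificateArn: "arn:aws:acm:placeholder"
+
+# 3.0.0 (new):
+awsAlbIngress:
+  enabled: true
+  certificateArn: "arn:aws:acm:placeholder"
+```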
+
+## Installing
+
+To install the chart from our repository:
+
+```console
+$ helm repo add budibase https://budibase.github.io/budibase/
+$ helm repo update
+$ helm install --create-namespace --namespace budibase budibase budibase/budibase
+```
+
+To install the chart from this repo:
+
+```console
+$ git clone git@github.com:budibase/budibase.git
+$ cd budibase/charts/budibase
+$ helm install --create-namespace --namespace budibase budibase .
+```
+
+## Example minimal configuration
+
+Here's an example `values.yaml` that would get a Budibase instance running in a home
+cluster using an nginx ingress controller and NFS as cluster storage (basically one of our
+staff's homelabs).
+ +```yaml +ingress: + enabled: true + className: "nginx" + hosts: + - host: budibase.local # set this to whatever DNS name you'd use + paths: + - backend: + service: + name: proxy-service + port: + number: 10000 + path: / + pathType: Prefix + +couchdb: + persistentVolume: + enabled: true + storageClass: "nfs-client" + adminPassword: admin + +services: + objectStore: + storageClass: "nfs-client" + redis: + storageClass: "nfs-client" +``` + +If you wanted to use this when bringing up Budibase in your own cluster, you could save it +to your hard disk and run the following: + +```console +$ helm install --create-namespace --namespace budibase budibase . -f values.yaml +``` + +
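+
+Upgrades reuse the same values file. Assuming the release name and namespace used above, a
+typical upgrade looks like this:
+
+```console
+$ helm repo update
+$ helm upgrade --namespace budibase budibase budibase/budibase -f values.yaml
+```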
+ +## Configuring + +{{ template "chart.valuesTable" . }} + +## Uninstalling + +To uninstall the chart, assuming you named the release `budibase` (both commands in the installation section do so): + +```console +$ helm uninstall --namespace budibase budibase +``` + +{{ template "helm-docs.versionFooter" . }} diff --git a/charts/budibase/charts/couchdb-3.3.4.tgz b/charts/budibase/charts/couchdb-3.3.4.tgz deleted file mode 100644 index f7ebfd3e96..0000000000 Binary files a/charts/budibase/charts/couchdb-3.3.4.tgz and /dev/null differ diff --git a/charts/budibase/charts/couchdb-4.3.0.tgz b/charts/budibase/charts/couchdb-4.3.0.tgz new file mode 100644 index 0000000000..d3cce28ee6 Binary files /dev/null and b/charts/budibase/charts/couchdb-4.3.0.tgz differ diff --git a/charts/budibase/charts/ingress-nginx-4.0.13.tgz b/charts/budibase/charts/ingress-nginx-4.0.13.tgz deleted file mode 100644 index 1e34215c5f..0000000000 Binary files a/charts/budibase/charts/ingress-nginx-4.0.13.tgz and /dev/null differ diff --git a/charts/budibase/templates/alb-ingress.yaml b/charts/budibase/templates/alb-ingress.yaml index 6cd1cf2cba..89b4e9e2cb 100644 --- a/charts/budibase/templates/alb-ingress.yaml +++ b/charts/budibase/templates/alb-ingress.yaml @@ -1,4 +1,4 @@ -{{- if .Values.ingress.aws }} +{{- if .Values.awsAlbIngress.enabled }} apiVersion: networking.k8s.io/v1 kind: Ingress metadata: @@ -7,24 +7,24 @@ metadata: kubernetes.io/ingress.class: alb alb.ingress.kubernetes.io/scheme: internet-facing alb.ingress.kubernetes.io/target-type: ip - alb.ingress.kubernetes.io/success-codes: 200,301 - alb.ingress.kubernetes.io/healthcheck-path: / - {{- if .Values.ingress.certificateArn }} + alb.ingress.kubernetes.io/success-codes: '200' + alb.ingress.kubernetes.io/healthcheck-path: '/health' + {{- if .Values.awsAlbIngress.certificateArn }} alb.ingress.kubernetes.io/actions.ssl-redirect: '{"Type": "redirect", "RedirectConfig": { "Protocol": "HTTPS", "Port": "443", "StatusCode": "HTTP_301"}}' alb.ingress.kubernetes.io/listen-ports: '[{"HTTP": 80}, {"HTTPS":443}]' - alb.ingress.kubernetes.io/certificate-arn: {{ .Values.ingress.certificateArn }} + alb.ingress.kubernetes.io/certificate-arn: {{ .Values.awsAlbIngress.certificateArn }} {{- end }} - {{- if .Values.ingress.sslPolicy }} - alb.ingress.kubernetes.io/actions.ssl-policy: {{ .Values.ingress.sslPolicy }} + {{- if .Values.awsAlbIngress.sslPolicy }} + alb.ingress.kubernetes.io/actions.ssl-policy: {{ .Values.awsAlbIngress.sslPolicy }} {{- end }} - {{- if .Values.ingress.securityGroups }} - alb.ingress.kubernetes.io/security-groups: {{ .Values.ingress.securityGroups }} + {{- if .Values.awsAlbIngress.securityGroups }} + alb.ingress.kubernetes.io/security-groups: {{ .Values.awsAlbIngress.securityGroups }} {{- end }} spec: rules: - http: paths: - {{- if .Values.ingress.certificateArn }} + {{- if .Values.awsAlbIngress.certificateArn }} - path: / pathType: Prefix backend: diff --git a/charts/budibase/templates/app-service-deployment.yaml b/charts/budibase/templates/app-service-deployment.yaml index 73c6d990d2..c7c4481122 100644 --- a/charts/budibase/templates/app-service-deployment.yaml +++ b/charts/budibase/templates/app-service-deployment.yaml @@ -2,12 +2,9 @@ apiVersion: apps/v1 kind: Deployment metadata: annotations: - kompose.cmd: kompose convert - kompose.version: 1.21.0 (992df58d8) {{ if .Values.services.apps.deploymentAnnotations }} {{- toYaml .Values.services.apps.deploymentAnnotations | indent 4 -}} {{ end }} - creationTimestamp: null labels: io.kompose.service: 
app-service {{ if .Values.services.apps.deploymentLabels }} @@ -24,12 +21,9 @@ spec: template: metadata: annotations: - kompose.cmd: kompose convert - kompose.version: 1.21.0 (992df58d8) {{ if .Values.services.apps.templateAnnotations }} {{- toYaml .Values.services.apps.templateAnnotations | indent 8 -}} {{ end }} - creationTimestamp: null labels: io.kompose.service: app-service {{ if .Values.services.apps.templateLabels }} @@ -198,7 +192,14 @@ spec: - name: NODE_TLS_REJECT_UNAUTHORIZED value: {{ .Values.services.tlsRejectUnauthorized }} {{ end }} - + {{- if .Values.services.automationWorkers.enabled }} + - name: APP_FEATURES + value: "api" + {{- end }} + {{- range .Values.services.apps.extraEnv }} + - name: {{ .name }} + value: {{ .value | quote }} + {{- end }} image: budibase/apps:{{ .Values.globals.appVersion | default .Chart.AppVersion }} imagePullPolicy: Always {{- if .Values.services.apps.startupProbe }} @@ -226,6 +227,14 @@ spec: resources: {{- toYaml . | nindent 10 }} {{ end }} + {{ if .Values.services.apps.command }} + command: + {{- toYaml .Values.services.apps.command | nindent 10 }} + {{ end }} + {{ if .Values.services.apps.args }} + args: + {{- toYaml .Values.services.apps.args | nindent 10 }} + {{ end }} {{- with .Values.affinity }} affinity: {{- toYaml . | nindent 8 }} @@ -243,4 +252,10 @@ spec: {{ end }} restartPolicy: Always serviceAccountName: "" + {{ if .Values.services.apps.ndots }} + dnsConfig: + options: + - name: ndots + value: {{ .Values.services.apps.ndots | quote }} + {{ end }} status: {} diff --git a/charts/budibase/templates/app-service-hpa.yaml b/charts/budibase/templates/app-service-hpa.yaml new file mode 100644 index 0000000000..e819ecb9e3 --- /dev/null +++ b/charts/budibase/templates/app-service-hpa.yaml @@ -0,0 +1,32 @@ +{{- if .Values.services.apps.autoscaling.enabled }} +apiVersion: {{ ternary "autoscaling/v2" "autoscaling/v2beta2" (.Capabilities.APIVersions.Has "autoscaling/v2") }} +kind: HorizontalPodAutoscaler +metadata: + name: {{ include "budibase.fullname" . }}-apps + labels: + {{- include "budibase.labels" . 
| nindent 4 }} +spec: + scaleTargetRef: + apiVersion: apps/v1 + kind: Deployment + name: app-service + minReplicas: {{ .Values.services.apps.autoscaling.minReplicas }} + maxReplicas: {{ .Values.services.apps.autoscaling.maxReplicas }} + metrics: + {{- if .Values.services.apps.autoscaling.targetCPUUtilizationPercentage }} + - type: Resource + resource: + name: cpu + target: + type: Utilization + averageUtilization: {{ .Values.services.apps.autoscaling.targetCPUUtilizationPercentage }} + {{- end }} + {{- if .Values.services.apps.autoscaling.targetMemoryUtilizationPercentage }} + - type: Resource + resource: + name: memory + target: + type: Utilization + averageUtilization: {{ .Values.services.apps.autoscaling.targetMemoryUtilizationPercentage }} + {{- end }} +{{- end }} diff --git a/charts/budibase/templates/app-service-service.yaml b/charts/budibase/templates/app-service-service.yaml index 5247b4de09..6d19590d45 100644 --- a/charts/budibase/templates/app-service-service.yaml +++ b/charts/budibase/templates/app-service-service.yaml @@ -1,10 +1,6 @@ apiVersion: v1 kind: Service metadata: - annotations: - kompose.cmd: kompose convert - kompose.version: 1.21.0 (992df58d8) - creationTimestamp: null labels: io.kompose.service: app-service name: app-service diff --git a/charts/budibase/templates/automation-worker-service-deployment.yaml b/charts/budibase/templates/automation-worker-service-deployment.yaml new file mode 100644 index 0000000000..36c3a8ffbf --- /dev/null +++ b/charts/budibase/templates/automation-worker-service-deployment.yaml @@ -0,0 +1,262 @@ +{{- if .Values.services.automationWorkers.enabled }} +apiVersion: apps/v1 +kind: Deployment +metadata: + annotations: +{{ if .Values.services.automationWorkers.deploymentAnnotations }} +{{- toYaml .Values.services.automationWorkers.deploymentAnnotations | indent 4 -}} +{{ end }} + labels: + io.kompose.service: automation-worker-service +{{ if .Values.services.automationWorkers.deploymentLabels }} +{{- toYaml .Values.services.automationWorkers.deploymentLabels | indent 4 -}} +{{ end }} + name: automation-worker-service +spec: + replicas: {{ .Values.services.automationWorkers.replicaCount }} + selector: + matchLabels: + io.kompose.service: automation-worker-service + strategy: + type: RollingUpdate + template: + metadata: + annotations: +{{ if .Values.services.automationWorkers.templateAnnotations }} +{{- toYaml .Values.services.automationWorkers.templateAnnotations | indent 8 -}} +{{ end }} + labels: + io.kompose.service: automation-worker-service +{{ if .Values.services.automationWorkers.templateLabels }} +{{- toYaml .Values.services.automationWorkers.templateLabels | indent 8 -}} +{{ end }} + spec: + containers: + - env: + - name: BUDIBASE_ENVIRONMENT + value: {{ .Values.globals.budibaseEnv }} + - name: DEPLOYMENT_ENVIRONMENT + value: "kubernetes" + - name: COUCH_DB_URL + {{ if .Values.services.couchdb.url }} + value: {{ .Values.services.couchdb.url }} + {{ else }} + value: http://{{ .Release.Name }}-svc-couchdb:{{ .Values.services.couchdb.port }} + {{ end }} + {{ if .Values.services.couchdb.enabled }} + - name: COUCH_DB_USER + valueFrom: + secretKeyRef: + name: {{ template "couchdb.fullname" . }} + key: adminUsername + - name: COUCH_DB_PASSWORD + valueFrom: + secretKeyRef: + name: {{ template "couchdb.fullname" . 
}} + key: adminPassword + {{ end }} + - name: ENABLE_ANALYTICS + value: {{ .Values.globals.enableAnalytics | quote }} + - name: API_ENCRYPTION_KEY + value: {{ .Values.globals.apiEncryptionKey | quote }} + - name: HTTP_LOGGING + value: {{ .Values.services.automationWorkers.httpLogging | quote }} + - name: INTERNAL_API_KEY + valueFrom: + secretKeyRef: + name: {{ template "budibase.fullname" . }} + key: internalApiKey + - name: INTERNAL_API_KEY_FALLBACK + value: {{ .Values.globals.internalApiKeyFallback | quote }} + - name: JWT_SECRET + valueFrom: + secretKeyRef: + name: {{ template "budibase.fullname" . }} + key: jwtSecret + - name: JWT_SECRET_FALLBACK + value: {{ .Values.globals.jwtSecretFallback | quote }} + {{ if .Values.services.objectStore.region }} + - name: AWS_REGION + value: {{ .Values.services.objectStore.region }} + {{ end }} + - name: MINIO_ENABLED + value: {{ .Values.services.objectStore.minio | quote }} + - name: MINIO_ACCESS_KEY + valueFrom: + secretKeyRef: + name: {{ template "budibase.fullname" . }} + key: objectStoreAccess + - name: MINIO_SECRET_KEY + valueFrom: + secretKeyRef: + name: {{ template "budibase.fullname" . }} + key: objectStoreSecret + - name: CLOUDFRONT_CDN + value: {{ .Values.services.objectStore.cloudfront.cdn | quote }} + - name: CLOUDFRONT_PUBLIC_KEY_ID + value: {{ .Values.services.objectStore.cloudfront.publicKeyId | quote }} + - name: CLOUDFRONT_PRIVATE_KEY_64 + value: {{ .Values.services.objectStore.cloudfront.privateKey64 | quote }} + - name: MINIO_URL + value: {{ .Values.services.objectStore.url }} + - name: PLUGIN_BUCKET_NAME + value: {{ .Values.services.objectStore.pluginBucketName | quote }} + - name: APPS_BUCKET_NAME + value: {{ .Values.services.objectStore.appsBucketName | quote }} + - name: GLOBAL_BUCKET_NAME + value: {{ .Values.services.objectStore.globalBucketName | quote }} + - name: BACKUPS_BUCKET_NAME + value: {{ .Values.services.objectStore.backupsBucketName | quote }} + - name: PORT + value: {{ .Values.services.automationWorkers.port | quote }} + {{ if .Values.services.worker.publicApiRateLimitPerSecond }} + - name: API_REQ_LIMIT_PER_SEC + value: {{ .Values.globals.automationWorkers.publicApiRateLimitPerSecond | quote }} + {{ end }} + - name: MULTI_TENANCY + value: {{ .Values.globals.multiTenancy | quote }} + - name: OFFLINE_MODE + value: {{ .Values.globals.offlineMode | quote }} + - name: LOG_LEVEL + value: {{ .Values.services.automationWorkers.logLevel | quote }} + - name: REDIS_PASSWORD + value: {{ .Values.services.redis.password }} + - name: REDIS_URL + {{ if .Values.services.redis.url }} + value: {{ .Values.services.redis.url }} + {{ else }} + value: redis-service:{{ .Values.services.redis.port }} + {{ end }} + - name: SELF_HOSTED + value: {{ .Values.globals.selfHosted | quote }} + - name: POSTHOG_TOKEN + value: {{ .Values.globals.posthogToken | quote }} + - name: WORKER_URL + value: http://worker-service:{{ .Values.services.worker.port }} + - name: PLATFORM_URL + value: {{ .Values.globals.platformUrl | quote }} + - name: ACCOUNT_PORTAL_URL + value: {{ .Values.globals.accountPortalUrl | quote }} + - name: ACCOUNT_PORTAL_API_KEY + value: {{ .Values.globals.accountPortalApiKey | quote }} + - name: COOKIE_DOMAIN + value: {{ .Values.globals.cookieDomain | quote }} + - name: HTTP_MIGRATIONS + value: {{ .Values.globals.httpMigrations | quote }} + - name: GOOGLE_CLIENT_ID + value: {{ .Values.globals.google.clientId | quote }} + - name: GOOGLE_CLIENT_SECRET + value: {{ .Values.globals.google.secret | quote }} + - name: 
AUTOMATION_MAX_ITERATIONS + value: {{ .Values.globals.automationMaxIterations | quote }} + - name: TENANT_FEATURE_FLAGS + value: {{ .Values.globals.tenantFeatureFlags | quote }} + - name: ENCRYPTION_KEY + value: {{ .Values.globals.bbEncryptionKey | quote }} + {{ if .Values.globals.bbAdminUserEmail }} + - name: BB_ADMIN_USER_EMAIL + value: {{ .Values.globals.bbAdminUserEmail | quote }} + {{ end }} + {{ if .Values.globals.bbAdminUserPassword }} + - name: BB_ADMIN_USER_PASSWORD + value: {{ .Values.globals.bbAdminUserPassword | quote }} + {{ end }} + {{ if .Values.globals.pluginsDir }} + - name: PLUGINS_DIR + value: {{ .Values.globals.pluginsDir | quote }} + {{ end }} + {{ if .Values.services.automationWorkers.nodeDebug }} + - name: NODE_DEBUG + value: {{ .Values.services.automationWorkers.nodeDebug | quote }} + {{ end }} + {{ if .Values.globals.datadogApmEnabled }} + - name: DD_LOGS_INJECTION + value: {{ .Values.globals.datadogApmEnabled | quote }} + - name: DD_APM_ENABLED + value: {{ .Values.globals.datadogApmEnabled | quote }} + - name: DD_APM_DD_URL + value: https://trace.agent.datadoghq.eu + {{ end }} + {{ if .Values.globals.globalAgentHttpProxy }} + - name: GLOBAL_AGENT_HTTP_PROXY + value: {{ .Values.globals.globalAgentHttpProxy | quote }} + {{ end }} + {{ if .Values.globals.globalAgentHttpsProxy }} + - name: GLOBAL_AGENT_HTTPS_PROXY + value: {{ .Values.globals.globalAgentHttpsProxy | quote }} + {{ end }} + {{ if .Values.globals.globalAgentNoProxy }} + - name: GLOBAL_AGENT_NO_PROXY + value: {{ .Values.globals.globalAgentNoProxy | quote }} + {{ end }} + {{ if .Values.services.tlsRejectUnauthorized }} + - name: NODE_TLS_REJECT_UNAUTHORIZED + value: {{ .Values.services.tlsRejectUnauthorized }} + {{ end }} + - name: APP_FEATURES + value: "automations" + {{- range .Values.services.automationWorkers.extraEnv }} + - name: {{ .name }} + value: {{ .value | quote }} + {{- end }} + + image: budibase/apps:{{ .Values.globals.appVersion | default .Chart.AppVersion }} + imagePullPolicy: Always + {{- if .Values.services.automationWorkers.startupProbe }} + {{- with .Values.services.automationWorkers.startupProbe }} + startupProbe: + {{- toYaml . | nindent 10 }} + {{- end }} + {{- end }} + {{- if .Values.services.automationWorkers.livenessProbe }} + {{- with .Values.services.automationWorkers.livenessProbe }} + livenessProbe: + {{- toYaml . | nindent 10 }} + {{- end }} + {{- end }} + {{- if .Values.services.automationWorkers.readinessProbe }} + {{- with .Values.services.automationWorkers.readinessProbe }} + readinessProbe: + {{- toYaml . | nindent 10 }} + {{- end }} + {{- end }} + name: bbautomationworker + ports: + - containerPort: {{ .Values.services.automationWorkers.port }} + {{ with .Values.services.automationWorkers.resources }} + resources: + {{- toYaml . | nindent 10 }} + {{ end }} + {{ if .Values.services.automationWorkers.command }} + command: + {{- toYaml .Values.services.automationWorkers.command | nindent 10 }} + {{ end }} + {{ if .Values.services.automationWorkers.args }} + args: + {{- toYaml .Values.services.automationWorkers.args | nindent 10 }} + {{ end }} + {{- with .Values.affinity }} + affinity: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.tolerations }} + tolerations: + {{- toYaml . 
| nindent 8 }} + {{- end }} + {{ if .Values.schedulerName }} + schedulerName: {{ .Values.schedulerName | quote }} + {{ end }} + {{ if .Values.imagePullSecrets }} + imagePullSecrets: + {{- toYaml .Values.imagePullSecrets | nindent 6 }} + {{ end }} + restartPolicy: Always + serviceAccountName: "" + {{ if .Values.services.automationWorkers.ndots }} + dnsConfig: + options: + - name: ndots + value: {{ .Values.services.automationWorkers.ndots | quote }} + {{ end }} +status: {} +{{- end }} \ No newline at end of file diff --git a/charts/budibase/templates/automation-worker-service-hpa.yaml b/charts/budibase/templates/automation-worker-service-hpa.yaml new file mode 100644 index 0000000000..f29223b61b --- /dev/null +++ b/charts/budibase/templates/automation-worker-service-hpa.yaml @@ -0,0 +1,32 @@ +{{- if .Values.services.automationWorkers.autoscaling.enabled }} +apiVersion: {{ ternary "autoscaling/v2" "autoscaling/v2beta2" (.Capabilities.APIVersions.Has "autoscaling/v2") }} +kind: HorizontalPodAutoscaler +metadata: + name: {{ include "budibase.fullname" . }}-apps + labels: + {{- include "budibase.labels" . | nindent 4 }} +spec: + scaleTargetRef: + apiVersion: apps/v1 + kind: Deployment + name: automation-worker-service + minReplicas: {{ .Values.services.automationWorkers.autoscaling.minReplicas }} + maxReplicas: {{ .Values.services.automationWorkers.autoscaling.maxReplicas }} + metrics: + {{- if .Values.services.automationWorkers.autoscaling.targetCPUUtilizationPercentage }} + - type: Resource + resource: + name: cpu + target: + type: Utilization + averageUtilization: {{ .Values.services.automationWorkers.autoscaling.targetCPUUtilizationPercentage }} + {{- end }} + {{- if .Values.services.automationWorkers.autoscaling.targetMemoryUtilizationPercentage }} + - type: Resource + resource: + name: memory + target: + type: Utilization + averageUtilization: {{ .Values.services.automationWorkers.autoscaling.targetMemoryUtilizationPercentage }} + {{- end }} +{{- end }} diff --git a/charts/budibase/templates/couchdb-backup.yaml b/charts/budibase/templates/couchdb-backup.yaml index 7396f97476..6f842537a7 100644 --- a/charts/budibase/templates/couchdb-backup.yaml +++ b/charts/budibase/templates/couchdb-backup.yaml @@ -2,10 +2,6 @@ apiVersion: apps/v1 kind: Deployment metadata: - annotations: - kompose.cmd: kompose convert - kompose.version: 1.21.0 (992df58d8) - creationTimestamp: null labels: app.kubernetes.io/name: couchdb-backup name: couchdb-backup @@ -18,10 +14,6 @@ spec: type: Recreate template: metadata: - annotations: - kompose.cmd: kompose convert - kompose.version: 1.21.0 (992df58d8) - creationTimestamp: null labels: app.kubernetes.io/name: couchdb-backup spec: diff --git a/charts/budibase/templates/hpa.yaml b/charts/budibase/templates/hpa.yaml deleted file mode 100644 index 2f901b4664..0000000000 --- a/charts/budibase/templates/hpa.yaml +++ /dev/null @@ -1,28 +0,0 @@ -{{- if .Values.autoscaling.enabled }} -apiVersion: autoscaling/v2beta1 -kind: HorizontalPodAutoscaler -metadata: - name: {{ include "budibase.fullname" . }} - labels: - {{- include "budibase.labels" . | nindent 4 }} -spec: - scaleTargetRef: - apiVersion: apps/v1 - kind: Deployment - name: {{ include "budibase.fullname" . 
}} - minReplicas: {{ .Values.autoscaling.minReplicas }} - maxReplicas: {{ .Values.autoscaling.maxReplicas }} - metrics: - {{- if .Values.autoscaling.targetCPUUtilizationPercentage }} - - type: Resource - resource: - name: cpu - targetAverageUtilization: {{ .Values.autoscaling.targetCPUUtilizationPercentage }} - {{- end }} - {{- if .Values.autoscaling.targetMemoryUtilizationPercentage }} - - type: Resource - resource: - name: memory - targetAverageUtilization: {{ .Values.autoscaling.targetMemoryUtilizationPercentage }} - {{- end }} -{{- end }} diff --git a/charts/budibase/templates/minio-data-persistentvolumeclaim.yaml b/charts/budibase/templates/minio-data-persistentvolumeclaim.yaml index abcf341bc5..c17001a436 100644 --- a/charts/budibase/templates/minio-data-persistentvolumeclaim.yaml +++ b/charts/budibase/templates/minio-data-persistentvolumeclaim.yaml @@ -2,7 +2,6 @@ apiVersion: v1 kind: PersistentVolumeClaim metadata: - creationTimestamp: null labels: io.kompose.service: minio-data name: minio-data diff --git a/charts/budibase/templates/minio-service-deployment.yaml b/charts/budibase/templates/minio-service-deployment.yaml index 41af2624bf..28e8eb9991 100644 --- a/charts/budibase/templates/minio-service-deployment.yaml +++ b/charts/budibase/templates/minio-service-deployment.yaml @@ -2,10 +2,6 @@ apiVersion: apps/v1 kind: Deployment metadata: - annotations: - kompose.cmd: kompose convert - kompose.version: 1.21.0 (992df58d8) - creationTimestamp: null labels: io.kompose.service: minio-service name: minio-service @@ -18,10 +14,6 @@ spec: type: Recreate template: metadata: - annotations: - kompose.cmd: kompose convert - kompose.version: 1.21.0 (992df58d8) - creationTimestamp: null labels: io.kompose.service: minio-service spec: @@ -46,11 +38,9 @@ spec: image: minio/minio imagePullPolicy: "" livenessProbe: - exec: - command: - - curl - - -f - - http://localhost:9000/minio/health/live + httpGet: + path: /minio/health/live + port: 9000 failureThreshold: 3 periodSeconds: 30 timeoutSeconds: 20 diff --git a/charts/budibase/templates/minio-service-service.yaml b/charts/budibase/templates/minio-service-service.yaml index cfdb22002b..ce89f1347c 100644 --- a/charts/budibase/templates/minio-service-service.yaml +++ b/charts/budibase/templates/minio-service-service.yaml @@ -2,10 +2,6 @@ apiVersion: v1 kind: Service metadata: - annotations: - kompose.cmd: kompose convert - kompose.version: 1.21.0 (992df58d8) - creationTimestamp: null labels: io.kompose.service: minio-service name: minio-service diff --git a/charts/budibase/templates/proxy-service-deployment.yaml b/charts/budibase/templates/proxy-service-deployment.yaml index 53bba6232d..233028cafe 100644 --- a/charts/budibase/templates/proxy-service-deployment.yaml +++ b/charts/budibase/templates/proxy-service-deployment.yaml @@ -2,12 +2,9 @@ apiVersion: apps/v1 kind: Deployment metadata: annotations: - kompose.cmd: kompose convert - kompose.version: 1.21.0 (992df58d8) {{ if .Values.services.proxy.deploymentAnnotations }} {{- toYaml .Values.services.proxy.deploymentAnnotations | indent 4 -}} {{ end }} - creationTimestamp: null labels: app.kubernetes.io/name: budibase-proxy {{ if .Values.services.proxy.deploymentLabels }} @@ -19,17 +16,15 @@ spec: selector: matchLabels: app.kubernetes.io/name: budibase-proxy + minReadySeconds: 10 strategy: type: RollingUpdate template: metadata: annotations: - kompose.cmd: kompose convert - kompose.version: 1.21.0 (992df58d8) {{ if .Values.services.proxy.templateAnnotations }} {{- toYaml 
.Values.services.proxy.templateAnnotations | indent 8 -}} {{ end }} - creationTimestamp: null labels: app.kubernetes.io/name: budibase-proxy {{ if .Values.services.proxy.templateLabels }} @@ -105,5 +100,19 @@ spec: {{ end }} restartPolicy: Always serviceAccountName: "" + {{ if .Values.services.proxy.command }} + command: + {{- toYaml .Values.services.proxy.command | nindent 8 }} + {{ end }} + {{ if .Values.services.proxy.args }} + args: + {{- toYaml .Values.services.proxy.args | nindent 8 }} + {{ end }} volumes: + {{ if .Values.services.proxy.ndots }} + dnsConfig: + options: + - name: ndots + value: {{ .Values.services.proxy.ndots | quote }} + {{ end }} status: {} diff --git a/charts/budibase/templates/proxy-service-hpa.yaml b/charts/budibase/templates/proxy-service-hpa.yaml new file mode 100644 index 0000000000..b6c6022008 --- /dev/null +++ b/charts/budibase/templates/proxy-service-hpa.yaml @@ -0,0 +1,32 @@ +{{- if .Values.services.proxy.autoscaling.enabled }} +apiVersion: {{ ternary "autoscaling/v2" "autoscaling/v2beta2" (.Capabilities.APIVersions.Has "autoscaling/v2") }} +kind: HorizontalPodAutoscaler +metadata: + name: {{ include "budibase.fullname" . }}-proxy + labels: + {{- include "budibase.labels" . | nindent 4 }} +spec: + scaleTargetRef: + apiVersion: apps/v1 + kind: Deployment + name: proxy-service + minReplicas: {{ .Values.services.proxy.autoscaling.minReplicas }} + maxReplicas: {{ .Values.services.proxy.autoscaling.maxReplicas }} + metrics: + {{- if .Values.services.proxy.autoscaling.targetCPUUtilizationPercentage }} + - type: Resource + resource: + name: cpu + target: + type: Utilization + averageUtilization: {{ .Values.services.proxy.autoscaling.targetCPUUtilizationPercentage }} + {{- end }} + {{- if .Values.services.proxy.autoscaling.targetMemoryUtilizationPercentage }} + - type: Resource + resource: + name: memory + target: + type: Utilization + averageUtilization: {{ .Values.services.proxy.autoscaling.targetMemoryUtilizationPercentage }} + {{- end }} +{{- end }} diff --git a/charts/budibase/templates/proxy-service-service.yaml b/charts/budibase/templates/proxy-service-service.yaml index bf2b199ee5..988c540599 100644 --- a/charts/budibase/templates/proxy-service-service.yaml +++ b/charts/budibase/templates/proxy-service-service.yaml @@ -1,10 +1,6 @@ apiVersion: v1 kind: Service metadata: - annotations: - kompose.cmd: kompose convert - kompose.version: 1.21.0 (992df58d8) - creationTimestamp: null labels: app.kubernetes.io/name: budibase-proxy name: proxy-service @@ -16,4 +12,4 @@ spec: selector: app.kubernetes.io/name: budibase-proxy status: - loadBalancer: {} \ No newline at end of file + loadBalancer: {} diff --git a/charts/budibase/templates/redis-data-persistentvolumeclaim.yaml b/charts/budibase/templates/redis-data-persistentvolumeclaim.yaml index 6f11492b58..e605a98376 100644 --- a/charts/budibase/templates/redis-data-persistentvolumeclaim.yaml +++ b/charts/budibase/templates/redis-data-persistentvolumeclaim.yaml @@ -2,7 +2,6 @@ apiVersion: v1 kind: PersistentVolumeClaim metadata: - creationTimestamp: null labels: io.kompose.service: redis-data name: redis-data diff --git a/charts/budibase/templates/redis-service-deployment.yaml b/charts/budibase/templates/redis-service-deployment.yaml index 9b39d14291..bca40d2237 100644 --- a/charts/budibase/templates/redis-service-deployment.yaml +++ b/charts/budibase/templates/redis-service-deployment.yaml @@ -2,10 +2,6 @@ apiVersion: apps/v1 kind: Deployment metadata: - annotations: - kompose.cmd: kompose convert - kompose.version: 
1.21.0 (992df58d8) - creationTimestamp: null labels: io.kompose.service: redis-service name: redis-service @@ -18,10 +14,6 @@ spec: type: Recreate template: metadata: - annotations: - kompose.cmd: kompose convert - kompose.version: 1.21.0 (992df58d8) - creationTimestamp: null labels: io.kompose.service: redis-service spec: diff --git a/charts/budibase/templates/redis-service-service.yaml b/charts/budibase/templates/redis-service-service.yaml index 55ca40ed88..b8f998925a 100644 --- a/charts/budibase/templates/redis-service-service.yaml +++ b/charts/budibase/templates/redis-service-service.yaml @@ -2,10 +2,6 @@ apiVersion: v1 kind: Service metadata: - annotations: - kompose.cmd: kompose convert - kompose.version: 1.21.0 (992df58d8) - creationTimestamp: null labels: io.kompose.service: redis-service name: redis-service diff --git a/charts/budibase/templates/worker-service-deployment.yaml b/charts/budibase/templates/worker-service-deployment.yaml index 5e0addb9dd..2f97508ae3 100644 --- a/charts/budibase/templates/worker-service-deployment.yaml +++ b/charts/budibase/templates/worker-service-deployment.yaml @@ -2,12 +2,9 @@ apiVersion: apps/v1 kind: Deployment metadata: annotations: - kompose.cmd: kompose convert - kompose.version: 1.21.0 (992df58d8) {{ if .Values.services.worker.deploymentAnnotations }} {{- toYaml .Values.services.worker.deploymentAnnotations | indent 4 -}} {{ end }} - creationTimestamp: null labels: io.kompose.service: worker-service {{ if .Values.services.worker.deploymentLabels }} @@ -24,12 +21,9 @@ spec: template: metadata: annotations: - kompose.cmd: kompose convert - kompose.version: 1.21.0 (992df58d8) {{ if .Values.services.worker.templateAnnotations }} {{- toYaml .Values.services.worker.templateAnnotations | indent 8 -}} {{ end }} - creationTimestamp: null labels: io.kompose.service: worker-service {{ if .Values.services.worker.templateLabels }} @@ -188,6 +182,10 @@ spec: - name: NODE_TLS_REJECT_UNAUTHORIZED value: {{ .Values.services.tlsRejectUnauthorized }} {{ end }} + {{- range .Values.services.worker.extraEnv }} + - name: {{ .name }} + value: {{ .value | quote }} + {{- end }} image: budibase/worker:{{ .Values.globals.appVersion | default .Chart.AppVersion }} imagePullPolicy: Always {{- if .Values.services.worker.startupProbe }} @@ -215,6 +213,14 @@ spec: resources: {{- toYaml . | nindent 10 }} {{ end }} + {{ if .Values.services.worker.command }} + command: + {{- toYaml .Values.services.worker.command | nindent 10 }} + {{ end }} + {{ if .Values.services.worker.args }} + args: + {{- toYaml .Values.services.worker.args | nindent 10 }} + {{ end }} {{- with .Values.affinity }} affinity: {{- toYaml . | nindent 8 }} @@ -232,4 +238,10 @@ spec: {{ end }} restartPolicy: Always serviceAccountName: "" + {{ if .Values.services.worker.ndots }} + dnsConfig: + options: + - name: ndots + value: {{ .Values.services.worker.ndots | quote }} + {{ end }} status: {} diff --git a/charts/budibase/templates/worker-service-hpa.yaml b/charts/budibase/templates/worker-service-hpa.yaml new file mode 100644 index 0000000000..a04cc259a0 --- /dev/null +++ b/charts/budibase/templates/worker-service-hpa.yaml @@ -0,0 +1,32 @@ +{{- if .Values.services.worker.autoscaling.enabled }} +apiVersion: {{ ternary "autoscaling/v2" "autoscaling/v2beta2" (.Capabilities.APIVersions.Has "autoscaling/v2") }} +kind: HorizontalPodAutoscaler +metadata: + name: {{ include "budibase.fullname" . }}-worker + labels: + {{- include "budibase.labels" . 
| nindent 4 }} +spec: + scaleTargetRef: + apiVersion: apps/v1 + kind: Deployment + name: worker-service + minReplicas: {{ .Values.services.worker.autoscaling.minReplicas }} + maxReplicas: {{ .Values.services.worker.autoscaling.maxReplicas }} + metrics: + {{- if .Values.services.worker.autoscaling.targetCPUUtilizationPercentage }} + - type: Resource + resource: + name: cpu + target: + type: Utilization + averageUtilization: {{ .Values.services.worker.autoscaling.targetCPUUtilizationPercentage }} + {{- end }} + {{- if .Values.services.worker.autoscaling.targetMemoryUtilizationPercentage }} + - type: Resource + resource: + name: memory + target: + type: Utilization + averageUtilization: {{ .Values.services.worker.autoscaling.targetMemoryUtilizationPercentage }} + {{- end }} +{{- end }} diff --git a/charts/budibase/templates/worker-service-service.yaml b/charts/budibase/templates/worker-service-service.yaml index a79ba1e04b..c5f56ba205 100644 --- a/charts/budibase/templates/worker-service-service.yaml +++ b/charts/budibase/templates/worker-service-service.yaml @@ -1,10 +1,6 @@ apiVersion: v1 kind: Service metadata: - annotations: - kompose.cmd: kompose convert - kompose.version: 1.21.0 (992df58d8) - creationTimestamp: null labels: io.kompose.service: worker-service name: worker-service diff --git a/charts/budibase/values.yaml b/charts/budibase/values.yaml index 857067d0f1..09262df463 100644 --- a/charts/budibase/values.yaml +++ b/charts/budibase/values.yaml @@ -1,56 +1,32 @@ -# Default values for budibase. -# This is a YAML-formatted file. -# Declare variables to be passed into your templates. - -image: - pullPolicy: IfNotPresent - # Overrides the image tag whose default is the chart appVersion. - tag: "" - +# -- Passed to all pods created by this chart. Should not ordinarily need to be changed. imagePullSecrets: [] +# -- Override the name of the deploymen. Defaults to {{ .Chart.Name }}. nameOverride: "" -# fullnameOverride: "" serviceAccount: - # Specifies whether a service account should be created + # -- Specifies whether a service account should be created create: true - # Annotations to add to the service account + # -- Annotations to add to the service account annotations: {} - # The name of the service account to use. + # -- The name of the service account to use. # If not set and create is true, a name is generated using the fullname template name: "" -podAnnotations: {} - -podSecurityContext: - {} - # fsGroup: 2000 - -securityContext: - {} - # capabilities: - # drop: - # - ALL - # readOnlyRootFilesystem: true - # runAsNonRoot: true - # runAsUser: 1000 - service: + # -- Service type for the service that points to the main Budibase proxy pod. type: ClusterIP + # -- Port to expose on the service. port: 10000 ingress: + # -- Whether to create an Ingress resource pointing to the Budibase proxy. enabled: true - aws: false - nginx: true - certificateArn: "" + # -- What ingress class to use. className: "" - annotations: - kubernetes.io/ingress.class: nginx - nginx.ingress.kubernetes.io/client-max-body-size: 150M - nginx.ingress.kubernetes.io/proxy-body-size: 50m + # -- Standard hosts block for the Ingress resource. Defaults to pointing to the Budibase proxy. hosts: - - host: # change if using custom domain + # @ignore + - host: paths: - path: / pathType: Prefix @@ -60,361 +36,504 @@ ingress: port: number: 10000 -autoscaling: +awsAlbIngress: + # -- Whether to create an ALB Ingress resource pointing to the Budibase proxy. Requires the AWS ALB Ingress Controller. 
enabled: false - minReplicas: 1 - maxReplicas: 100 - targetCPUUtilizationPercentage: 80 - # targetMemoryUtilizationPercentage: 80 - -nodeSelector: {} + # -- If you're wanting to use HTTPS, you'll need to create an ACM certificate and specify the ARN here. + certificateArn: "" +# -- Sets the tolerations for all pods created by this chart. Should not ordinarily need to be changed. +# See for more information +# on tolerations. tolerations: [] +# -- Sets the affinity for all pods created by this chart. Should not ordinarily +# need to be changed. See +# +# for more information on affinity. affinity: {} globals: - appVersion: "" # Use as an override to .Chart.AppVersion + # -- The version of Budibase to deploy. Defaults to what's specified by {{ .Chart.AppVersion }}. + # Ends up being used as the image version tag for the apps, proxy, and worker images. + appVersion: "" + # -- Sets the environment variable BUDIBASE_ENVIRONMENT for the apps and worker pods. Should not + # ordinarily need to be changed. budibaseEnv: PRODUCTION + # -- Sets what feature flags are enabled and for which tenants. Should not ordinarily need to be + # changed. tenantFeatureFlags: "*:LICENSING,*:USER_GROUPS,*:ONBOARDING_TOUR" + # -- Whether to enable analytics or not. You can read more about our analytics here: + # . enableAnalytics: "1" + # @ignore (only used if enableAnalytics is set to 1) posthogToken: "phc_bIjZL7oh2GEUd2vqvTBH8WvrX0fWTFQMs6H5KQxiUxU" - selfHosted: "1" # set to 0 for budibase cloud environment, set to 1 for self-hosted setup - multiTenancy: "0" # set to 0 to disable multiple orgs, set to 1 to enable multiple orgs - offlineMode: "0" # set to 1 to enable offline mode + # @ignore (should not normally need to be changed, we only set this to "0" + # when deploying to our Cloud environment) + selfHosted: "1" + # @ignore (doesn't work out of the box for self-hosted users, only meant for Budicloud) + multiTenancy: "0" + # @ignore (only currently used to determine whether to fetch licenses offline or not, should + # not normally need to be changed, and only applies to Enterprise customers) + offlineMode: "0" + # @ignore (only needs to be set in our cloud environment) accountPortalUrl: "" + # @ignore (only needs to be set in our cloud environment) accountPortalApiKey: "" + # -- Sets the domain attribute of the cookie that Budibase uses to store session information. + # See + # for details on why you might want to set this. cookieDomain: "" + # -- Set the `platformUrl` binding. You can also do this in Settings > Organisation if you are + # self-hosting. platformUrl: "" + # -- Whether or not to enable doing data migrations over the HTTP API. If this is set to "0", + # migrations are run on startup. You shouldn't ordinarily need to change this. httpMigrations: "0" + # -- Google OAuth settings. These can also be set in the Budibase UI, see + # for details. google: + # -- Client ID of your Google OAuth app. clientId: "" + # -- Client secret of your Google OAuth app. secret: "" + # -- The maximum number of iterations allows for an automation loop step. You can read more about + # looping here: . automationMaxIterations: "200" - createSecrets: true # creates an internal API key, JWT secrets and redis password for you + # -- Create an internal API key, JWT secret, object store access key and + # secret, and store them in a Kubernetes `Secret`. 
+ createSecrets: true - # if createSecrets is set to false, you can hard-code your secrets here + # -- Used for encrypting API keys and environment variables when stored in the database. + # You don't need to set this if `createSecrets` is true. apiEncryptionKey: "" + # -- API key used for internal Budibase API calls. You don't need to set this + # if `createSecrets` is true. internalApiKey: "" + # -- Secret used for signing JWTs. You don't need to set this if `createSecrets` is true. jwtSecret: "" - cdnUrl: "" - # fallback values used during live rotation + + # -- A fallback value for `internalApiKey`. If you're rotating your encryption key, you can + # set this to the old value for the duration of the rotation. internalApiKeyFallback: "" + # -- A fallback value for `jwtSecret`. If you're rotating your JWT secret, you can set this + # to the old value for the duration of the rotation. jwtSecretFallback: "" smtp: + # -- Whether to enable SMTP or not. enabled: false - -# globalAgentHttpProxy: -# globalAgentHttpsProxy: -# globalAgentNoProxy: + # -- The hostname of your SMTP server. + host: "" + # -- The port of your SMTP server. + port: "587" + # -- The email address to use in the "From:" field of emails sent by Budibase. + from: "" + # -- The username to use when authenticating with your SMTP server. + user: "" + # -- The password to use when authenticating with your SMTP server. + password: "" services: - budibaseVersion: latest + # -- The DNS suffix to use for service discovery. You only need to change this + # if you've configured your cluster to use a different DNS suffix. dns: cluster.local - # tlsRejectUnauthorized: 0 proxy: + # @ignore (you shouldn't need to change this) port: 10000 + # -- The number of proxy replicas to run. replicaCount: 1 + # @ignore (you should never need to change this) upstreams: apps: "http://app-service.{{ .Release.Namespace }}.svc.{{ .Values.services.dns }}:{{ .Values.services.apps.port }}" worker: "http://worker-service.{{ .Release.Namespace }}.svc.{{ .Values.services.dns }}:{{ .Values.services.worker.port }}" minio: "http://minio-service.{{ .Release.Namespace }}.svc.{{ .Values.services.dns }}:{{ .Values.services.objectStore.port }}" couchdb: "http://{{ .Release.Name }}-svc-couchdb:{{ .Values.services.couchdb.port }}" + # -- The resources to use for proxy pods. See + # + # for more information on how to set these. resources: {} + # -- Startup probe configuration for proxy pods. You shouldn't need to + # change this, but if you want to you can find more information here: + # + # @default -- HTTP health checks. startupProbe: + # @ignore httpGet: path: /health port: 10000 scheme: HTTP + # @ignore failureThreshold: 30 + # @ignore periodSeconds: 3 + # -- Readiness probe configuration for proxy pods. You shouldn't need to + # change this, but if you want to you can find more information here: + # + # @default -- HTTP health checks. readinessProbe: + # @ignore httpGet: path: /health port: 10000 scheme: HTTP + # @ignore periodSeconds: 3 + # @ignore failureThreshold: 1 + # -- Liveness probe configuration for proxy pods. You shouldn't need to + # change this, but if you want to you can find more information here: + # + # @default -- HTTP health checks. 
livenessProbe: + # @ignore httpGet: path: /health port: 10000 scheme: HTTP + # @ignore failureThreshold: 3 + # @ignore periodSeconds: 5 - # annotations: - # co.elastic.logs/module: nginx - # co.elastic.logs/fileset.stdout: access - # co.elastic.logs/fileset.stderr: error + autoscaling: + # -- Whether to enable horizontal pod autoscaling for the proxy service. + enabled: false + minReplicas: 1 + maxReplicas: 10 + # -- Target CPU utilization percentage for the proxy service. Note that + # for autoscaling to work, you will need to have metrics-server + # configured, and resources set for the proxy pods. + targetCPUUtilizationPercentage: 80 apps: + # @ignore (you shouldn't need to change this) port: 4002 + # -- The number of apps replicas to run. replicaCount: 1 + # -- The log level for the apps service. logLevel: info + # -- Whether or not to log HTTP requests to the apps service. httpLogging: 1 + # -- The resources to use for apps pods. See + # + # for more information on how to set these. resources: {} + # -- Extra environment variables to set for apps pods. Takes a list of + # name=value pairs. + extraEnv: [] + # -- Startup probe configuration for apps pods. You shouldn't need to + # change this, but if you want to you can find more information here: + # + # @default -- HTTP health checks. startupProbe: + # @ignore httpGet: path: /health port: 4002 scheme: HTTP + # @ignore failureThreshold: 30 + # @ignore periodSeconds: 3 + # -- Readiness probe configuration for apps pods. You shouldn't need to + # change this, but if you want to you can find more information here: + # + # @default -- HTTP health checks. readinessProbe: + # @ignore httpGet: path: /health port: 4002 scheme: HTTP + # @ignore periodSeconds: 3 + # @ignore failureThreshold: 1 + # -- Liveness probe configuration for apps pods. You shouldn't need to + # change this, but if you want to you can find more information here: + # + # @default -- HTTP health checks. livenessProbe: + # @ignore httpGet: path: /health port: 4002 scheme: HTTP + # @ignore failureThreshold: 3 + # @ignore periodSeconds: 5 - # nodeDebug: "" # set the value of NODE_DEBUG - # annotations: - # co.elastic.logs/multiline.type: pattern - # co.elastic.logs/multiline.pattern: '^[[:space:]]' - # co.elastic.logs/multiline.negate: false - # co.elastic.logs/multiline.match: after + autoscaling: + # -- Whether to enable horizontal pod autoscaling for the apps service. + enabled: false + minReplicas: 1 + maxReplicas: 10 + # -- Target CPU utilization percentage for the apps service. Note that for + # autoscaling to work, you will need to have metrics-server configured, + # and resources set for the apps pods. + targetCPUUtilizationPercentage: 80 + + automationWorkers: + # -- Whether or not to enable the automation worker service. If you disable this, + # automations will be processed by the apps service. + enabled: true + # @ignore (you shouldn't need to change this) + port: 4002 + # -- The number of automation worker replicas to run. + replicaCount: 1 + # -- The log level for the automation worker service. + logLevel: info + # -- The resources to use for automation worker pods. See + # + # for more information on how to set these. + resources: {} + # -- Extra environment variables to set for automation worker pods. Takes a list of + # name=value pairs. + extraEnv: [] + # -- Startup probe configuration for automation worker pods. You shouldn't + # need to change this, but if you want to you can find more information + # here: + # + # @default -- HTTP health checks. 
+ startupProbe: + # @ignore + httpGet: + path: /health + port: 4002 + scheme: HTTP + # @ignore + failureThreshold: 30 + # @ignore + periodSeconds: 3 + # -- Readiness probe configuration for automation worker pods. You shouldn't + # need to change this, but if you want to you can find more information + # here: + # + # @default -- HTTP health checks. + readinessProbe: + # @ignore + httpGet: + path: /health + port: 4002 + scheme: HTTP + # @ignore + periodSeconds: 3 + # @ignore + failureThreshold: 1 + # -- Liveness probe configuration for automation worker pods. You shouldn't + # need to change this, but if you want to you can find more information + # here: + # + # @default -- HTTP health checks. + livenessProbe: + # @ignore + httpGet: + path: /health + port: 4002 + scheme: HTTP + # @ignore + failureThreshold: 3 + # @ignore + periodSeconds: 30 + autoscaling: + # -- Whether to enable horizontal pod autoscaling for the apps service. + enabled: false + minReplicas: 1 + maxReplicas: 10 + # -- Target CPU utilization percentage for the automation worker service. + # Note that for autoscaling to work, you will need to have metrics-server + # configured, and resources set for the automation worker pods. + targetCPUUtilizationPercentage: 80 + worker: + # @ignore (you shouldn't need to change this) port: 4003 + # -- The number of worker replicas to run. replicaCount: 1 + # -- The log level for the worker service. logLevel: info + # -- Whether or not to log HTTP requests to the worker service. httpLogging: 1 + # -- The resources to use for worker pods. See + # + # for more information on how to set these. resources: {} + # -- Extra environment variables to set for worker pods. Takes a list of + # name=value pairs. + extraEnv: [] + # -- Startup probe configuration for worker pods. You shouldn't need to + # change this, but if you want to you can find more information here: + # + # @default -- HTTP health checks. startupProbe: + # @ignore httpGet: path: /health port: 4003 scheme: HTTP + # @ignore failureThreshold: 30 + # @ignore periodSeconds: 3 + # -- Readiness probe configuration for worker pods. You shouldn't need to + # change this, but if you want to you can find more information here: + # + # @default -- HTTP health checks. readinessProbe: + # @ignore httpGet: path: /health port: 4003 scheme: HTTP + # @ignore periodSeconds: 3 + # @ignore failureThreshold: 1 + # -- Liveness probe configuration for worker pods. You shouldn't need to + # change this, but if you want to you can find more information here: + # + # @default -- HTTP health checks. livenessProbe: + # @ignore httpGet: path: /health port: 4003 scheme: HTTP + # @ignore failureThreshold: 3 + # @ignore periodSeconds: 5 - # annotations: - # co.elastic.logs/multiline.type: pattern - # co.elastic.logs/multiline.pattern: '^[[:space:]]' - # co.elastic.logs/multiline.negate: false - # co.elastic.logs/multiline.match: after + autoscaling: + # -- Whether to enable horizontal pod autoscaling for the worker service. + enabled: false + minReplicas: 1 + maxReplicas: 10 + # -- Target CPU utilization percentage for the worker service. Note that + # for autoscaling to work, you will need to have metrics-server + # configured, and resources set for the worker pods. + targetCPUUtilizationPercentage: 80 couchdb: + # -- Whether or not to spin up a CouchDB instance in your cluster. True by + # default, and the configuration for the CouchDB instance is under the + # `couchdb` key at the root of this file. 
You can see what options are + # available to you by looking at the official CouchDB Helm chart: + # . enabled: true # url: "" # only change if pointing to existing couch server # user: "" # only change if pointing to existing couch server # password: "" # only change if pointing to existing couch server port: 5984 backup: + # -- Whether or not to enable periodic CouchDB backups. This works by replicating + # to another CouchDB instance. enabled: false - # target couchDB instance to back up to + # -- Target CouchDB instance to back up to, either a hostname or an IP address. target: "" - # backup interval in seconds + # -- Backup interval in seconds. interval: "" + # -- The resources to use for CouchDB backup pods. See + # + # for more information on how to set these. resources: {} redis: - enabled: true # disable if using external redis + # -- Whether or not to deploy a Redis pod into your cluster. + enabled: true + # -- Port to expose Redis on. port: 6379 + # @ignore (you should leave this as 1, we don't support clustering Redis) replicaCount: 1 - url: "" # only change if pointing to existing redis cluster and enabled: false - password: "budibase" # recommended to override if using built-in redis + # -- If you choose to run Redis externally to this chart, you can specify the + # connection details here. + url: "" + # -- The password to use when connecting to Redis. It's recommended that you change + # this from the default if you're running Redis in-cluster. + password: "budibase" + # -- How much persistent storage to allocate to Redis. storage: 100Mi - ## If defined, storageClassName: - ## If set to "-", storageClassName: "", which disables dynamic provisioning - ## If undefined (the default) or set to null, no storageClassName spec is - ## set, choosing the default provisioner. + # -- If defined, the given storageClassName is used. If set to "-", + # storageClassName is set to "", which disables dynamic provisioning. If + # undefined (the default) or set to null, no storageClassName spec is set, + # choosing the default provisioner. storageClass: "" + # -- The resources to use for Redis pods. See + # + # for more information on how to set these. resources: {} objectStore: - # Set to false if using another object store such as S3 + # -- Set to false if using another object store, such as S3. You will need + # to set `services.objectStore.url` to point to your bucket if you do this. minio: true + # -- Whether to enable the Minio web console or not. If you're exposing + # Minio to the Internet (via a custom Ingress record, for example), you + # should set this to false. If you're only exposing Minio to your cluster, + # you can leave this as true. browser: true + # @ignore port: 9000 + # @ignore (you should leave this as 1, we don't support clustering Minio) replicaCount: 1 - accessKey: "" # AWS_ACCESS_KEY if using S3 or existing minio access key - secretKey: "" # AWS_SECRET_ACCESS_KEY if using S3 or existing minio secret - region: "" # AWS_REGION if using S3 or existing minio secret - url: "http://minio-service:9000" # only change if pointing to existing minio cluster or S3 and minio: false + # -- AWS_ACCESS_KEY if using S3 + accessKey: "" + # -- AWS_SECRET_ACCESS_KEY if using S3 + secretKey: "" + # -- AWS_REGION if using S3 + region: "" + # -- URL to use for object storage. Only change this if you're using an + # external object store, such as S3. Remember to set `minio: false` if you + # do this. + url: "http://minio-service:9000" + # -- How much storage to give Minio in its PersistentVolumeClaim.
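The redis and objectStore comments above together describe the "bring your own infrastructure" path: disable the in-cluster pod, then point the service at your own endpoint. A combined override sketch (all hostnames and credentials below are placeholders):

```yaml
# sketch: external Redis and S3 instead of the bundled pods (placeholder values)
services:
  redis:
    enabled: false # don't deploy the in-cluster Redis pod
    url: "my-redis.internal:6379"
    password: "a-strong-password"
  objectStore:
    minio: false # don't deploy Minio
    url: "https://s3.eu-west-1.amazonaws.com"
    region: "eu-west-1"
    accessKey: "placeholder-access-key"
    secretKey: "placeholder-secret-key"
```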
storage: 100Mi - ## If defined, storageClassName: - ## If set to "-", storageClassName: "", which disables dynamic provisioning - ## If undefined (the default) or set to null, no storageClassName spec is - ## set, choosing the default provisioner. + # -- If defined, the given storageClassName is used. If set to "-", + # storageClassName is set to "", which disables dynamic provisioning. If + # undefined (the default) or set to null, no storageClassName spec is set, + # choosing the default provisioner. storageClass: "" + # -- The resources to use for Minio pods. See + # + # for more information on how to set these. resources: {} cloudfront: - # Set the url of a distribution to enable cloudfront + # -- Set the URL of a distribution to enable CloudFront. cdn: "" - # ID of public key stored in cloudfront + # -- ID of the public key stored in CloudFront. publicKeyId: "" - # Base64 encoded private key for the above public key + # -- Base64-encoded private key for the above public key. privateKey64: "" -# Override values in couchDB subchart +# Override values in the CouchDB subchart. We're only specifying the values we're changing. +# If you want to see all of the available values, see: +# https://github.com/apache/couchdb-helm/tree/couchdb-4.3.0/couchdb couchdb: - ## clusterSize is the initial size of the CouchDB cluster. + # -- The number of replicas to run in the CouchDB cluster. We set this to + # 1 by default to make things simpler, but you can set it to 3 if you need + # a high-availability CouchDB cluster. clusterSize: 1 - allowAdminParty: false - # Secret Management - createAdminSecret: true - - # adminUsername: budibase - # adminPassword: budibase - # adminHash: -pbkdf2-this_is_not_necessarily_secure_either - # cookieAuthSecret: admin - - ## When enabled, will deploy a networkpolicy that allows CouchDB pods to - ## communicate with each other for clustering and ingress on port 5984 - networkPolicy: - enabled: true - - # Use a service account - serviceAccount: - enabled: true - create: true - # name: - # imagePullSecrets: - # - name: myimagepullsecret - - ## The storage volume used by each Pod in the StatefulSet. If a - ## persistentVolume is not enabled, the Pods will use `emptyDir` ephemeral - ## local storage. Setting the storageClass attribute to "-" disables dynamic - ## provisioning of Persistent Volumes; leaving it unset will invoke the default - ## provisioner. - persistentVolume: - enabled: false - accessModes: - - ReadWriteOnce - size: 10Gi - storageClass: "" - - ## The CouchDB image + # -- We use a custom CouchDB image for running Budibase and we don't support + # using any other CouchDB image. You shouldn't change this, and if you do we + # can't guarantee that Budibase will work. image: - repository: couchdb - tag: 3.1.1 - pullPolicy: IfNotPresent - - ## Experimental integration with Lucene-powered fulltext search - enableSearch: true - searchImage: - repository: kocolosk/couchdb-search - tag: 0.2.0 - pullPolicy: IfNotPresent - - initImage: - repository: busybox - tag: latest + # @ignore + repository: budibase/couchdb + # @ignore + tag: v3.2.1 + # @ignore pullPolicy: Always - ## CouchDB is happy to spin up cluster nodes in parallel, but if you encounter - ## problems you can try setting podManagementPolicy to the StatefulSet default - ## `OrderedReady` - podManagementPolicy: Parallel + # @ignore + # This should remain false. We ship Clouseau ourselves as part of the + # budibase/couchdb image, and it's not possible to disable it because it's a + # core part of the Budibase experience.
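Given the clusterSize comment above, moving to a highly-available CouchDB is a one-line override against the subchart (a sketch; the full list of subchart options lives in the couchdb-helm repository referenced above):

```yaml
# sketch: three CouchDB replicas via the subchart override
couchdb:
  clusterSize: 3
```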
+ enableSearch: false - ## Optional pod annotations - annotations: {} - - ## Optional tolerations - tolerations: [] - - affinity: {} - - service: - # annotations: - enabled: true - type: ClusterIP - externalPort: 5984 - - ## An Ingress resource can provide name-based virtual hosting and TLS - ## termination among other things for CouchDB deployments which are accessed - ## from outside the Kubernetes cluster. - ## ref: https://kubernetes.io/docs/concepts/services-networking/ingress/ - ingress: - enabled: false - hosts: - - chart-example.local - path: / - annotations: - [] - # kubernetes.io/ingress.class: nginx - # kubernetes.io/tls-acme: "true" - tls: - # Secrets must be manually created in the namespace. - # - secretName: chart-example-tls - # hosts: - # - chart-example.local - - ## Optional resource requests and limits for the CouchDB container - ## ref: http://kubernetes.io/docs/user-guide/compute-resources/ - resources: - {} - # requests: - # cpu: 100m - # memory: 128Mi - # limits: - # cpu: 56 - # memory: 256Gi - - ## erlangFlags is a map that is passed to the Erlang VM as flags using the - ## ERL_FLAGS env. `name` and `setcookie` flags are minimally required to - ## establish connectivity between cluster nodes. - ## ref: http://erlang.org/doc/man/erl.html#init_flags - erlangFlags: - name: couchdb - setcookie: monster - - ## couchdbConfig will override default CouchDB configuration settings. - ## The contents of this map are reformatted into a .ini file laid down - ## by a ConfigMap object. - ## ref: http://docs.couchdb.org/en/latest/config/index.html couchdbConfig: couchdb: - uuid: budibase-couchdb # REQUIRED: Unique identifier for this CouchDB server instance - # cluster: - # q: 8 # Create 8 shards for each database - chttpd: - bind_address: any - # chttpd.require_valid_user disables all the anonymous requests to the port - # 5984 when is set to true. - require_valid_user: false - - # Kubernetes local cluster domain. - # This is used to generate FQDNs for peers when joining the CouchDB cluster. - dns: - clusterDomainSuffix: cluster.local - - ## Configure liveness and readiness probe values - ## Ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes - # FOR COUCHDB - livenessProbe: - failureThreshold: 3 - initialDelaySeconds: 0 - periodSeconds: 10 - successThreshold: 1 - timeoutSeconds: 1 - readinessProbe: - failureThreshold: 3 - initialDelaySeconds: 0 - periodSeconds: 10 - successThreshold: 1 - timeoutSeconds: 1 + # -- Unique identifier for this CouchDB server instance. You shouldn't need + # to change this. + uuid: budibase-couchdb diff --git a/docs/CONTRIBUTING.md b/docs/CONTRIBUTING.md index 77afd9453b..311afbe706 100644 --- a/docs/CONTRIBUTING.md +++ b/docs/CONTRIBUTING.md @@ -84,13 +84,13 @@ Component libraries are collections of components as well as the definition of t - If the project diverges from your branch, please rebase instead of merging. This makes the commit graph easier to read. -- Once your work is completed, please raise a PR against the `develop` branch with some information about what has changed and why. +- Once your work is completed, please raise a PR against the `master` branch with some information about what has changed and why. ### Getting Started For Contributors #### 1. 
Prerequisites -- NodeJS version `18.x.x` +- NodeJS version `20.x.x` - Python version `3.x` ### Using asdf (recommended) @@ -246,7 +246,7 @@ From here - to develop a change in pro, you can follow the below flow: cd packages/pro # get the base branch you are working from (same as monorepo) git fetch -git checkout +git checkout master # create a branch, named the same as the branch in your monorepo git checkout -b ... make changes diff --git a/eslint-local-rules/index.js b/eslint-local-rules/index.js new file mode 100644 index 0000000000..af02599c90 --- /dev/null +++ b/eslint-local-rules/index.js @@ -0,0 +1,21 @@ +module.exports = { + "no-budibase-imports": { + create: function (context) { + return { + ImportDeclaration(node) { + const importPath = node.source.value + + if ( + /^@budibase\/[^/]+\/.*$/.test(importPath) && + importPath !== "@budibase/backend-core/tests" + ) { + context.report({ + node, + message: `Importing from @budibase is not allowed, except for @budibase/backend-core/tests.`, + }) + } + }, + } + }, + }, +} diff --git a/examples/nextjs-api-sales/package.json b/examples/nextjs-api-sales/package.json index 41ce52e952..9303874a77 100644 --- a/examples/nextjs-api-sales/package.json +++ b/examples/nextjs-api-sales/package.json @@ -22,6 +22,6 @@ "@types/react": "17.0.39", "eslint": "8.10.0", "eslint-config-next": "12.1.0", - "typescript": "4.6.2" + "typescript": "5.2.2" } -} \ No newline at end of file +} diff --git a/hosting/couchdb/Dockerfile b/hosting/couchdb/Dockerfile index 792856cac7..f83df7038b 100644 --- a/hosting/couchdb/Dockerfile +++ b/hosting/couchdb/Dockerfile @@ -29,7 +29,6 @@ WORKDIR /opt/couchdb ADD couch/vm.args couch/local.ini ./etc/ WORKDIR / -ADD build-target-paths.sh . ADD runner.sh ./bbcouch-runner.sh -RUN chmod +x ./bbcouch-runner.sh /opt/clouseau/bin/clouseau ./build-target-paths.sh +RUN chmod +x ./bbcouch-runner.sh /opt/clouseau/bin/clouseau CMD ["./bbcouch-runner.sh"] diff --git a/hosting/couchdb/build-target-paths.sh b/hosting/couchdb/build-target-paths.sh deleted file mode 100644 index 67e1765ca8..0000000000 --- a/hosting/couchdb/build-target-paths.sh +++ /dev/null @@ -1,24 +0,0 @@ -#!/bin/bash - -echo ${TARGETBUILD} > /buildtarget.txt -if [[ "${TARGETBUILD}" = "aas" ]]; then - # Azure AppService uses /home for persisent data & SSH on port 2222 - DATA_DIR=/home - WEBSITES_ENABLE_APP_SERVICE_STORAGE=true - mkdir -p $DATA_DIR/{search,minio,couch} - mkdir -p $DATA_DIR/couch/{dbs,views} - chown -R couchdb:couchdb $DATA_DIR/couch/ - apt update - apt-get install -y openssh-server - echo "root:Docker!" 
| chpasswd + mkdir -p /tmp + chmod +x /tmp/ssh_setup.sh \ + && (sleep 1;/tmp/ssh_setup.sh 2>&1 > /dev/null) + cp /etc/sshd_config /etc/ssh/sshd_config + /etc/init.d/ssh restart + sed -i "s#DATA_DIR#/home#g" /opt/clouseau/clouseau.ini + sed -i "s#DATA_DIR#/home#g" /opt/couchdb/etc/local.ini +elif [[ "${TARGETBUILD}" = "single" ]]; then + # In the single image build, the Dockerfile specifies /data as a volume + # mount, so we use that for all persistent data. + sed -i "s#DATA_DIR#/data#g" /opt/clouseau/clouseau.ini + sed -i "s#DATA_DIR#/data#g" /opt/couchdb/etc/local.ini +elif [[ "${TARGETBUILD}" = "docker-compose" ]]; then + # We remove the database_dir and view_index_dir settings from the local.ini + # in docker-compose because it will default to /opt/couchdb/data, which is what + # our docker-compose was using prior to us switching to using our own CouchDB + # image. + sed -i "s#^database_dir.*\$##g" /opt/couchdb/etc/local.ini + sed -i "s#^view_index_dir.*\$##g" /opt/couchdb/etc/local.ini + sed -i "s#^dir=.*\$#dir=/opt/couchdb/data#g" /opt/clouseau/clouseau.ini +elif [[ -n $KUBERNETES_SERVICE_HOST ]]; then + # In Kubernetes the directory /opt/couchdb/data has a persistent volume + # mount for storing database data. + sed -i "s#^dir=.*\$#dir=/opt/couchdb/data#g" /opt/clouseau/clouseau.ini + + # We remove the database_dir and view_index_dir settings from the local.ini + # in Kubernetes because it will default to /opt/couchdb/data, which is what + # our Helm chart was using prior to us switching to using our own CouchDB + # image. + sed -i "s#^database_dir.*\$##g" /opt/couchdb/etc/local.ini + sed -i "s#^view_index_dir.*\$##g" /opt/couchdb/etc/local.ini + + # We remove the -name setting from the vm.args file in Kubernetes because + # it will default to the pod FQDN, which is what's required for clustering + # to work. + sed -i "s/^-name .*$//g" /opt/couchdb/etc/vm.args +else + # For all other builds, we use /data for persistent data. + sed -i "s#DATA_DIR#/data#g" /opt/clouseau/clouseau.ini + sed -i "s#DATA_DIR#/data#g" /opt/couchdb/etc/local.ini +fi + +# Start Clouseau. Budibase won't function correctly without Clouseau running; it +# powers the search API endpoints, which are used for all sorts of things, including +# populating app grids.
/opt/clouseau/bin/clouseau > /dev/stdout 2>&1 & + +# Start CouchDB. /docker-entrypoint.sh /opt/couchdb/bin/couchdb & -sleep 10 -curl -X PUT http://${COUCHDB_USER}:${COUCHDB_PASSWORD}@localhost:5984/_users -curl -X PUT http://${COUCHDB_USER}:${COUCHDB_PASSWORD}@localhost:5984/_replicator + +# Wait for CouchDB to start up. +while [[ $(curl -s -w "%{http_code}\n" http://localhost:5984/_up -o /dev/null) -ne 200 ]]; do + echo 'Waiting for CouchDB to start...'; + sleep 5; +done + +# CouchDB needs the `_users` and `_replicator` databases to exist before it will +# function correctly, so we create them here. +curl -X PUT -u "${COUCHDB_USER}:${COUCHDB_PASSWORD}" http://localhost:5984/_users +curl -X PUT -u "${COUCHDB_USER}:${COUCHDB_PASSWORD}" http://localhost:5984/_replicator sleep infinity \ No newline at end of file diff --git a/hosting/docker-compose.build.yaml b/hosting/docker-compose.build.yaml index 7ead001a1c..dbc3613599 100644 --- a/hosting/docker-compose.build.yaml +++ b/hosting/docker-compose.build.yaml @@ -6,7 +6,7 @@ services: app-service: build: context: .. - dockerfile: packages/server/Dockerfile.v2 + dockerfile: packages/server/Dockerfile args: - BUDIBASE_VERSION=0.0.0+dev-docker container_name: build-bbapps @@ -36,7 +36,7 @@ services: worker-service: build: context: .. - dockerfile: packages/worker/Dockerfile.v2 + dockerfile: packages/worker/Dockerfile args: - BUDIBASE_VERSION=0.0.0+dev-docker container_name: build-bbworker diff --git a/hosting/docker-compose.yaml b/hosting/docker-compose.yaml index 8f66d211f7..36b88466fe 100644 --- a/hosting/docker-compose.yaml +++ b/hosting/docker-compose.yaml @@ -26,7 +26,7 @@ services: BB_ADMIN_USER_EMAIL: ${BB_ADMIN_USER_EMAIL} BB_ADMIN_USER_PASSWORD: ${BB_ADMIN_USER_PASSWORD} PLUGINS_DIR: ${PLUGINS_DIR} - OFFLINE_MODE: ${OFFLINE_MODE} + OFFLINE_MODE: ${OFFLINE_MODE:-} depends_on: - worker-service - redis-service @@ -53,11 +53,10 @@ services: INTERNAL_API_KEY: ${INTERNAL_API_KEY} REDIS_URL: redis-service:6379 REDIS_PASSWORD: ${REDIS_PASSWORD} - OFFLINE_MODE: ${OFFLINE_MODE} + OFFLINE_MODE: ${OFFLINE_MODE:-} depends_on: - redis-service - minio-service - - couch-init minio-service: restart: unless-stopped @@ -70,7 +69,7 @@ services: MINIO_BROWSER: "off" command: server /data --console-address ":9001" healthcheck: - test: ["CMD", "curl", "-f", "http://localhost:9000/minio/health/live"] + test: "timeout 5s bash -c ':> /dev/tcp/127.0.0.1/9000' || exit 1" interval: 30s timeout: 20s retries: 3 @@ -98,30 +97,19 @@ services: couchdb-service: restart: unless-stopped - image: ibmcom/couchdb3 + image: budibase/couchdb + pull_policy: always environment: - COUCHDB_PASSWORD=${COUCH_DB_PASSWORD} - COUCHDB_USER=${COUCH_DB_USER} + - TARGETBUILD=docker-compose volumes: - couchdb3_data:/opt/couchdb/data - couch-init: - image: curlimages/curl - environment: - PUT_CALL: "curl -u ${COUCH_DB_USER}:${COUCH_DB_PASSWORD} -X PUT couchdb-service:5984" - depends_on: - - couchdb-service - command: - [ - "sh", - "-c", - "sleep 10 && $${PUT_CALL}/_users && $${PUT_CALL}/_replicator; fg;", - ] - redis-service: restart: unless-stopped image: redis - command: redis-server --requirepass ${REDIS_PASSWORD} + command: redis-server --requirepass "${REDIS_PASSWORD}" volumes: - redis_data:/data diff --git a/hosting/nginx.dev.conf b/hosting/nginx.dev.conf index 915125cbce..f0a58a9a98 100644 --- a/hosting/nginx.dev.conf +++ b/hosting/nginx.dev.conf @@ -42,7 +42,7 @@ http { server { listen 10000 default_server; server_name _; - client_max_body_size 1000m; + client_max_body_size 50000m;
ignore_invalid_headers off; proxy_buffering off; diff --git a/hosting/proxy/nginx.prod.conf b/hosting/proxy/nginx.prod.conf index 6da2e4a1c3..65cc3ff390 100644 --- a/hosting/proxy/nginx.prod.conf +++ b/hosting/proxy/nginx.prod.conf @@ -249,4 +249,31 @@ http { gzip_comp_level 6; gzip_types text/plain text/css text/xml application/json application/javascript application/rss+xml application/atom+xml image/svg+xml; } + + # From https://docs.datadoghq.com/integrations/nginx/?tab=kubernetes + server { + listen 81; + server_name localhost; + + access_log off; + allow 127.0.0.1; + allow 10.0.0.0/8; + deny all; + + location /nginx_status { + # Choose your status module + + # freely available with open source NGINX + stub_status; + + # for open source NGINX < version 1.7.5 + # stub_status on; + + # available only with NGINX Plus + # status; + + # ensures the version information can be retrieved + server_tokens on; + } + } } diff --git a/hosting/scripts/build-target-paths.sh b/hosting/scripts/build-target-paths.sh deleted file mode 100644 index 67e1765ca8..0000000000 --- a/hosting/scripts/build-target-paths.sh +++ /dev/null @@ -1,24 +0,0 @@ -#!/bin/bash - -echo ${TARGETBUILD} > /buildtarget.txt -if [[ "${TARGETBUILD}" = "aas" ]]; then - # Azure AppService uses /home for persisent data & SSH on port 2222 - DATA_DIR=/home - WEBSITES_ENABLE_APP_SERVICE_STORAGE=true - mkdir -p $DATA_DIR/{search,minio,couch} - mkdir -p $DATA_DIR/couch/{dbs,views} - chown -R couchdb:couchdb $DATA_DIR/couch/ - apt update - apt-get install -y openssh-server - echo "root:Docker!" | chpasswd - mkdir -p /tmp - chmod +x /tmp/ssh_setup.sh \ - && (sleep 1;/tmp/ssh_setup.sh 2>&1 > /dev/null) - cp /etc/sshd_config /etc/ssh/sshd_config - /etc/init.d/ssh restart - sed -i "s#DATA_DIR#/home#g" /opt/clouseau/clouseau.ini - sed -i "s#DATA_DIR#/home#g" /opt/couchdb/etc/local.ini -else - sed -i "s#DATA_DIR#/data#g" /opt/clouseau/clouseau.ini - sed -i "s#DATA_DIR#/data#g" /opt/couchdb/etc/local.ini -fi \ No newline at end of file diff --git a/hosting/single/Dockerfile b/hosting/single/Dockerfile index c7b90dbdc4..67ac677984 100644 --- a/hosting/single/Dockerfile +++ b/hosting/single/Dockerfile @@ -1,44 +1,59 @@ -FROM node:18-slim as build +FROM node:20-slim as build # install node-gyp dependencies -RUN apt-get update && apt-get upgrade -y && apt-get install -y --no-install-recommends apt-utils cron g++ make python3 +RUN apt-get update && apt-get install -y --no-install-recommends g++ make python3 jq -# add pin script -WORKDIR / -ADD scripts/cleanup.sh ./ -RUN chmod +x /cleanup.sh -# build server +# copy and install dependencies WORKDIR /app -ADD packages/server . +COPY package.json . COPY yarn.lock . -RUN yarn install --production=true --network-timeout 1000000 -RUN /cleanup.sh +COPY lerna.json . +COPY .yarnrc . -# build worker -WORKDIR /worker -ADD packages/worker . -COPY yarn.lock . 
-RUN yarn install --production=true --network-timeout 1000000 -RUN /cleanup.sh +COPY packages/server/package.json packages/server/package.json +COPY packages/worker/package.json packages/worker/package.json +# string-templates does not get bundled during the esbuild process, so we want to use the local version +COPY packages/string-templates/package.json packages/string-templates/package.json -FROM budibase/couchdb + +COPY scripts/removeWorkspaceDependencies.sh scripts/removeWorkspaceDependencies.sh +RUN chmod +x ./scripts/removeWorkspaceDependencies.sh +RUN ./scripts/removeWorkspaceDependencies.sh packages/server/package.json +RUN ./scripts/removeWorkspaceDependencies.sh packages/worker/package.json + + +# We will never want to sync pro, but the script is still required +RUN echo '' > scripts/syncProPackage.js +RUN jq 'del(.scripts.postinstall)' package.json > temp.json && mv temp.json package.json +RUN ./scripts/removeWorkspaceDependencies.sh package.json +RUN --mount=type=cache,target=/root/.yarn YARN_CACHE_FOLDER=/root/.yarn yarn install --production + +# copy the actual code +COPY packages/server/dist packages/server/dist +COPY packages/server/pm2.config.js packages/server/pm2.config.js +COPY packages/server/client packages/server/client +COPY packages/server/builder packages/server/builder +COPY packages/worker/dist packages/worker/dist +COPY packages/worker/pm2.config.js packages/worker/pm2.config.js +COPY packages/string-templates packages/string-templates + + +FROM budibase/couchdb as runner ARG TARGETARCH ENV TARGETARCH $TARGETARCH +ENV NODE_MAJOR 20 #TARGETBUILD can be set to single (for single docker image) or aas (for azure app service) # e.g. docker build --build-arg TARGETBUILD=aas .... ARG TARGETBUILD=single ENV TARGETBUILD $TARGETBUILD -COPY --from=build /app /app -COPY --from=build /worker /worker - # install base dependencies RUN apt-get update && \ - apt-get install -y --no-install-recommends software-properties-common nginx uuid-runtime redis-server + apt-get install -y --no-install-recommends software-properties-common nginx uuid-runtime redis-server libaio1 # Install postgres client for pg_dump utils -RUN apt install software-properties-common apt-transport-https gpg -y \ +RUN apt install -y software-properties-common apt-transport-https ca-certificates gnupg \ && curl -fsSl https://www.postgresql.org/media/keys/ACCC4CF8.asc | gpg --dearmor | tee /usr/share/keyrings/postgresql.gpg > /dev/null \ && echo deb [arch=amd64,arm64,ppc64el signed-by=/usr/share/keyrings/postgresql.gpg] http://apt.postgresql.org/pub/repos/apt/ $(lsb_release -cs)-pgdg main | tee /etc/apt/sources.list.d/postgresql.list \ && apt update -y \ @@ -47,14 +62,12 @@ RUN apt install software-properties-common apt-transport-https gpg -y \ # install other dependencies, nodejs, oracle requirements, jdk8, redis, nginx WORKDIR /nodejs -RUN curl -sL https://deb.nodesource.com/setup_16.x -o /tmp/nodesource_setup.sh && \ - bash /tmp/nodesource_setup.sh && \ - apt-get install -y --no-install-recommends libaio1 nodejs && \ - npm install --global yarn pm2 +COPY scripts/install-node.sh ./install.sh +RUN chmod +x install.sh && ./install.sh # setup nginx -ADD hosting/single/nginx/nginx.conf /etc/nginx -ADD hosting/single/nginx/nginx-default-site.conf /etc/nginx/sites-enabled/default +COPY hosting/single/nginx/nginx.conf /etc/nginx +COPY hosting/single/nginx/nginx-default-site.conf /etc/nginx/sites-enabled/default RUN mkdir -p /var/log/nginx && \ touch /var/log/nginx/error.log && \ touch /var/run/nginx.pid && \ @@ 
-62,29 +75,39 @@ RUN mkdir -p /var/log/nginx && \ WORKDIR / RUN mkdir -p scripts/integrations/oracle -ADD packages/server/scripts/integrations/oracle scripts/integrations/oracle +COPY packages/server/scripts/integrations/oracle scripts/integrations/oracle RUN /bin/bash -e ./scripts/integrations/oracle/instantclient/linux/install.sh # setup minio WORKDIR /minio -ADD scripts/install-minio.sh ./install.sh +COPY scripts/install-minio.sh ./install.sh RUN chmod +x install.sh && ./install.sh # setup runner file WORKDIR / -ADD hosting/single/runner.sh . +COPY hosting/single/runner.sh . RUN chmod +x ./runner.sh -ADD hosting/single/healthcheck.sh . +COPY hosting/single/healthcheck.sh . RUN chmod +x ./healthcheck.sh # Script below sets the path for storing data based on $DATA_DIR # For Azure App Service install SSH & point data locations to /home -ADD hosting/single/ssh/sshd_config /etc/ -ADD hosting/single/ssh/ssh_setup.sh /tmp -RUN /build-target-paths.sh +COPY hosting/single/ssh/sshd_config /etc/ +COPY hosting/single/ssh/ssh_setup.sh /tmp + +# setup letsencrypt certificate +RUN apt-get install -y certbot python3-certbot-nginx +COPY hosting/letsencrypt /app/letsencrypt +RUN chmod +x /app/letsencrypt/certificate-request.sh /app/letsencrypt/certificate-renew.sh + +COPY --from=build /app/node_modules /node_modules +COPY --from=build /app/package.json /package.json +COPY --from=build /app/packages/server /app +COPY --from=build /app/packages/worker /worker +COPY --from=build /app/packages/string-templates /string-templates + +RUN cd /string-templates && yarn link && cd ../app && yarn link @budibase/string-templates && cd ../worker && yarn link @budibase/string-templates -# cleanup cache -RUN yarn cache clean -f EXPOSE 80 EXPOSE 443 @@ -92,20 +115,10 @@ EXPOSE 443 EXPOSE 2222 VOLUME /data -# setup letsencrypt certificate -RUN apt-get install -y certbot python3-certbot-nginx -ADD hosting/letsencrypt /app/letsencrypt -RUN chmod +x /app/letsencrypt/certificate-request.sh /app/letsencrypt/certificate-renew.sh -# Remove cached files -RUN rm -rf \ - /root/.cache \ - /root/.npm \ - /root/.pip \ - /usr/local/share/doc \ - /usr/share/doc \ - /usr/share/man \ - /var/lib/apt/lists/* \ - /tmp/* +ARG BUDIBASE_VERSION +# Ensuring the version argument is sent +RUN test -n "$BUDIBASE_VERSION" +ENV BUDIBASE_VERSION=$BUDIBASE_VERSION HEALTHCHECK --interval=15s --timeout=15s --start-period=45s CMD "/healthcheck.sh" diff --git a/hosting/single/Dockerfile.v2 b/hosting/single/Dockerfile.v2 deleted file mode 100644 index ec03a1b5a2..0000000000 --- a/hosting/single/Dockerfile.v2 +++ /dev/null @@ -1,131 +0,0 @@ -FROM node:18-slim as build - -# install node-gyp dependencies -RUN apt-get update && apt-get install -y --no-install-recommends g++ make python3 jq - - -# copy and install dependencies -WORKDIR /app -COPY package.json . -COPY yarn.lock . -COPY lerna.json . -COPY .yarnrc . 
- -COPY packages/server/package.json packages/server/package.json -COPY packages/worker/package.json packages/worker/package.json -# string-templates does not get bundled during the esbuild process, so we want to use the local version -COPY packages/string-templates/package.json packages/string-templates/package.json - - -COPY scripts/removeWorkspaceDependencies.sh scripts/removeWorkspaceDependencies.sh -RUN chmod +x ./scripts/removeWorkspaceDependencies.sh -RUN ./scripts/removeWorkspaceDependencies.sh packages/server/package.json -RUN ./scripts/removeWorkspaceDependencies.sh packages/worker/package.json - - -# We will never want to sync pro, but the script is still required -RUN echo '' > scripts/syncProPackage.js -RUN jq 'del(.scripts.postinstall)' package.json > temp.json && mv temp.json package.json -RUN ./scripts/removeWorkspaceDependencies.sh package.json -RUN --mount=type=cache,target=/root/.yarn YARN_CACHE_FOLDER=/root/.yarn yarn install --production - -# copy the actual code -COPY packages/server/dist packages/server/dist -COPY packages/server/pm2.config.js packages/server/pm2.config.js -COPY packages/server/client packages/server/client -COPY packages/server/builder packages/server/builder -COPY packages/worker/dist packages/worker/dist -COPY packages/worker/pm2.config.js packages/worker/pm2.config.js -COPY packages/string-templates packages/string-templates - - -FROM budibase/couchdb as runner -ARG TARGETARCH -ENV TARGETARCH $TARGETARCH -ENV NODE_MAJOR 18 -#TARGETBUILD can be set to single (for single docker image) or aas (for azure app service) -# e.g. docker build --build-arg TARGETBUILD=aas .... -ARG TARGETBUILD=single -ENV TARGETBUILD $TARGETBUILD - -# install base dependencies -RUN apt-get update && \ - apt-get install -y --no-install-recommends software-properties-common nginx uuid-runtime redis-server libaio1 - -# Install postgres client for pg_dump utils -RUN apt install -y software-properties-common apt-transport-https ca-certificates gnupg \ - && curl -fsSl https://www.postgresql.org/media/keys/ACCC4CF8.asc | gpg --dearmor | tee /usr/share/keyrings/postgresql.gpg > /dev/null \ - && echo deb [arch=amd64,arm64,ppc64el signed-by=/usr/share/keyrings/postgresql.gpg] http://apt.postgresql.org/pub/repos/apt/ $(lsb_release -cs)-pgdg main | tee /etc/apt/sources.list.d/postgresql.list \ - && apt update -y \ - && apt install postgresql-client-15 -y \ - && apt remove software-properties-common apt-transport-https gpg -y - -# install other dependencies, nodejs, oracle requirements, jdk8, redis, nginx -WORKDIR /nodejs -COPY scripts/install-node.sh ./install.sh -RUN chmod +x install.sh && ./install.sh - -# setup nginx -COPY hosting/single/nginx/nginx.conf /etc/nginx -COPY hosting/single/nginx/nginx-default-site.conf /etc/nginx/sites-enabled/default -RUN mkdir -p /var/log/nginx && \ - touch /var/log/nginx/error.log && \ - touch /var/run/nginx.pid && \ - usermod -a -G tty www-data - -WORKDIR / -RUN mkdir -p scripts/integrations/oracle -COPY packages/server/scripts/integrations/oracle scripts/integrations/oracle -RUN /bin/bash -e ./scripts/integrations/oracle/instantclient/linux/install.sh - -# setup minio -WORKDIR /minio -COPY scripts/install-minio.sh ./install.sh -RUN chmod +x install.sh && ./install.sh - -# setup runner file -WORKDIR / -COPY hosting/single/runner.sh . -RUN chmod +x ./runner.sh -COPY hosting/single/healthcheck.sh . 
-RUN chmod +x ./healthcheck.sh - -# Script below sets the path for storing data based on $DATA_DIR -# For Azure App Service install SSH & point data locations to /home -COPY hosting/single/ssh/sshd_config /etc/ -COPY hosting/single/ssh/ssh_setup.sh /tmp -RUN /build-target-paths.sh - - -# setup letsencrypt certificate -RUN apt-get install -y certbot python3-certbot-nginx -COPY hosting/letsencrypt /app/letsencrypt -RUN chmod +x /app/letsencrypt/certificate-request.sh /app/letsencrypt/certificate-renew.sh - -COPY --from=build /app/node_modules /node_modules -COPY --from=build /app/package.json /package.json -COPY --from=build /app/packages/server /app -COPY --from=build /app/packages/worker /worker -COPY --from=build /app/packages/string-templates /string-templates - -RUN cd /string-templates && yarn link && cd ../app && yarn link @budibase/string-templates && cd ../worker && yarn link @budibase/string-templates - - -EXPOSE 80 -EXPOSE 443 -# Expose port 2222 for SSH on Azure App Service build -EXPOSE 2222 -VOLUME /data - -ARG BUDIBASE_VERSION -# Ensuring the version argument is sent -RUN test -n "$BUDIBASE_VERSION" -ENV BUDIBASE_VERSION=$BUDIBASE_VERSION - -HEALTHCHECK --interval=15s --timeout=15s --start-period=45s CMD "/healthcheck.sh" - -# must set this just before running -ENV NODE_ENV=production -WORKDIR / - -CMD ["./runner.sh"] diff --git a/hosting/single/healthcheck.sh b/hosting/single/healthcheck.sh index 592b3e94fa..12e340062c 100644 --- a/hosting/single/healthcheck.sh +++ b/hosting/single/healthcheck.sh @@ -25,7 +25,7 @@ if [[ $(curl -s -w "%{http_code}\n" http://localhost:4002/health -o /dev/null) - healthy=false fi -if [[ $(curl -s -w "%{http_code}\n" http://localhost:5984/ -o /dev/null) -ne 200 ]]; then +if [[ $(curl -s -w "%{http_code}\n" http://localhost:5984/_up -o /dev/null) -ne 200 ]]; then echo 'ERROR: CouchDB is not running'; healthy=false fi diff --git a/hosting/single/runner.sh b/hosting/single/runner.sh index 9dc7aa25d8..886da4c916 100644 --- a/hosting/single/runner.sh +++ b/hosting/single/runner.sh @@ -7,7 +7,7 @@ declare -a DOCKER_VARS=("APP_PORT" "APPS_URL" "ARCHITECTURE" "BUDIBASE_ENVIRONME [[ -z "${BUDIBASE_ENVIRONMENT}" ]] && export BUDIBASE_ENVIRONMENT=PRODUCTION [[ -z "${CLUSTER_PORT}" ]] && export CLUSTER_PORT=80 [[ -z "${DEPLOYMENT_ENVIRONMENT}" ]] && export DEPLOYMENT_ENVIRONMENT=docker -[[ -z "${MINIO_URL}" ]] && export MINIO_URL=http://127.0.0.1:9000 +[[ -z "${MINIO_URL}" ]] && [[ -z "${USE_S3}" ]] && export MINIO_URL=http://127.0.0.1:9000 [[ -z "${NODE_ENV}" ]] && export NODE_ENV=production [[ -z "${POSTHOG_TOKEN}" ]] && export POSTHOG_TOKEN=phc_bIjZL7oh2GEUd2vqvTBH8WvrX0fWTFQMs6H5KQxiUxU [[ -z "${TENANT_FEATURE_FLAGS}" ]] && export TENANT_FEATURE_FLAGS="*:LICENSING,*:USER_GROUPS,*:ONBOARDING_TOUR" @@ -22,11 +22,11 @@ declare -a DOCKER_VARS=("APP_PORT" "APPS_URL" "ARCHITECTURE" "BUDIBASE_ENVIRONME # Azure App Service customisations if [[ "${TARGETBUILD}" = "aas" ]]; then - DATA_DIR=/home + export DATA_DIR="${DATA_DIR:-/home}" WEBSITES_ENABLE_APP_SERVICE_STORAGE=true /etc/init.d/ssh start else - DATA_DIR=${DATA_DIR:-/data} + export DATA_DIR=${DATA_DIR:-/data} fi mkdir -p ${DATA_DIR} # Mount NFS or GCP Filestore if env vars exist for it @@ -77,7 +77,12 @@ mkdir -p ${DATA_DIR}/minio chown -R couchdb:couchdb ${DATA_DIR}/couch redis-server --requirepass $REDIS_PASSWORD > /dev/stdout 2>&1 & /bbcouch-runner.sh & -/minio/minio server --console-address ":9001" ${DATA_DIR}/minio > /dev/stdout 2>&1 & + +# only start Minio if USE_S3 isn't set +if [[ -z "${USE_S3}"
]]; then + /minio/minio server --console-address ":9001" ${DATA_DIR}/minio > /dev/stdout 2>&1 & +fi + /etc/init.d/nginx restart if [[ ! -z "${CUSTOM_DOMAIN}" ]]; then # Add monthly cron job to renew certbot certificate diff --git a/lerna.json b/lerna.json index 25ec556c56..f8e3772d9f 100644 --- a/lerna.json +++ b/lerna.json @@ -1,10 +1,13 @@ { - "version": "2.13.3", + "version": "2.14.7", "npmClient": "yarn", "packages": [ - "packages/*" + "packages/*", + "!packages/account-portal", + "packages/account-portal/packages/*" ], "useNx": true, + "concurrency": 20, "command": { "publish": { "ignoreChanges": [ diff --git a/package.json b/package.json index 8a27cde104..8c2b6b099c 100644 --- a/package.json +++ b/package.json @@ -2,27 +2,30 @@ "name": "root", "private": true, "devDependencies": { + "@babel/core": "^7.22.5", + "@babel/eslint-parser": "^7.22.5", + "@babel/preset-env": "^7.22.5", "@esbuild-plugins/tsconfig-paths": "^0.1.2", - "@typescript-eslint/parser": "6.7.2", + "@types/node": "20.10.0", + "@typescript-eslint/parser": "6.9.0", "esbuild": "^0.18.17", "esbuild-node-externals": "^1.8.0", - "eslint": "^8.44.0", + "eslint": "^8.52.0", + "eslint-plugin-import": "^2.29.0", + "eslint-plugin-local-rules": "^2.0.0", + "eslint-plugin-svelte": "^2.34.0", "husky": "^8.0.3", "kill-port": "^1.6.1", "lerna": "7.1.1", "madge": "^6.0.0", - "minimist": "^1.2.8", "nx": "16.4.3", "nx-cloud": "16.0.5", "prettier": "2.8.8", "prettier-plugin-svelte": "^2.3.0", "svelte": "3.49.0", + "svelte-eslint-parser": "^0.33.1", "typescript": "5.2.2", - "@babel/core": "^7.22.5", - "@babel/eslint-parser": "^7.22.5", - "@babel/preset-env": "^7.22.5", - "eslint-plugin-svelte": "^2.32.2", - "svelte-eslint-parser": "^0.32.0" + "yargs": "^17.7.2" }, "scripts": { "preinstall": "node scripts/syncProPackage.js", @@ -37,13 +40,16 @@ "nuke": "yarn run nuke:packages && yarn run nuke:docker", "nuke:packages": "yarn run restore", "nuke:docker": "lerna run --stream dev:stack:nuke", - "clean": "lerna clean -y", + "clean": "lerna clean -y && echo Cleaning top level node modules 🧹 && rm -rf ./node_modules && echo Done! 
🚀", "kill-builder": "kill-port 3000", "kill-server": "kill-port 4001 4002", - "kill-all": "yarn run kill-builder && yarn run kill-server", - "dev": "yarn run kill-all && lerna run --parallel prebuild && lerna run --stream dev:builder", - "dev:noserver": "yarn run kill-builder && lerna run --stream dev:stack:up && lerna run --stream dev:builder --ignore @budibase/backend-core --ignore @budibase/server --ignore @budibase/worker", - "dev:server": "yarn run kill-server && lerna run --stream dev:builder --scope @budibase/worker --scope @budibase/server", + "kill-accountportal": "kill-port 3001 4003", + "kill-all": "yarn run kill-builder && yarn run kill-server && yarn kill-accountportal", + "dev": "yarn run kill-all && lerna run --parallel prebuild && lerna run --stream dev --ignore=@budibase/account-portal-ui --ignore @budibase/account-portal-server", + "dev:noserver": "yarn run kill-builder && lerna run --stream dev:stack:up && lerna run --stream dev --ignore @budibase/backend-core --ignore @budibase/server --ignore @budibase/worker --ignore=@budibase/account-portal-ui --ignore @budibase/account-portal-server", + "dev:server": "yarn run kill-server && lerna run --stream dev --scope @budibase/worker --scope @budibase/server", + "dev:accountportal": "yarn kill-accountportal && lerna run dev --stream --scope @budibase/account-portal-ui --scope @budibase/account-portal-server", + "dev:all": "yarn run kill-all && lerna run --stream dev", "dev:built": "yarn run kill-all && cd packages/server && yarn dev:stack:up && cd ../../ && lerna run --stream dev:built", "dev:docker": "yarn build --scope @budibase/server --scope @budibase/worker && docker-compose -f hosting/docker-compose.build.yaml -f hosting/docker-compose.dev.yaml --env-file hosting/.env up --build --scale proxy-service=0", "test": "lerna run --stream test --stream", @@ -77,11 +83,14 @@ "security:audit": "node scripts/audit.js", "postinstall": "husky install", "submodules:load": "git submodule init && git submodule update && yarn", - "submodules:unload": "git submodule deinit --all && yarn" + "submodules:unload": "git submodule deinit --all && yarn", + "add-app-migration": "node scripts/add-app-migration.js --title" }, "workspaces": { "packages": [ - "packages/*" + "packages/*", + "!packages/account-portal", + "packages/account-portal/packages/*" ] }, "resolutions": { @@ -91,7 +100,7 @@ "@budibase/types": "0.0.0" }, "engines": { - "node": ">=18.0.0 <19.0.0" + "node": ">=20.0.0 <21.0.0" }, "dependencies": {} } diff --git a/packages/account-portal b/packages/account-portal new file mode 160000 index 0000000000..b23fb3b179 --- /dev/null +++ b/packages/account-portal @@ -0,0 +1 @@ +Subproject commit b23fb3b17961fb04badd9487913a683fcf26dbe6 diff --git a/packages/backend-core/package.json b/packages/backend-core/package.json index dc8d71b52c..343bc67449 100644 --- a/packages/backend-core/package.json +++ b/packages/backend-core/package.json @@ -21,7 +21,7 @@ "test:watch": "jest --watchAll" }, "dependencies": { - "@budibase/nano": "10.1.3", + "@budibase/nano": "10.1.4", "@budibase/pouchdb-replication-stream": "1.2.10", "@budibase/shared-core": "0.0.0", "@budibase/types": "0.0.0", @@ -32,6 +32,7 @@ "bcryptjs": "2.4.3", "bull": "4.10.1", "correlation-id": "4.0.0", + "dd-trace": "3.13.2", "dotenv": "16.0.1", "ioredis": "5.3.2", "joi": "17.6.0", @@ -64,7 +65,6 @@ "@types/cookies": "0.7.8", "@types/jest": "29.5.5", "@types/lodash": "4.14.200", - "@types/node": "18.17.0", "@types/node-fetch": "2.6.4", "@types/pouchdb": "6.4.0", "@types/redlock": "4.0.3", 
@@ -72,9 +72,9 @@ "@types/tar-fs": "2.0.1", "@types/uuid": "8.3.4", "chance": "1.1.8", - "ioredis-mock": "8.7.0", - "jest": "29.6.2", - "jest-environment-node": "29.6.2", + "ioredis-mock": "8.9.0", + "jest": "29.7.0", + "jest-environment-node": "29.7.0", "jest-serial-runner": "1.2.1", "pino-pretty": "10.0.0", "pouchdb-adapter-memory": "7.2.2", diff --git a/packages/backend-core/src/auth/auth.ts b/packages/backend-core/src/auth/auth.ts index 0100a2d0e2..1951c7986c 100644 --- a/packages/backend-core/src/auth/auth.ts +++ b/packages/backend-core/src/auth/auth.ts @@ -1,5 +1,6 @@ const _passport = require("koa-passport") const LocalStrategy = require("passport-local").Strategy + import { getGlobalDB } from "../context" import { Cookie } from "../constants" import { getSessionsForUser, invalidateSessions } from "../security/sessions" @@ -18,6 +19,7 @@ import { GoogleInnerConfig, OIDCInnerConfig, PlatformLogoutOpts, + SessionCookie, SSOProviderType, } from "@budibase/types" import * as events from "../events" @@ -26,6 +28,7 @@ import { clearCookie, getCookie } from "../utils" import { ssoSaveUserNoOp } from "../middleware/passport/sso/sso" const refresh = require("passport-oauth2-refresh") + export { auditLog, authError, @@ -42,7 +45,6 @@ export const buildAuthMiddleware = authenticated export const buildTenancyMiddleware = tenancy export const buildCsrfMiddleware = csrf export const passport = _passport -export const jwt = require("jsonwebtoken") // Strategies _passport.use(new LocalStrategy(local.options, local.authenticate)) @@ -189,10 +191,10 @@ export async function platformLogout(opts: PlatformLogoutOpts) { if (!ctx) throw new Error("Koa context must be supplied to logout.") - const currentSession = getCookie(ctx, Cookie.Auth) + const currentSession = getCookie(ctx, Cookie.Auth) let sessions = await getSessionsForUser(userId) - if (keepActiveSession) { + if (currentSession && keepActiveSession) { sessions = sessions.filter( session => session.sessionId !== currentSession.sessionId ) diff --git a/packages/backend-core/src/cache/appMetadata.ts b/packages/backend-core/src/cache/appMetadata.ts index bd3efc20db..d442511fb8 100644 --- a/packages/backend-core/src/cache/appMetadata.ts +++ b/packages/backend-core/src/cache/appMetadata.ts @@ -19,7 +19,7 @@ async function populateFromDB(appId: string) { return doWithDB( appId, (db: Database) => { - return db.get(DocumentType.APP_METADATA) + return db.get(DocumentType.APP_METADATA) }, { skip_setup: true } ) diff --git a/packages/backend-core/src/cache/generic.ts b/packages/backend-core/src/cache/generic.ts index 7cd5d6227f..3ac323a8d4 100644 --- a/packages/backend-core/src/cache/generic.ts +++ b/packages/backend-core/src/cache/generic.ts @@ -1,6 +1,6 @@ -const BaseCache = require("./base") +import BaseCache from "./base" -const GENERIC = new BaseCache.default() +const GENERIC = new BaseCache() export enum CacheKey { CHECKLIST = "checklist", @@ -18,13 +18,15 @@ export enum TTL { ONE_DAY = 86400, } -function performExport(funcName: string) { - return (...args: any) => GENERIC[funcName](...args) -} - -export const keys = performExport("keys") -export const get = performExport("get") -export const store = performExport("store") -export const destroy = performExport("delete") -export const withCache = performExport("withCache") -export const bustCache = performExport("bustCache") +export const keys = (...args: Parameters) => + GENERIC.keys(...args) +export const get = (...args: Parameters) => + GENERIC.get(...args) +export const store = (...args: Parameters) 
=> + GENERIC.store(...args) +export const destroy = (...args: Parameters) => + GENERIC.delete(...args) +export const withCache = (...args: Parameters) => + GENERIC.withCache(...args) +export const bustCache = (...args: Parameters) => + GENERIC.bustCache(...args) diff --git a/packages/backend-core/src/cache/index.ts b/packages/backend-core/src/cache/index.ts index 58928c271a..4fa986e4e2 100644 --- a/packages/backend-core/src/cache/index.ts +++ b/packages/backend-core/src/cache/index.ts @@ -2,4 +2,6 @@ export * as generic from "./generic" export * as user from "./user" export * as app from "./appMetadata" export * as writethrough from "./writethrough" +export * as invite from "./invite" +export * as passwordReset from "./passwordReset" export * from "./generic" diff --git a/packages/backend-core/src/cache/invite.ts b/packages/backend-core/src/cache/invite.ts new file mode 100644 index 0000000000..e43ebc4aa8 --- /dev/null +++ b/packages/backend-core/src/cache/invite.ts @@ -0,0 +1,86 @@ +import * as utils from "../utils" +import { Duration, DurationType } from "../utils" +import env from "../environment" +import { getTenantId } from "../context" +import * as redis from "../redis/init" + +const TTL_SECONDS = Duration.fromDays(7).toSeconds() + +interface Invite { + email: string + info: any +} + +interface InviteWithCode extends Invite { + code: string +} + +/** + * Given an invite code and invite body, allows updating an existing/valid invite in redis + * @param code The invite code for an invite in redis + * @param value The body of the updated user invitation + */ +export async function updateCode(code: string, value: Invite) { + const client = await redis.getInviteClient() + await client.store(code, value, TTL_SECONDS) +} + +/** + * Generates an invitation code and writes it to redis, where it can later be checked during user creation. + * @param email the email address which the code is being sent to (for use later). + * @param info Information to be carried along with the invitation. + * @return returns the code that was stored to redis. + */ +export async function createCode(email: string, info: any): Promise { + const code = utils.newid() + const client = await redis.getInviteClient() + await client.store(code, { email, info }, TTL_SECONDS) + return code +} + +/** + * Checks that the provided invite code is valid - will return the email address of the user that was invited. + * @param code the invite code that was provided as part of the link. + * @return If the code is valid then an email address will be returned. + */ +export async function getCode(code: string): Promise { + const client = await redis.getInviteClient() + const value = (await client.get(code)) as Invite | undefined + if (!value) { + throw "Invitation is not valid or has expired, please request a new one." + } + return value +} + +export async function deleteCode(code: string) { + const client = await redis.getInviteClient() + await client.delete(code) +} + +/** + Get all currently available user invitations for the current tenant.
+ **/ +export async function getInviteCodes(): Promise { + const client = await redis.getInviteClient() + const invites: { key: string; value: Invite }[] = await client.scan() + + const results: InviteWithCode[] = invites.map(invite => { + return { + ...invite.value, + code: invite.key, + } + }) + if (!env.MULTI_TENANCY) { + return results + } + const tenantId = getTenantId() + return results.filter(invite => tenantId === invite.info.tenantId) +} + +export async function getExistingInvites( + emails: string[] +): Promise { + return (await getInviteCodes()).filter(invite => + emails.includes(invite.email) + ) +} diff --git a/packages/backend-core/src/cache/passwordReset.ts b/packages/backend-core/src/cache/passwordReset.ts new file mode 100644 index 0000000000..db32b520f7 --- /dev/null +++ b/packages/backend-core/src/cache/passwordReset.ts @@ -0,0 +1,49 @@ +import * as redis from "../redis/init" +import * as utils from "../utils" +import { Duration } from "../utils" + +const TTL_SECONDS = Duration.fromHours(1).toSeconds() + +interface PasswordReset { + userId: string + info: any +} + +/** + * Given a user ID this will store a code (that is returned) for an hour in redis. + * The user can then present this code to reset their password (through their reset link). + * @param userId the ID of the user whose password is to be reset. + * @param info Info about the user/the reset process. + * @return returns the code that was stored to redis. + */ +export async function createCode(userId: string, info: any): Promise { + const code = utils.newid() + const client = await redis.getPasswordResetClient() + await client.store(code, { userId, info }, TTL_SECONDS) + return code +} + +/** + * Given a reset code, this will look it up in redis and check that it is valid. + * @param code The code provided via the email link. + * @return the stored user ID and reset info if the code is found + */ +export async function getCode(code: string): Promise { + const client = await redis.getPasswordResetClient() + const value = (await client.get(code)) as PasswordReset | undefined + if (!value) { + throw new Error( + "Provided information is not valid, cannot reset password - please try again." + ) + } + return value +} + +/** + * Given a reset code this will invalidate it. + * @param code The code provided via the email link.
+ */ +export async function invalidateCode(code: string): Promise { + const client = await redis.getPasswordResetClient() + await client.delete(code) +} diff --git a/packages/backend-core/src/cache/tests/writethrough.spec.ts b/packages/backend-core/src/cache/tests/writethrough.spec.ts index 97d3ece7a6..37887b4bd9 100644 --- a/packages/backend-core/src/cache/tests/writethrough.spec.ts +++ b/packages/backend-core/src/cache/tests/writethrough.spec.ts @@ -1,15 +1,16 @@ import { DBTestConfiguration } from "../../../tests/extra" -import { - structures, - expectFunctionWasCalledTimesWith, - mocks, -} from "../../../tests" +import { structures } from "../../../tests" import { Writethrough } from "../writethrough" import { getDB } from "../../db" +import { Document } from "@budibase/types" import tk from "timekeeper" tk.freeze(Date.now()) +interface ValueDoc extends Document { + value: any +} + const DELAY = 5000 describe("writethrough", () => { @@ -117,7 +118,7 @@ describe("writethrough", () => { describe("get", () => { it("should be able to retrieve", async () => { await config.doInTenant(async () => { - const response = await writethrough.get(docId) + const response = await writethrough.get(docId) expect(response.value).toBe(4) }) }) diff --git a/packages/backend-core/src/cache/writethrough.ts b/packages/backend-core/src/cache/writethrough.ts index c331d791a6..5cafe418d7 100644 --- a/packages/backend-core/src/cache/writethrough.ts +++ b/packages/backend-core/src/cache/writethrough.ts @@ -7,7 +7,7 @@ import * as locks from "../redis/redlockImpl" const DEFAULT_WRITE_RATE_MS = 10000 let CACHE: BaseCache | null = null -interface CacheItem { +interface CacheItem { doc: any lastWrite: number } @@ -24,7 +24,10 @@ function makeCacheKey(db: Database, key: string) { return db.name + key } -function makeCacheItem(doc: any, lastWrite: number | null = null): CacheItem { +function makeCacheItem( + doc: T, + lastWrite: number | null = null +): CacheItem { return { doc, lastWrite: lastWrite || Date.now() } } @@ -35,7 +38,7 @@ async function put( ) { const cache = await getCache() const key = doc._id - let cacheItem: CacheItem | undefined + let cacheItem: CacheItem | undefined if (key) { cacheItem = await cache.get(makeCacheKey(db, key)) } @@ -53,11 +56,8 @@ async function put( const writeDb = async (toWrite: any) => { // doc should contain the _id and _rev const response = await db.put(toWrite, { force: true }) - output = { - ...doc, - _id: response.id, - _rev: response.rev, - } + output._id = response.id + output._rev = response.rev } try { await writeDb(doc) @@ -84,12 +84,12 @@ async function put( return { ok: true, id: output._id, rev: output._rev } } -async function get(db: Database, id: string): Promise { +async function get(db: Database, id: string): Promise { const cache = await getCache() const cacheKey = makeCacheKey(db, id) - let cacheItem: CacheItem = await cache.get(cacheKey) + let cacheItem: CacheItem = await cache.get(cacheKey) if (!cacheItem) { - const doc = await db.get(id) + const doc = await db.get(id) cacheItem = makeCacheItem(doc) await cache.store(cacheKey, cacheItem) } @@ -123,8 +123,8 @@ export class Writethrough { return put(this.db, doc, writeRateMs) } - async get(id: string) { - return get(this.db, id) + async get(id: string) { + return get(this.db, id) } async remove(docOrId: any, rev?: any) { diff --git a/packages/backend-core/src/configs/configs.ts b/packages/backend-core/src/configs/configs.ts index 0c83ed005d..0d189e3f7d 100644 --- a/packages/backend-core/src/configs/configs.ts 
+++ b/packages/backend-core/src/configs/configs.ts @@ -17,7 +17,6 @@ import { DocumentType, SEPARATOR } from "../constants" import { CacheKey, TTL, withCache } from "../cache" import * as context from "../context" import env from "../environment" -import environment from "../environment" // UTILS @@ -181,10 +180,10 @@ export async function getGoogleDatasourceConfig(): Promise< } export function getDefaultGoogleConfig(): GoogleInnerConfig | undefined { - if (environment.GOOGLE_CLIENT_ID && environment.GOOGLE_CLIENT_SECRET) { + if (env.GOOGLE_CLIENT_ID && env.GOOGLE_CLIENT_SECRET) { return { - clientID: environment.GOOGLE_CLIENT_ID!, - clientSecret: environment.GOOGLE_CLIENT_SECRET!, + clientID: env.GOOGLE_CLIENT_ID!, + clientSecret: env.GOOGLE_CLIENT_SECRET!, activated: true, } } diff --git a/packages/backend-core/src/constants/db.ts b/packages/backend-core/src/constants/db.ts index b33b4835a9..ac00483021 100644 --- a/packages/backend-core/src/constants/db.ts +++ b/packages/backend-core/src/constants/db.ts @@ -1,4 +1,5 @@ import { prefixed, DocumentType } from "@budibase/types" + export { SEPARATOR, UNICODE_MAX, @@ -28,7 +29,7 @@ export enum ViewName { APP_BACKUP_BY_TRIGGER = "by_trigger", } -export const DeprecatedViews = { +export const DeprecatedViews: Record<string, string[]> = { [ViewName.USER_BY_EMAIL]: [ // removed due to inaccuracy in view doc filter logic "by_email", diff --git a/packages/backend-core/src/constants/misc.ts b/packages/backend-core/src/constants/misc.ts index 8ef34196ed..aee099e10a 100644 --- a/packages/backend-core/src/constants/misc.ts +++ b/packages/backend-core/src/constants/misc.ts @@ -11,24 +11,7 @@ export enum Cookie { OIDC_CONFIG = "budibase:oidc:config", } -export enum Header { - API_KEY = "x-budibase-api-key", - LICENSE_KEY = "x-budibase-license-key", - API_VER = "x-budibase-api-version", - APP_ID = "x-budibase-app-id", - SESSION_ID = "x-budibase-session-id", - TYPE = "x-budibase-type", - PREVIEW_ROLE = "x-budibase-role", - TENANT_ID = "x-budibase-tenant-id", - VERIFICATION_CODE = "x-budibase-verification-code", - RETURN_VERIFICATION_CODE = "x-budibase-return-verification-code", - RESET_PASSWORD_CODE = "x-budibase-reset-password-code", - RETURN_RESET_PASSWORD_CODE = "x-budibase-return-reset-password-code", - TOKEN = "x-budibase-token", - CSRF_TOKEN = "x-csrf-token", - CORRELATION_ID = "x-budibase-correlation-id", - AUTHORIZATION = "authorization", -} +export { Header } from "@budibase/shared-core" export enum GlobalRole { OWNER = "owner", diff --git a/packages/backend-core/src/context/Context.ts b/packages/backend-core/src/context/Context.ts index d29b6935a8..a59f5c6503 100644 --- a/packages/backend-core/src/context/Context.ts +++ b/packages/backend-core/src/context/Context.ts @@ -4,7 +4,7 @@ import { ContextMap } from "./types" export default class Context { static storage = new AsyncLocalStorage<ContextMap>() - static run(context: ContextMap, func: any) { + static run<T>(context: ContextMap, func: () => T) { return Context.storage.run(context, () => func()) } diff --git a/packages/backend-core/src/context/mainContext.ts b/packages/backend-core/src/context/mainContext.ts index 609c18abb5..36fd5dcb48 100644 --- a/packages/backend-core/src/context/mainContext.ts +++ b/packages/backend-core/src/context/mainContext.ts @@ -98,17 +98,19 @@ function updateContext(updates: ContextMap): ContextMap { return context } -async function newContext(updates: ContextMap, task: any) { +async function newContext<T>(updates: ContextMap, task: () => T) { + guardMigration() + // see if there already is a context
setup let context: ContextMap = updateContext(updates) return Context.run(context, task) } -export async function doInAutomationContext(params: { +export async function doInAutomationContext<T>(params: { appId: string automationId: string - task: any -}): Promise<any> { + task: () => T +}): Promise<T> { const tenantId = getTenantIDFromAppID(params.appId) return newContext( { @@ -132,7 +134,7 @@ export async function doInContext(appId: string, task: any): Promise<any> { } export async function doInTenant<T>( - tenantId: string | null, + tenantId: string | undefined, task: () => T ): Promise<T> { // make sure default always selected in single tenancy @@ -144,31 +146,35 @@ export async function doInTenant<T>( return newContext(updates, task) } -export async function doInAppContext( - appId: string | null, - task: any -): Promise<any> { - if (!appId && !env.isTest()) { +export async function doInAppContext<T>( + appId: string, + task: () => T +): Promise<T> { + return _doInAppContext(appId, task) +} + +async function _doInAppContext<T>( + appId: string, + task: () => T, + extraContextSettings?: ContextMap +): Promise<T> { + if (!appId) { throw new Error("appId is required") } - let updates: ContextMap - if (!appId) { - updates = { appId: "" } - } else { - const tenantId = getTenantIDFromAppID(appId) - updates = { appId } - if (tenantId) { - updates.tenantId = tenantId - } + const tenantId = getTenantIDFromAppID(appId) + const updates: ContextMap = { appId, ...extraContextSettings } + if (tenantId) { + updates.tenantId = tenantId } + return newContext(updates, task) } -export async function doInIdentityContext( +export async function doInIdentityContext<T>( identity: IdentityContext, - task: any -): Promise<any> { + task: () => T +): Promise<T> { if (!identity) { throw new Error("identity is required") } @@ -182,6 +188,24 @@ return newContext(context, task) } +function guardMigration() { + const context = Context.get() + if (context?.isMigrating) { + throw new Error( + "The context cannot be changed, a migration is currently running" + ) + } +} + +export async function doInAppMigrationContext<T>( + appId: string, + task: () => T +): Promise<T> { + return _doInAppContext(appId, task, { + isMigrating: true, + }) +} + export function getIdentity(): IdentityContext | undefined { try { const context = Context.get() @@ -276,6 +300,9 @@ export function getAuditLogsDB(): Database { */ export function getAppDB(opts?: any): Database { const appId = getAppId() + if (!appId) { + throw new Error("Unable to retrieve app DB - no app ID.") + } return getDB(appId, opts) } @@ -308,3 +335,11 @@ export function isScim(): boolean { const scimCall = context?.isScim return !!scimCall } + +export function getCurrentContext(): ContextMap | undefined { + try { + return Context.get() + } catch (e) { + return undefined + } +} diff --git a/packages/backend-core/src/context/tests/index.spec.ts b/packages/backend-core/src/context/tests/index.spec.ts index d8bb598af1..cfc820e169 100644 --- a/packages/backend-core/src/context/tests/index.spec.ts +++ b/packages/backend-core/src/context/tests/index.spec.ts @@ -1,6 +1,11 @@ import { testEnv } from "../../../tests/extra" import * as context from "../" import { DEFAULT_TENANT_ID } from "../../constants" +import { structures } from "../../../tests" +import { db } from "../.."
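The migration guard above is the core of this change: while `doInAppMigrationContext` runs, `isMigrating` is set on the async-local context and `guardMigration()` rejects any nested context switch. A minimal usage sketch, assuming the usual `@budibase/backend-core` namespace exports (the migration body itself is hypothetical):

```ts
import { context } from "@budibase/backend-core"

// While the callback runs, isMigrating is set on the context, so any
// nested doInContext / doInTenant / doInAppContext call throws instead
// of silently switching app or tenant mid-migration.
async function migrateApp(appId: string) {
  await context.doInAppMigrationContext(appId, async () => {
    const db = context.getAppDB()
    // ...apply migration steps against the app DB here...
  })
}
```

Failing fast is the point: a half-applied migration should never observe documents from another tenant or app.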
+import Context from "../Context" +import { ContextMap } from "../types" +import { IdentityType } from "@budibase/types" describe("context", () => { describe("doInTenant", () => { @@ -144,4 +149,107 @@ describe("context", () => { expect(isScim).toBe(false) }) }) + + describe("doInAppMigrationContext", () => { + it("the context is set correctly", async () => { + const appId = db.generateAppID() + + await context.doInAppMigrationContext(appId, () => { + const context = Context.get() + + const expected: ContextMap = { + appId, + isMigrating: true, + } + expect(context).toEqual(expected) + }) + }) + + it("the context is set correctly when running in a tenant id", async () => { + const tenantId = structures.tenant.id() + const appId = db.generateAppID(tenantId) + + await context.doInAppMigrationContext(appId, () => { + const context = Context.get() + + const expected: ContextMap = { + appId, + isMigrating: true, + tenantId, + } + expect(context).toEqual(expected) + }) + }) + + it("the context is not modified outside the delegate", async () => { + const appId = db.generateAppID() + + expect(Context.get()).toBeUndefined() + + await context.doInAppMigrationContext(appId, () => { + const context = Context.get() + + const expected: ContextMap = { + appId, + isMigrating: true, + } + expect(context).toEqual(expected) + }) + + expect(Context.get()).toBeUndefined() + }) + + it.each([ + [ + "doInAppMigrationContext", + () => context.doInAppMigrationContext(db.generateAppID(), () => {}), + ], + [ + "doInAppContext", + () => context.doInAppContext(db.generateAppID(), () => {}), + ], + [ + "doInAutomationContext", + () => + context.doInAutomationContext({ + appId: db.generateAppID(), + automationId: structures.generator.guid(), + task: () => {}, + }), + ], + ["doInContext", () => context.doInContext(db.generateAppID(), () => {})], + [ + "doInEnvironmentContext", + () => context.doInEnvironmentContext({}, () => {}), + ], + [ + "doInIdentityContext", + () => + context.doInIdentityContext( + { + account: undefined, + type: IdentityType.USER, + _id: structures.users.user()._id!, + }, + () => {} + ), + ], + ["doInScimContext", () => context.doInScimContext(() => {})], + [ + "doInTenant", + () => context.doInTenant(structures.tenant.id(), () => {}), + ], + ])( + "a nested context.%s function cannot run", + async (_, otherContextCall: () => Promise) => { + await expect( + context.doInAppMigrationContext(db.generateAppID(), async () => { + await otherContextCall() + }) + ).rejects.toThrowError( + "The context cannot be changed, a migration is currently running" + ) + } + ) + }) }) diff --git a/packages/backend-core/src/context/types.ts b/packages/backend-core/src/context/types.ts index d687a93594..f73dc9f5c7 100644 --- a/packages/backend-core/src/context/types.ts +++ b/packages/backend-core/src/context/types.ts @@ -1,4 +1,5 @@ import { IdentityContext } from "@budibase/types" +import { ExecutionTimeTracker } from "../timers" // keep this out of Budibase types, don't want to expose context info export type ContextMap = { @@ -8,4 +9,6 @@ export type ContextMap = { environmentVariables?: Record isScim?: boolean automationId?: string + isMigrating?: boolean + jsExecutionTracker?: ExecutionTimeTracker } diff --git a/packages/backend-core/src/db/couch/DatabaseImpl.ts b/packages/backend-core/src/db/couch/DatabaseImpl.ts index 29ca4123f5..0e2b4173b0 100644 --- a/packages/backend-core/src/db/couch/DatabaseImpl.ts +++ b/packages/backend-core/src/db/couch/DatabaseImpl.ts @@ -10,12 +10,16 @@ import { DatabaseDeleteIndexOpts, 
Document, isDocument, + RowResponse, } from "@budibase/types" import { getCouchInfo } from "./connections" import { directCouchUrlCall } from "./utils" import { getPouchDB } from "./pouchDB" import { WriteStream, ReadStream } from "fs" import { newid } from "../../docIds/newid" +import { DDInstrumentedDatabase } from "../instrumentation" + +const DATABASE_NOT_FOUND = "Database does not exist." function buildNano(couchInfo: { url: string; cookie: string }) { return Nano({ @@ -29,15 +33,15 @@ function buildNano(couchInfo: { url: string; cookie: string }) { }) } +type DBCall = () => Promise + export function DatabaseWithConnection( dbName: string, connection: string, opts?: DatabaseOpts ) { - if (!connection) { - throw new Error("Must provide connection details") - } - return new DatabaseImpl(dbName, opts, connection) + const db = new DatabaseImpl(dbName, opts, connection) + return new DDInstrumentedDatabase(db) } export class DatabaseImpl implements Database { @@ -48,10 +52,7 @@ export class DatabaseImpl implements Database { private readonly couchInfo = getCouchInfo() - constructor(dbName?: string, opts?: DatabaseOpts, connection?: string) { - if (dbName == null) { - throw new Error("Database name cannot be undefined.") - } + constructor(dbName: string, opts?: DatabaseOpts, connection?: string) { this.name = dbName this.pouchOpts = opts || {} if (connection) { @@ -81,7 +82,11 @@ export class DatabaseImpl implements Database { return this.instanceNano || DatabaseImpl.nano } - async checkSetup() { + private getDb() { + return this.nano().db.use(this.name) + } + + private async checkAndCreateDb() { let shouldCreate = !this.pouchOpts?.skip_setup // check exists in a lightweight fashion let exists = await this.exists() @@ -98,45 +103,84 @@ export class DatabaseImpl implements Database { } } } - return this.nano().db.use(this.name) + return this.getDb() } - private async updateOutput(fnc: any) { + // this function fetches the DB and handles if DB creation is needed + private async performCall( + call: (db: Nano.DocumentScope) => Promise> | DBCall + ): Promise { + const db = this.getDb() + const fnc = await call(db) try { return await fnc() } catch (err: any) { - if (err.statusCode) { + if (err.statusCode === 404 && err.reason === DATABASE_NOT_FOUND) { + await this.checkAndCreateDb() + return await this.performCall(call) + } else if (err.statusCode) { err.status = err.statusCode } throw err } } - async get(id?: string): Promise { - const db = await this.checkSetup() - if (!id) { - throw new Error("Unable to get doc without a valid _id.") + async get(id?: string): Promise { + return this.performCall(db => { + if (!id) { + throw new Error("Unable to get doc without a valid _id.") + } + return () => db.get(id) + }) + } + + async getMultiple( + ids: string[], + opts?: { allowMissing?: boolean } + ): Promise { + // get unique + ids = [...new Set(ids)] + const response = await this.allDocs({ + keys: ids, + include_docs: true, + }) + const rowUnavailable = (row: RowResponse) => { + // row is deleted - key lookup can return this + if (row.doc == null || ("deleted" in row.value && row.value.deleted)) { + return true + } + return row.error === "not_found" } - return this.updateOutput(() => db.get(id)) + + const rows = response.rows.filter(row => !rowUnavailable(row)) + const someMissing = rows.length !== response.rows.length + // some were filtered out - means some missing + if (!opts?.allowMissing && someMissing) { + const missing = response.rows.filter(row => rowUnavailable(row)) + const missingIds = 
missing.map(row => row.key).join(", ") + throw new Error(`Unable to get documents: ${missingIds}`) + } + return rows.map(row => row.doc!) } async remove(idOrDoc: string | Document, rev?: string) { - const db = await this.checkSetup() - let _id: string - let _rev: string + return this.performCall(db => { + let _id: string + let _rev: string - if (isDocument(idOrDoc)) { - _id = idOrDoc._id! - _rev = idOrDoc._rev! - } else { - _id = idOrDoc - _rev = rev! - } + if (isDocument(idOrDoc)) { + _id = idOrDoc._id! + _rev = idOrDoc._rev! + } else { + _id = idOrDoc + _rev = rev! + } - if (!_id || !_rev) { - throw new Error("Unable to remove doc without a valid _id and _rev.") - } - return this.updateOutput(() => db.destroy(_id, _rev)) + if (!_id || !_rev) { + throw new Error("Unable to remove doc without a valid _id and _rev.") + } + return () => db.destroy(_id, _rev) + }) } async post(document: AnyDocument, opts?: DatabasePutOpts) { @@ -150,43 +194,49 @@ export class DatabaseImpl implements Database { if (!document._id) { throw new Error("Cannot store document without _id field.") } - const db = await this.checkSetup() - if (!document.createdAt) { - document.createdAt = new Date().toISOString() - } - document.updatedAt = new Date().toISOString() - if (opts?.force && document._id) { - try { - const existing = await this.get(document._id) - if (existing) { - document._rev = existing._rev - } - } catch (err: any) { - if (err.status !== 404) { - throw err + return this.performCall(async db => { + if (!document.createdAt) { + document.createdAt = new Date().toISOString() + } + document.updatedAt = new Date().toISOString() + if (opts?.force && document._id) { + try { + const existing = await this.get(document._id) + if (existing) { + document._rev = existing._rev + } + } catch (err: any) { + if (err.status !== 404) { + throw err + } } } - } - return this.updateOutput(() => db.insert(document)) + return () => db.insert(document) + }) } async bulkDocs(documents: AnyDocument[]) { - const db = await this.checkSetup() - return this.updateOutput(() => db.bulk({ docs: documents })) + return this.performCall(db => { + return () => db.bulk({ docs: documents }) + }) } - async allDocs(params: DatabaseQueryOpts): Promise> { - const db = await this.checkSetup() - return this.updateOutput(() => db.list(params)) + async allDocs( + params: DatabaseQueryOpts + ): Promise> { + return this.performCall(db => { + return () => db.list(params) + }) } - async query( + async query( viewName: string, params: DatabaseQueryOpts ): Promise> { - const db = await this.checkSetup() - const [database, view] = viewName.split("/") - return this.updateOutput(() => db.view(database, view, params)) + return this.performCall(db => { + const [database, view] = viewName.split("/") + return () => db.view(database, view, params) + }) } async destroy() { @@ -203,8 +253,9 @@ export class DatabaseImpl implements Database { } async compact() { - const db = await this.checkSetup() - return this.updateOutput(() => db.compact()) + return this.performCall(db => { + return () => db.compact() + }) } // All below functions are in-frequently called, just utilise PouchDB diff --git a/packages/backend-core/src/db/db.ts b/packages/backend-core/src/db/db.ts index 9aae64b892..197770298e 100644 --- a/packages/backend-core/src/db/db.ts +++ b/packages/backend-core/src/db/db.ts @@ -1,11 +1,9 @@ -import env from "../environment" import { directCouchQuery, DatabaseImpl } from "./couch" -import { CouchFindOptions, Database } from "@budibase/types" +import { 
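Two behavioural notes on the reworked `DatabaseImpl`: `performCall` now fetches the nano handle directly and only creates the database when CouchDB answers 404 with "Database does not exist.", retrying the call afterwards; and the new `getMultiple` de-duplicates IDs and filters deleted or missing rows, throwing unless `allowMissing` is set. A small sketch of the caller's view (the export path is assumed):

```ts
import { db as dbCore } from "@budibase/backend-core"

async function fetchDocs(dbName: string, ids: string[]) {
  const database = dbCore.getDB(dbName) // instrumented handle, see below
  // Duplicate IDs are collapsed; deleted and not_found rows are dropped.
  // Without allowMissing this would throw "Unable to get documents: ..."
  // listing the IDs that could not be resolved.
  return await database.getMultiple(ids, { allowMissing: true })
}
```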
CouchFindOptions, Database, DatabaseOpts } from "@budibase/types" +import { DDInstrumentedDatabase } from "./instrumentation" -const dbList = new Set() - -export function getDB(dbName?: string, opts?: any): Database { - return new DatabaseImpl(dbName, opts) +export function getDB(dbName: string, opts?: DatabaseOpts): Database { + return new DDInstrumentedDatabase(new DatabaseImpl(dbName, opts)) } // we have to use a callback for this so that we can close @@ -14,7 +12,7 @@ export function getDB(dbName?: string, opts?: any): Database { export async function doWithDB( dbName: string, cb: (db: Database) => Promise, - opts = {} + opts?: DatabaseOpts ) { const db = getDB(dbName, opts) // need this to be async so that we can correctly close DB after all @@ -22,13 +20,6 @@ export async function doWithDB( return await cb(db) } -export function allDbs() { - if (!env.isTest()) { - throw new Error("Cannot be used outside test environment.") - } - return [...dbList] -} - export async function directCouchAllDbs(queryString?: string) { let couchPath = "/_all_dbs" if (queryString) { diff --git a/packages/backend-core/src/db/instrumentation.ts b/packages/backend-core/src/db/instrumentation.ts new file mode 100644 index 0000000000..aa2ac424ae --- /dev/null +++ b/packages/backend-core/src/db/instrumentation.ts @@ -0,0 +1,149 @@ +import { + DocumentScope, + DocumentDestroyResponse, + DocumentInsertResponse, + DocumentBulkResponse, + OkResponse, +} from "@budibase/nano" +import { + AllDocsResponse, + AnyDocument, + Database, + DatabaseDumpOpts, + DatabasePutOpts, + DatabaseQueryOpts, + Document, +} from "@budibase/types" +import tracer from "dd-trace" +import { Writable } from "stream" + +export class DDInstrumentedDatabase implements Database { + constructor(private readonly db: Database) {} + + get name(): string { + return this.db.name + } + + exists(): Promise { + return tracer.trace("db.exists", span => { + span?.addTags({ db_name: this.name }) + return this.db.exists() + }) + } + + get(id?: string | undefined): Promise { + return tracer.trace("db.get", span => { + span?.addTags({ db_name: this.name, doc_id: id }) + return this.db.get(id) + }) + } + + getMultiple( + ids: string[], + opts?: { allowMissing?: boolean | undefined } | undefined + ): Promise { + return tracer.trace("db.getMultiple", span => { + span?.addTags({ + db_name: this.name, + num_docs: ids.length, + allow_missing: opts?.allowMissing, + }) + return this.db.getMultiple(ids, opts) + }) + } + + remove( + id: string | Document, + rev?: string | undefined + ): Promise { + return tracer.trace("db.remove", span => { + span?.addTags({ db_name: this.name, doc_id: id }) + return this.db.remove(id, rev) + }) + } + + put( + document: AnyDocument, + opts?: DatabasePutOpts | undefined + ): Promise { + return tracer.trace("db.put", span => { + span?.addTags({ db_name: this.name, doc_id: document._id }) + return this.db.put(document, opts) + }) + } + + bulkDocs(documents: AnyDocument[]): Promise { + return tracer.trace("db.bulkDocs", span => { + span?.addTags({ db_name: this.name, num_docs: documents.length }) + return this.db.bulkDocs(documents) + }) + } + + allDocs( + params: DatabaseQueryOpts + ): Promise> { + return tracer.trace("db.allDocs", span => { + span?.addTags({ db_name: this.name }) + return this.db.allDocs(params) + }) + } + + query( + viewName: string, + params: DatabaseQueryOpts + ): Promise> { + return tracer.trace("db.query", span => { + span?.addTags({ db_name: this.name, view_name: viewName }) + return this.db.query(viewName, params) 
+ }) + } + + destroy(): Promise { + return tracer.trace("db.destroy", span => { + span?.addTags({ db_name: this.name }) + return this.db.destroy() + }) + } + + compact(): Promise { + return tracer.trace("db.compact", span => { + span?.addTags({ db_name: this.name }) + return this.db.compact() + }) + } + + dump(stream: Writable, opts?: DatabaseDumpOpts | undefined): Promise { + return tracer.trace("db.dump", span => { + span?.addTags({ db_name: this.name }) + return this.db.dump(stream, opts) + }) + } + + load(...args: any[]): Promise { + return tracer.trace("db.load", span => { + span?.addTags({ db_name: this.name }) + return this.db.load(...args) + }) + } + + createIndex(...args: any[]): Promise { + return tracer.trace("db.createIndex", span => { + span?.addTags({ db_name: this.name }) + return this.db.createIndex(...args) + }) + } + + deleteIndex(...args: any[]): Promise { + return tracer.trace("db.deleteIndex", span => { + span?.addTags({ db_name: this.name }) + return this.db.deleteIndex(...args) + }) + } + + getIndexes(...args: any[]): Promise { + return tracer.trace("db.getIndexes", span => { + span?.addTags({ db_name: this.name }) + return this.db.getIndexes(...args) + }) + } +} diff --git a/packages/backend-core/src/db/tests/index.spec.js b/packages/backend-core/src/db/tests/index.spec.js index 0d257f7ed7..e03c9a5b0e 100644 --- a/packages/backend-core/src/db/tests/index.spec.js +++ b/packages/backend-core/src/db/tests/index.spec.js @@ -5,7 +5,6 @@ const { getDB } = require("../db") describe("db", () => { describe("getDB", () => { it("returns a db", async () => { - const dbName = structures.db.id() const db = getDB(dbName) expect(db).toBeDefined() diff --git a/packages/backend-core/src/db/utils.ts b/packages/backend-core/src/db/utils.ts index d7a4b8224a..906a95e1db 100644 --- a/packages/backend-core/src/db/utils.ts +++ b/packages/backend-core/src/db/utils.ts @@ -6,6 +6,7 @@ import { AppState, DeletedApp, getAppMetadata } from "../cache/appMetadata" import { isDevApp, isDevAppID, getProdAppID } from "../docIds/conversions" import { App, Database } from "@budibase/types" import { getStartEndKeyURL } from "../docIds" + export * from "../docIds" /** diff --git a/packages/backend-core/src/db/views.ts b/packages/backend-core/src/db/views.ts index f0980ad217..5d9c5b74d3 100644 --- a/packages/backend-core/src/db/views.ts +++ b/packages/backend-core/src/db/views.ts @@ -7,12 +7,19 @@ import { } from "../constants" import { getGlobalDB } from "../context" import { doWithDB } from "./" -import { AllDocsResponse, Database, DatabaseQueryOpts } from "@budibase/types" +import { + AllDocsResponse, + Database, + DatabaseQueryOpts, + Document, + DesignDocument, + DBView, +} from "@budibase/types" import env from "../environment" const DESIGN_DB = "_design/database" -function DesignDoc() { +function DesignDoc(): DesignDocument { return { _id: DESIGN_DB, // view collation information, read before writing any complex views: @@ -21,20 +28,14 @@ function DesignDoc() { } } -interface DesignDocument { - views: any -} - async function removeDeprecated(db: Database, viewName: ViewName) { - // @ts-ignore if (!DeprecatedViews[viewName]) { return } try { const designDoc = await db.get(DESIGN_DB) - // @ts-ignore for (let deprecatedNames of DeprecatedViews[viewName]) { - delete designDoc.views[deprecatedNames] + delete designDoc.views?.[deprecatedNames] } await db.put(designDoc) } catch (err) { @@ -43,18 +44,18 @@ async function removeDeprecated(db: Database, viewName: ViewName) { } export async function 
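`DDInstrumentedDatabase` is a straightforward delegating decorator: every `Database` method runs inside a `tracer.trace` span tagged with `db_name` plus call-specific tags (doc ID, doc count, view name), then forwards to the wrapped instance. The same shape reduced to a toy interface, to show the pattern in isolation (`KVStore` is illustrative, not a Budibase type):

```ts
import tracer from "dd-trace"

interface KVStore {
  get(key: string): Promise<string | undefined>
}

// Delegating decorator in the style of DDInstrumentedDatabase: the
// wrapped store does the real work, the wrapper only adds a Datadog span.
class TracedKVStore implements KVStore {
  constructor(
    private readonly store: KVStore,
    private readonly name: string
  ) {}

  get(key: string): Promise<string | undefined> {
    return tracer.trace("kv.get", span => {
      span?.addTags({ store_name: this.name, key })
      return this.store.get(key)
    })
  }
}
```

Because the decorator implements the same `Database` interface, `getDB` can return it everywhere without touching call sites.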
createView( - db: any, + db: Database, viewJs: string, viewName: string ): Promise { let designDoc try { - designDoc = (await db.get(DESIGN_DB)) as DesignDocument + designDoc = await db.get(DESIGN_DB) } catch (err) { // no design doc, make one designDoc = DesignDoc() } - const view = { + const view: DBView = { map: viewJs, } designDoc.views = { @@ -109,7 +110,7 @@ export interface QueryViewOptions { arrayResponse?: boolean } -export async function queryViewRaw( +export async function queryViewRaw( viewName: ViewName, params: DatabaseQueryOpts, db: Database, @@ -137,18 +138,16 @@ export async function queryViewRaw( } } -export const queryView = async ( +export const queryView = async ( viewName: ViewName, params: DatabaseQueryOpts, db: Database, createFunc: any, opts?: QueryViewOptions -): Promise => { +): Promise => { const response = await queryViewRaw(viewName, params, db, createFunc, opts) const rows = response.rows - const docs = rows.map((row: any) => - params.include_docs ? row.doc : row.value - ) + const docs = rows.map(row => (params.include_docs ? row.doc! : row.value)) // if arrayResponse has been requested, always return array regardless of length if (opts?.arrayResponse) { @@ -198,11 +197,11 @@ export const createPlatformUserView = async () => { await createPlatformView(viewJs, ViewName.PLATFORM_USERS_LOWERCASE) } -export const queryPlatformView = async ( +export const queryPlatformView = async ( viewName: ViewName, params: DatabaseQueryOpts, opts?: QueryViewOptions -): Promise => { +): Promise => { const CreateFuncByName: any = { [ViewName.ACCOUNT_BY_EMAIL]: createPlatformAccountEmailView, [ViewName.PLATFORM_USERS_LOWERCASE]: createPlatformUserView, @@ -220,7 +219,7 @@ const CreateFuncByName: any = { [ViewName.USER_BY_APP]: createUserAppView, } -export const queryGlobalView = async ( +export const queryGlobalView = async ( viewName: ViewName, params: DatabaseQueryOpts, db?: Database, @@ -231,10 +230,10 @@ export const queryGlobalView = async ( db = getGlobalDB() } const createFn = CreateFuncByName[viewName] - return queryView(viewName, params, db!, createFn, opts) + return queryView(viewName, params, db!, createFn, opts) } -export async function queryGlobalViewRaw( +export async function queryGlobalViewRaw( viewName: ViewName, params: DatabaseQueryOpts, opts?: QueryViewOptions diff --git a/packages/backend-core/src/docIds/conversions.ts b/packages/backend-core/src/docIds/conversions.ts index b168b74e16..ec43d01389 100644 --- a/packages/backend-core/src/docIds/conversions.ts +++ b/packages/backend-core/src/docIds/conversions.ts @@ -1,5 +1,6 @@ import { APP_DEV_PREFIX, APP_PREFIX } from "../constants" import { App } from "@budibase/types" + const NO_APP_ERROR = "No app provided" export function isDevAppID(appId?: string) { diff --git a/packages/backend-core/src/environment.ts b/packages/backend-core/src/environment.ts index ed882fe96a..0fec786c31 100644 --- a/packages/backend-core/src/environment.ts +++ b/packages/backend-core/src/environment.ts @@ -107,6 +107,7 @@ const environment = { ENCRYPTION_KEY: process.env.ENCRYPTION_KEY, API_ENCRYPTION_KEY: getAPIEncryptionKey(), COUCH_DB_URL: process.env.COUCH_DB_URL || "http://localhost:4005", + COUCH_DB_SQL_URL: process.env.COUCH_DB_SQL_URL || "http://localhost:4984", COUCH_DB_USERNAME: process.env.COUCH_DB_USER, COUCH_DB_PASSWORD: process.env.COUCH_DB_PASSWORD, GOOGLE_CLIENT_ID: process.env.GOOGLE_CLIENT_ID, @@ -165,6 +166,8 @@ const environment = { DISABLE_JWT_WARNING: process.env.DISABLE_JWT_WARNING, BLACKLIST_IPS: 
process.env.BLACKLIST_IPS, SERVICE_TYPE: "unknown", + PASSWORD_MIN_LENGTH: process.env.PASSWORD_MIN_LENGTH, + PASSWORD_MAX_LENGTH: process.env.PASSWORD_MAX_LENGTH, /** * Enable to allow an admin user to login using a password. * This can be useful to prevent lockout when configuring SSO. diff --git a/packages/backend-core/src/events/processors/posthog/index.ts b/packages/backend-core/src/events/processors/posthog/index.ts index dceb10d2cd..5a2b1afc9f 100644 --- a/packages/backend-core/src/events/processors/posthog/index.ts +++ b/packages/backend-core/src/events/processors/posthog/index.ts @@ -1,2 +1,3 @@ import PosthogProcessor from "./PosthogProcessor" + export default PosthogProcessor diff --git a/packages/backend-core/src/events/processors/posthog/tests/PosthogProcessor.spec.ts b/packages/backend-core/src/events/processors/posthog/tests/PosthogProcessor.spec.ts index 0722fc3293..d9a5504073 100644 --- a/packages/backend-core/src/events/processors/posthog/tests/PosthogProcessor.spec.ts +++ b/packages/backend-core/src/events/processors/posthog/tests/PosthogProcessor.spec.ts @@ -1,7 +1,9 @@ import { testEnv } from "../../../../../tests/extra" import PosthogProcessor from "../PosthogProcessor" import { Event, IdentityType, Hosting } from "@budibase/types" + const tk = require("timekeeper") + import * as cache from "../../../../cache/generic" import { CacheKey } from "../../../../cache/generic" import * as context from "../../../../context" diff --git a/packages/backend-core/src/features/index.ts b/packages/backend-core/src/features/index.ts index 8f5c903e05..ad517082de 100644 --- a/packages/backend-core/src/features/index.ts +++ b/packages/backend-core/src/features/index.ts @@ -1,5 +1,6 @@ import env from "../environment" import * as context from "../context" + export * from "./installation" /** diff --git a/packages/backend-core/src/index.ts b/packages/backend-core/src/index.ts index c7cf9f56cc..7bf26f3688 100644 --- a/packages/backend-core/src/index.ts +++ b/packages/backend-core/src/index.ts @@ -32,11 +32,14 @@ export * as blacklist from "./blacklist" export * as docUpdates from "./docUpdates" export * from "./utils/Duration" export { SearchParams } from "./db" +export * as docIds from "./docIds" +export * as security from "./security" // Add context to tenancy for backwards compatibility // only do this for external usages to prevent internal // circular dependencies import * as context from "./context" import * as _tenancy from "./tenancy" + export const tenancy = { ..._tenancy, ...context, @@ -50,6 +53,7 @@ export * from "./constants" // expose package init function import * as db from "./db" + export const init = (opts: any = {}) => { db.init(opts.db) } diff --git a/packages/backend-core/src/installation.ts b/packages/backend-core/src/installation.ts index 17eda2004d..ca35b926fb 100644 --- a/packages/backend-core/src/installation.ts +++ b/packages/backend-core/src/installation.ts @@ -1,7 +1,6 @@ import { newid } from "./utils" import * as events from "./events" -import { StaticDatabases } from "./db" -import { doWithDB } from "./db" +import { StaticDatabases, doWithDB } from "./db" import { Installation, IdentityType, Database } from "@budibase/types" import * as context from "./context" import semver from "semver" diff --git a/packages/backend-core/src/logging/correlation/correlation.ts b/packages/backend-core/src/logging/correlation/correlation.ts index b5b47df9c6..0498bd43d5 100644 --- a/packages/backend-core/src/logging/correlation/correlation.ts +++ 
b/packages/backend-core/src/logging/correlation/correlation.ts @@ -1,4 +1,5 @@ import { Header } from "../../constants" + const correlator = require("correlation-id") export const setHeader = (headers: any) => { diff --git a/packages/backend-core/src/logging/correlation/middleware.ts b/packages/backend-core/src/logging/correlation/middleware.ts index f77714a5ae..45baee1fb1 100644 --- a/packages/backend-core/src/logging/correlation/middleware.ts +++ b/packages/backend-core/src/logging/correlation/middleware.ts @@ -1,5 +1,6 @@ import { Header } from "../../constants" import { v4 as uuid } from "uuid" + const correlator = require("correlation-id") const correlation = (ctx: any, next: any) => { diff --git a/packages/backend-core/src/logging/pino/logger.ts b/packages/backend-core/src/logging/pino/logger.ts index 7c444a3a59..7a051e7f12 100644 --- a/packages/backend-core/src/logging/pino/logger.ts +++ b/packages/backend-core/src/logging/pino/logger.ts @@ -5,6 +5,8 @@ import { IdentityType } from "@budibase/types" import env from "../../environment" import * as context from "../../context" import * as correlation from "../correlation" +import tracer from "dd-trace" +import { formats } from "dd-trace/ext" import { localFileDestination } from "../system" @@ -115,6 +117,11 @@ if (!env.DISABLE_PINO_LOGGER) { correlationId: correlation.getId(), } + const span = tracer.scope().active() + if (span) { + tracer.inject(span.context(), formats.LOG, contextObject) + } + const mergingObject: any = { err: error, pid: process.pid, diff --git a/packages/backend-core/src/logging/pino/middleware.ts b/packages/backend-core/src/logging/pino/middleware.ts index 569420c5f2..df18a35eb1 100644 --- a/packages/backend-core/src/logging/pino/middleware.ts +++ b/packages/backend-core/src/logging/pino/middleware.ts @@ -1,9 +1,12 @@ import env from "../../environment" import { logger } from "./logger" import { IncomingMessage } from "http" + const pino = require("koa-pino-logger") + import { Options } from "pino-http" import { Ctx } from "@budibase/types" + const correlator = require("correlation-id") export function pinoSettings(): Options { diff --git a/packages/backend-core/src/middleware/authenticated.ts b/packages/backend-core/src/middleware/authenticated.ts index 8bd6591d05..d357dbdbdc 100644 --- a/packages/backend-core/src/middleware/authenticated.ts +++ b/packages/backend-core/src/middleware/authenticated.ts @@ -13,8 +13,9 @@ import { getGlobalDB, doInTenant } from "../context" import { decrypt } from "../security/encryption" import * as identity from "../context/identity" import env from "../environment" -import { Ctx, EndpointMatcher } from "@budibase/types" +import { Ctx, EndpointMatcher, SessionCookie } from "@budibase/types" import { InvalidAPIKeyError, ErrorCode } from "../errors" +import tracer from "dd-trace" const ONE_MINUTE = env.SESSION_UPDATE_PERIOD ? 
parseInt(env.SESSION_UPDATE_PERIOD) @@ -98,7 +99,9 @@ export default function ( // check the actual user is authenticated first, try header or cookie let headerToken = ctx.request.headers[Header.TOKEN] - const authCookie = getCookie(ctx, Cookie.Auth) || openJwt(headerToken) + const authCookie = + getCookie(ctx, Cookie.Auth) || + openJwt(headerToken) let apiKey = ctx.request.headers[Header.API_KEY] if (!apiKey && ctx.request.headers[Header.AUTHORIZATION]) { @@ -164,6 +167,16 @@ export default function ( if (!authenticated) { authenticated = false } + + if (user) { + tracer.setUser({ + id: user?._id, + tenantId: user?.tenantId, + budibaseAccess: user?.budibaseAccess, + status: user?.status, + }) + } + // isAuthenticated is a function, so use a variable to be able to check authed state finalise(ctx, { authenticated, user, internal, version, publicEndpoint }) diff --git a/packages/backend-core/src/middleware/index.ts b/packages/backend-core/src/middleware/index.ts index 980bf06b00..e1eb7f1d26 100644 --- a/packages/backend-core/src/middleware/index.ts +++ b/packages/backend-core/src/middleware/index.ts @@ -2,6 +2,7 @@ export * as local from "./passport/local" export * as google from "./passport/sso/google" export * as oidc from "./passport/sso/oidc" import * as datasourceGoogle from "./passport/datasource/google" + export const datasource = { google: datasourceGoogle, } diff --git a/packages/backend-core/src/middleware/passport/datasource/google.ts b/packages/backend-core/src/middleware/passport/datasource/google.ts index ae6b3b4913..ab4ffee9d2 100644 --- a/packages/backend-core/src/middleware/passport/datasource/google.ts +++ b/packages/backend-core/src/middleware/passport/datasource/google.ts @@ -3,7 +3,7 @@ import { Cookie } from "../../../constants" import * as configs from "../../../configs" import * as cache from "../../../cache" import * as utils from "../../../utils" -import { UserCtx, SSOProfile } from "@budibase/types" +import { UserCtx, SSOProfile, DatasourceAuthCookie } from "@budibase/types" import { ssoSaveUserNoOp } from "../sso/sso" const GoogleStrategy = require("passport-google-oauth").OAuth2Strategy @@ -58,7 +58,14 @@ export async function postAuth( const platformUrl = await configs.getPlatformUrl({ tenantAware: false }) let callbackUrl = `${platformUrl}/api/global/auth/datasource/google/callback` - const authStateCookie = utils.getCookie(ctx, Cookie.DatasourceAuth) + const authStateCookie = utils.getCookie<{ appId: string }>( + ctx, + Cookie.DatasourceAuth + ) + + if (!authStateCookie) { + throw new Error("Unable to fetch datasource auth cookie") + } return passport.authenticate( new GoogleStrategy( diff --git a/packages/backend-core/src/middleware/passport/sso/google.ts b/packages/backend-core/src/middleware/passport/sso/google.ts index ad7593e63d..2a08ad7665 100644 --- a/packages/backend-core/src/middleware/passport/sso/google.ts +++ b/packages/backend-core/src/middleware/passport/sso/google.ts @@ -8,6 +8,7 @@ import { SaveSSOUserFunction, GoogleInnerConfig, } from "@budibase/types" + const GoogleStrategy = require("passport-google-oauth").OAuth2Strategy export function buildVerifyFn(saveUserFn: SaveSSOUserFunction) { diff --git a/packages/backend-core/src/middleware/passport/sso/tests/google.spec.ts b/packages/backend-core/src/middleware/passport/sso/tests/google.spec.ts index d0689a1f0a..9bf855b3c5 100644 --- a/packages/backend-core/src/middleware/passport/sso/tests/google.spec.ts +++ b/packages/backend-core/src/middleware/passport/sso/tests/google.spec.ts @@ -6,6 +6,7 
@@ const mockStrategy = require("passport-google-oauth").OAuth2Strategy jest.mock("../sso") import * as _sso from "../sso" + const sso = jest.mocked(_sso) const mockSaveUserFn = jest.fn() diff --git a/packages/backend-core/src/middleware/passport/sso/tests/sso.spec.ts b/packages/backend-core/src/middleware/passport/sso/tests/sso.spec.ts index c3ddf220e6..d3486a5b14 100644 --- a/packages/backend-core/src/middleware/passport/sso/tests/sso.spec.ts +++ b/packages/backend-core/src/middleware/passport/sso/tests/sso.spec.ts @@ -11,6 +11,7 @@ const mockSaveUser = jest.fn() jest.mock("../../../../users") import * as _users from "../../../../users" + const users = jest.mocked(_users) const getErrorMessage = () => { diff --git a/packages/backend-core/src/middleware/tests/builder.spec.ts b/packages/backend-core/src/middleware/tests/builder.spec.ts index d350eff4f6..0514dc13f0 100644 --- a/packages/backend-core/src/middleware/tests/builder.spec.ts +++ b/packages/backend-core/src/middleware/tests/builder.spec.ts @@ -5,6 +5,7 @@ import { structures } from "../../../tests" import { ContextUser, ServiceType } from "@budibase/types" import { doInAppContext } from "../../context" import env from "../../environment" + env._set("SERVICE_TYPE", ServiceType.APPS) const appId = "app_aaa" diff --git a/packages/backend-core/src/objectStore/objectStore.ts b/packages/backend-core/src/objectStore/objectStore.ts index 76d2dd6689..57ead0e809 100644 --- a/packages/backend-core/src/objectStore/objectStore.ts +++ b/packages/backend-core/src/objectStore/objectStore.ts @@ -1,4 +1,5 @@ const sanitize = require("sanitize-s3-objectkey") + import AWS from "aws-sdk" import stream, { Readable } from "stream" import fetch from "node-fetch" @@ -304,20 +305,33 @@ export async function retrieveDirectory(bucketName: string, path: string) { let writePath = join(budibaseTempDir(), v4()) fs.mkdirSync(writePath) const objects = await listAllObjects(bucketName, path) - let fullObjects = await Promise.all( - objects.map(obj => retrieve(bucketName, obj.Key!)) + let streams = await Promise.all( + objects.map(obj => getReadStream(bucketName, obj.Key!)) ) let count = 0 + const writePromises: Promise[] = [] for (let obj of objects) { const filename = obj.Key! 
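`retrieveDirectory` now pipes each object's read stream straight to a file instead of buffering whole bodies in memory, which matters for large exports. The equivalent conventional pattern with Node's built-in stream helpers (names here are illustrative, not the helpers from the diff):

```ts
import fs from "fs"
import { Readable } from "stream"
import { pipeline } from "stream/promises"

// Stream one object body to disk without holding it in memory.
// pipeline() resolves once the write side finishes and rejects if
// either stream errors, which is what the hand-rolled promise and
// event listeners in the diff are reproducing.
async function streamToFile(body: Readable, path: string) {
  await pipeline(body, fs.createWriteStream(path, { mode: 0o644 }))
}
```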
- const data = fullObjects[count++] + const stream = streams[count++] const possiblePath = filename.split("/") - if (possiblePath.length > 1) { - const dirs = possiblePath.slice(0, possiblePath.length - 1) - fs.mkdirSync(join(writePath, ...dirs), { recursive: true }) + const dirs = possiblePath.slice(0, possiblePath.length - 1) + const possibleDir = join(writePath, ...dirs) + if (possiblePath.length > 1 && !fs.existsSync(possibleDir)) { + fs.mkdirSync(possibleDir, { recursive: true }) } - fs.writeFileSync(join(writePath, ...possiblePath), data) + const writeStream = fs.createWriteStream(join(writePath, ...possiblePath), { + mode: 0o644, + }) + stream.pipe(writeStream) + writePromises.push( + new Promise((resolve, reject) => { + stream.on("finish", resolve) + stream.on("error", reject) + writeStream.on("error", reject) + }) + ) } + await Promise.all(writePromises) return writePath } diff --git a/packages/backend-core/src/queue/constants.ts b/packages/backend-core/src/queue/constants.ts index e1ffcfee36..eb4f21aced 100644 --- a/packages/backend-core/src/queue/constants.ts +++ b/packages/backend-core/src/queue/constants.ts @@ -3,4 +3,5 @@ export enum JobQueue { APP_BACKUP = "appBackupQueue", AUDIT_LOG = "auditLogQueue", SYSTEM_EVENT_QUEUE = "systemEventQueue", + APP_MIGRATION = "appMigration", } diff --git a/packages/backend-core/src/queue/inMemoryQueue.ts b/packages/backend-core/src/queue/inMemoryQueue.ts index a8add7ecb6..c05bbffbe9 100644 --- a/packages/backend-core/src/queue/inMemoryQueue.ts +++ b/packages/backend-core/src/queue/inMemoryQueue.ts @@ -15,6 +15,7 @@ function newJob(queue: string, message: any) { timestamp: Date.now(), queue: queue, data: message, + opts: {}, } } @@ -68,6 +69,10 @@ class InMemoryQueue { }) } + async isReady() { + return true + } + // simply puts a message to the queue and emits to the queue for processing /** * Simple function to replicate the add message functionality of Bull, putting diff --git a/packages/backend-core/src/queue/listeners.ts b/packages/backend-core/src/queue/listeners.ts index 42e3172364..063a01bd2f 100644 --- a/packages/backend-core/src/queue/listeners.ts +++ b/packages/backend-core/src/queue/listeners.ts @@ -87,6 +87,7 @@ enum QueueEventType { APP_BACKUP_EVENT = "app-backup-event", AUDIT_LOG_EVENT = "audit-log-event", SYSTEM_EVENT = "system-event", + APP_MIGRATION = "app-migration", } const EventTypeMap: { [key in JobQueue]: QueueEventType } = { @@ -94,6 +95,7 @@ const EventTypeMap: { [key in JobQueue]: QueueEventType } = { [JobQueue.APP_BACKUP]: QueueEventType.APP_BACKUP_EVENT, [JobQueue.AUDIT_LOG]: QueueEventType.AUDIT_LOG_EVENT, [JobQueue.SYSTEM_EVENT_QUEUE]: QueueEventType.SYSTEM_EVENT, + [JobQueue.APP_MIGRATION]: QueueEventType.APP_MIGRATION, } function logging(queue: Queue, jobQueue: JobQueue) { diff --git a/packages/backend-core/src/queue/queue.ts b/packages/backend-core/src/queue/queue.ts index 0657437a3b..b95dace5b2 100644 --- a/packages/backend-core/src/queue/queue.ts +++ b/packages/backend-core/src/queue/queue.ts @@ -47,7 +47,7 @@ export function createQueue( cleanupInterval = timers.set(cleanup, CLEANUP_PERIOD_MS) // fire off an initial cleanup cleanup().catch(err => { - console.error(`Unable to cleanup automation queue initially - ${err}`) + console.error(`Unable to cleanup ${jobQueue} initially - ${err}`) }) } return queue diff --git a/packages/backend-core/src/redis/init.ts b/packages/backend-core/src/redis/init.ts index 55ffe3dd12..f3bcee3209 100644 --- a/packages/backend-core/src/redis/init.ts +++ 
b/packages/backend-core/src/redis/init.ts @@ -7,15 +7,19 @@ let userClient: Client, cacheClient: Client, writethroughClient: Client, lockClient: Client, - socketClient: Client + socketClient: Client, + inviteClient: Client, + passwordResetClient: Client -async function init() { +export async function init() { userClient = await new Client(utils.Databases.USER_CACHE).init() sessionClient = await new Client(utils.Databases.SESSIONS).init() appClient = await new Client(utils.Databases.APP_METADATA).init() cacheClient = await new Client(utils.Databases.GENERIC_CACHE).init() lockClient = await new Client(utils.Databases.LOCKS).init() writethroughClient = await new Client(utils.Databases.WRITE_THROUGH).init() + inviteClient = await new Client(utils.Databases.INVITATIONS).init() + passwordResetClient = await new Client(utils.Databases.PW_RESETS).init() socketClient = await new Client( utils.Databases.SOCKET_IO, utils.SelectableDatabase.SOCKET_IO @@ -29,6 +33,8 @@ export async function shutdown() { if (cacheClient) await cacheClient.finish() if (writethroughClient) await writethroughClient.finish() if (lockClient) await lockClient.finish() + if (inviteClient) await inviteClient.finish() + if (passwordResetClient) await passwordResetClient.finish() if (socketClient) await socketClient.finish() } @@ -84,3 +90,17 @@ export async function getSocketClient() { } return socketClient } + +export async function getInviteClient() { + if (!inviteClient) { + await init() + } + return inviteClient +} + +export async function getPasswordResetClient() { + if (!passwordResetClient) { + await init() + } + return passwordResetClient +} diff --git a/packages/backend-core/src/redis/redis.ts b/packages/backend-core/src/redis/redis.ts index 6f1b573718..d15453ba62 100644 --- a/packages/backend-core/src/redis/redis.ts +++ b/packages/backend-core/src/redis/redis.ts @@ -18,6 +18,7 @@ import { SelectableDatabase, getRedisConnectionDetails, } from "./utils" +import { logAlert } from "../logging" import * as timers from "../timers" const RETRY_PERIOD_MS = 2000 @@ -28,7 +29,6 @@ const DEFAULT_SELECT_DB = SelectableDatabase.DEFAULT // for testing just generate the client once let CLOSED = false let CLIENTS: { [key: number]: any } = {} -0 let CONNECTED = false // mock redis always connected @@ -40,21 +40,16 @@ function pickClient(selectDb: number): any { return CLIENTS[selectDb] } -function connectionError( - selectDb: number, - timeout: NodeJS.Timeout, - err: Error | string -) { +function connectionError(timeout: NodeJS.Timeout, err: Error | string) { // manually shut down, ignore errors if (CLOSED) { return } - pickClient(selectDb).disconnect() CLOSED = true // always clear this on error clearTimeout(timeout) CONNECTED = false - console.error("Redis connection failed - " + err) + logAlert("Redis connection failed", err) setTimeout(() => { init() }, RETRY_PERIOD_MS) @@ -80,11 +75,7 @@ function init(selectDb = DEFAULT_SELECT_DB) { // start the timer - only allowed 5 seconds to connect timeout = setTimeout(() => { if (!CONNECTED) { - connectionError( - selectDb, - timeout, - "Did not successfully connect in timeout" - ) + connectionError(timeout, "Did not successfully connect in timeout") } }, STARTUP_TIMEOUT_MS) @@ -107,12 +98,13 @@ function init(selectDb = DEFAULT_SELECT_DB) { // allow the process to exit return } - connectionError(selectDb, timeout, err) + connectionError(timeout, err) }) client.on("error", (err: Error) => { - connectionError(selectDb, timeout, err) + connectionError(timeout, err) }) client.on("connect", () => 
{ + console.log(`Connected to Redis DB: ${selectDb}`) clearTimeout(timeout) CONNECTED = true }) diff --git a/packages/backend-core/src/redis/redlockImpl.ts b/packages/backend-core/src/redis/redlockImpl.ts index 7fe61a409e..7009dc6f55 100644 --- a/packages/backend-core/src/redis/redlockImpl.ts +++ b/packages/backend-core/src/redis/redlockImpl.ts @@ -2,7 +2,8 @@ import Redlock from "redlock" import { getLockClient } from "./init" import { LockOptions, LockType } from "@budibase/types" import * as context from "../context" -import env from "../environment" +import { utils } from "@budibase/shared-core" +import { Duration } from "../utils" async function getClient( type: LockType, @@ -11,9 +12,7 @@ async function getClient( if (type === LockType.CUSTOM) { return newRedlock(opts) } - if (env.isTest() && type !== LockType.TRY_ONCE) { - return newRedlock(OPTIONS.TEST) - } + switch (type) { case LockType.TRY_ONCE: { return newRedlock(OPTIONS.TRY_ONCE) @@ -27,13 +26,16 @@ async function getClient( case LockType.DELAY_500: { return newRedlock(OPTIONS.DELAY_500) } + case LockType.AUTO_EXTEND: { + return newRedlock(OPTIONS.AUTO_EXTEND) + } default: { - throw new Error(`Could not get redlock client: ${type}`) + throw utils.unreachable(type) } } } -const OPTIONS = { +const OPTIONS: Record = { TRY_ONCE: { // immediately throws an error if the lock is already held retryCount: 0, @@ -41,11 +43,6 @@ const OPTIONS = { TRY_TWICE: { retryCount: 1, }, - TEST: { - // higher retry count in unit tests - // due to high contention. - retryCount: 100, - }, DEFAULT: { // the expected clock drift; for more details // see http://redis.io/topics/distlock @@ -66,10 +63,14 @@ const OPTIONS = { DELAY_500: { retryDelay: 500, }, + CUSTOM: {}, + AUTO_EXTEND: { + retryCount: -1, + }, } export async function newRedlock(opts: Redlock.Options = {}) { - let options = { ...OPTIONS.DEFAULT, ...opts } + const options = { ...OPTIONS.DEFAULT, ...opts } const redisWrapper = await getLockClient() const client = redisWrapper.getClient() return new Redlock([client], options) @@ -99,24 +100,42 @@ function getLockName(opts: LockOptions) { return name } +export const AUTO_EXTEND_POLLING_MS = Duration.fromSeconds(10).toMs() + export async function doWithLock( opts: LockOptions, task: () => Promise ): Promise> { const redlock = await getClient(opts.type, opts.customOptions) - let lock + let lock: Redlock.Lock | undefined + let timeout try { const name = getLockName(opts) + const ttl = + opts.type === LockType.AUTO_EXTEND ? 
AUTO_EXTEND_POLLING_MS : opts.ttl + // create the lock - lock = await redlock.lock(name, opts.ttl) + lock = await redlock.lock(name, ttl) + + if (opts.type === LockType.AUTO_EXTEND) { + // We keep extending the lock while the task is running + const extendInIntervals = (): void => { + timeout = setTimeout(async () => { + lock = await lock!.extend(ttl, () => opts.onExtend && opts.onExtend()) + + extendInIntervals() + }, ttl / 2) + } + + extendInIntervals() + } // perform locked task // need to await to ensure completion before unlocking const result = await task() return { executed: true, result } } catch (e: any) { - console.warn("lock error") // lock limit exceeded if (e.name === "LockError") { if (opts.type === LockType.TRY_ONCE) { @@ -124,16 +143,13 @@ export async function doWithLock( // due to retry count (0) exceeded return { executed: false } } else { - console.error(e) throw e } } else { - console.error(e) throw e } } finally { - if (lock) { - await lock.unlock() - } + clearTimeout(timeout) + await lock?.unlock() } } diff --git a/packages/backend-core/src/redis/tests/redlockImpl.spec.ts b/packages/backend-core/src/redis/tests/redlockImpl.spec.ts new file mode 100644 index 0000000000..a1e83d8e6c --- /dev/null +++ b/packages/backend-core/src/redis/tests/redlockImpl.spec.ts @@ -0,0 +1,105 @@ +import { LockName, LockType, LockOptions } from "@budibase/types" +import { AUTO_EXTEND_POLLING_MS, doWithLock } from "../redlockImpl" +import { DBTestConfiguration, generator } from "../../../tests" + +describe("redlockImpl", () => { + beforeEach(() => { + jest.useFakeTimers() + }) + + describe("doWithLock", () => { + const config = new DBTestConfiguration() + const lockTtl = AUTO_EXTEND_POLLING_MS + + function runLockWithExecutionTime({ + opts, + task, + executionTimeMs, + }: { + opts: LockOptions + task: () => Promise + executionTimeMs: number + }) { + return config.doInTenant(() => + doWithLock(opts, async () => { + // Run in multiple intervals until hitting the expected time + const interval = lockTtl / 10 + for (let i = executionTimeMs; i > 0; i -= interval) { + await jest.advanceTimersByTimeAsync(interval) + } + return task() + }) + ) + } + + it.each(Object.values(LockType))( + "should return the task value and release the lock", + async (lockType: LockType) => { + const expectedResult = generator.guid() + const mockTask = jest.fn().mockResolvedValue(expectedResult) + + const opts: LockOptions = { + name: LockName.PERSIST_WRITETHROUGH, + type: lockType, + ttl: lockTtl, + } + + const result = await runLockWithExecutionTime({ + opts, + task: mockTask, + executionTimeMs: 0, + }) + + expect(result.executed).toBe(true) + expect(result.executed && result.result).toBe(expectedResult) + expect(mockTask).toHaveBeenCalledTimes(1) + } + ) + + it("should extend when type is autoextend", async () => { + const expectedResult = generator.guid() + const mockTask = jest.fn().mockResolvedValue(expectedResult) + const mockOnExtend = jest.fn() + + const opts: LockOptions = { + name: LockName.PERSIST_WRITETHROUGH, + type: LockType.AUTO_EXTEND, + onExtend: mockOnExtend, + } + + const result = await runLockWithExecutionTime({ + opts, + task: mockTask, + executionTimeMs: lockTtl * 2.5, + }) + + expect(result.executed).toBe(true) + expect(result.executed && result.result).toBe(expectedResult) + expect(mockTask).toHaveBeenCalledTimes(1) + expect(mockOnExtend).toHaveBeenCalledTimes(5) + }) + + it.each(Object.values(LockType).filter(t => t !== LockType.AUTO_EXTEND))( + "should timeout when type is %s", + async 
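The new `AUTO_EXTEND` lock type takes the lock with a fixed TTL (`AUTO_EXTEND_POLLING_MS`, ten seconds) and re-extends it on a timer at half the TTL until the task completes; `retryCount: -1` tells redlock to retry acquisition indefinitely. A usage sketch, assuming the usual `locks` export from `@budibase/backend-core` (the work inside the callback is hypothetical):

```ts
import { LockName, LockType } from "@budibase/types"
import { locks } from "@budibase/backend-core"

async function persistWithLock() {
  const result = await locks.doWithLock(
    {
      type: LockType.AUTO_EXTEND,
      name: LockName.PERSIST_WRITETHROUGH, // any LockName fits here
      onExtend: () => console.log("lock extended"), // optional hook
    },
    async () => {
      // long-running work; the lock keeps extending underneath
      return "done"
    }
  )
  // executed is false only for TRY_ONCE locks that lost the race
  return result.executed ? result.result : undefined
}
```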
(lockType: LockType) => { + const mockTask = jest.fn().mockResolvedValue("mockResult") + + const opts: LockOptions = { + name: LockName.PERSIST_WRITETHROUGH, + type: lockType, + ttl: lockTtl, + } + + await expect( + runLockWithExecutionTime({ + opts, + task: mockTask, + executionTimeMs: lockTtl * 2, + }) + ).rejects.toThrowError( + `Unable to fully release the lock on resource \"lock:${config.tenantId}_persist_writethrough\".` + ) + } + ) + }) +}) diff --git a/packages/backend-core/src/redis/utils.ts b/packages/backend-core/src/redis/utils.ts index 5187fe13f8..4d8b1bb9a4 100644 --- a/packages/backend-core/src/redis/utils.ts +++ b/packages/backend-core/src/redis/utils.ts @@ -75,10 +75,12 @@ export function getRedisConnectionDetails() { } const [host, port] = url.split(":") + const portNumber = parseInt(port) return { host, password, - port: parseInt(port), + // assume default port for redis if invalid found + port: isNaN(portNumber) ? 6379 : portNumber, } } diff --git a/packages/backend-core/src/security/auth.ts b/packages/backend-core/src/security/auth.ts new file mode 100644 index 0000000000..1cce35a0af --- /dev/null +++ b/packages/backend-core/src/security/auth.ts @@ -0,0 +1,24 @@ +import env from "../environment" + +export const PASSWORD_MIN_LENGTH = +(env.PASSWORD_MIN_LENGTH || 8) +export const PASSWORD_MAX_LENGTH = +(env.PASSWORD_MAX_LENGTH || 512) + +export function validatePassword( + password: string +): { valid: true } | { valid: false; error: string } { + if (!password || password.length < PASSWORD_MIN_LENGTH) { + return { + valid: false, + error: `Password invalid. Minimum ${PASSWORD_MIN_LENGTH} characters.`, + } + } + + if (password.length > PASSWORD_MAX_LENGTH) { + return { + valid: false, + error: `Password invalid. Maximum ${PASSWORD_MAX_LENGTH} characters.`, + } + } + + return { valid: true } +} diff --git a/packages/backend-core/src/security/encryption.ts b/packages/backend-core/src/security/encryption.ts index 7a8cfaf04a..45ed566a92 100644 --- a/packages/backend-core/src/security/encryption.ts +++ b/packages/backend-core/src/security/encryption.ts @@ -73,6 +73,9 @@ export async function encryptFile( const outputFileName = `${filename}.enc` const filePath = join(dir, filename) + if (fs.lstatSync(filePath).isDirectory()) { + throw new Error("Unable to encrypt directory") + } const inputFile = fs.createReadStream(filePath) const outputFile = fs.createWriteStream(join(dir, outputFileName)) @@ -110,6 +113,9 @@ export async function decryptFile( outputPath: string, secret: string ) { + if (fs.lstatSync(inputPath).isDirectory()) { + throw new Error("Unable to encrypt directory") + } const { salt, iv } = await getSaltAndIV(inputPath) const inputFile = fs.createReadStream(inputPath, { start: SALT_LENGTH + IV_LENGTH, diff --git a/packages/backend-core/src/security/index.ts b/packages/backend-core/src/security/index.ts new file mode 100644 index 0000000000..306751af96 --- /dev/null +++ b/packages/backend-core/src/security/index.ts @@ -0,0 +1 @@ +export * from "./auth" diff --git a/packages/backend-core/src/security/permissions.ts b/packages/backend-core/src/security/permissions.ts index fe4095d210..98704f16c6 100644 --- a/packages/backend-core/src/security/permissions.ts +++ b/packages/backend-core/src/security/permissions.ts @@ -160,4 +160,5 @@ export function isPermissionLevelHigherThanRead(level: PermissionLevel) { // utility as a lot of things need simply the builder permission export const BUILDER = PermissionType.BUILDER +export const CREATOR = PermissionType.CREATOR export 
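Password validation is now shared and environment-tunable: `PASSWORD_MIN_LENGTH` and `PASSWORD_MAX_LENGTH` (defaults 8 and 512) feed `validatePassword`, which returns a discriminated union rather than throwing. A sketch of the intended call pattern, using the `security` namespace this diff starts exporting:

```ts
import { security } from "@budibase/backend-core"

function assertPasswordOk(password: string) {
  const validation = security.validatePassword(password)
  if (!validation.valid) {
    // e.g. "Password invalid. Minimum 8 characters."
    throw new Error(validation.error)
  }
}
```

The union shape means callers cannot read `error` without first checking `valid`, which TypeScript narrows for free.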
const GLOBAL_BUILDER = PermissionType.GLOBAL_BUILDER diff --git a/packages/backend-core/src/security/roles.ts b/packages/backend-core/src/security/roles.ts index 0d33031de5..4f048c0a11 100644 --- a/packages/backend-core/src/security/roles.ts +++ b/packages/backend-core/src/security/roles.ts @@ -1,7 +1,12 @@ import { BuiltinPermissionID, PermissionLevel } from "./permissions" -import { prefixRoleID, getRoleParams, DocumentType, SEPARATOR } from "../db" +import { + prefixRoleID, + getRoleParams, + DocumentType, + SEPARATOR, + doWithDB, +} from "../db" import { getAppDB } from "../context" -import { doWithDB } from "../db" import { Screen, Role as RoleDoc } from "@budibase/types" import cloneDeep from "lodash/fp/cloneDeep" diff --git a/packages/backend-core/src/security/sessions.ts b/packages/backend-core/src/security/sessions.ts index 5a535c0c46..a86a829b17 100644 --- a/packages/backend-core/src/security/sessions.ts +++ b/packages/backend-core/src/security/sessions.ts @@ -1,6 +1,7 @@ const redis = require("../redis/init") const { v4: uuidv4 } = require("uuid") const { logWarn } = require("../logging") + import env from "../environment" import { Session, diff --git a/packages/backend-core/src/security/tests/auth.spec.ts b/packages/backend-core/src/security/tests/auth.spec.ts new file mode 100644 index 0000000000..b1835fdfb3 --- /dev/null +++ b/packages/backend-core/src/security/tests/auth.spec.ts @@ -0,0 +1,45 @@ +import { generator } from "../../../tests" +import { PASSWORD_MAX_LENGTH, validatePassword } from "../auth" + +describe("auth", () => { + describe("validatePassword", () => { + it("a valid password returns successful", () => { + expect(validatePassword("password")).toEqual({ valid: true }) + }) + + it.each([ + ["undefined", undefined], + ["null", null], + ["empty", ""], + ])("%s returns unsuccessful", (_, password) => { + expect(validatePassword(password as string)).toEqual({ + valid: false, + error: "Password invalid. Minimum 8 characters.", + }) + }) + + it.each([ + generator.word({ length: PASSWORD_MAX_LENGTH }), + generator.paragraph().substring(0, PASSWORD_MAX_LENGTH), + ])(`can use passwords up to 512 characters in length`, password => { + expect(validatePassword(password)).toEqual({ + valid: true, + }) + }) + + it.each([ + generator.word({ length: PASSWORD_MAX_LENGTH + 1 }), + generator + .paragraph({ sentences: 50 }) + .substring(0, PASSWORD_MAX_LENGTH + 1), + ])( + `passwords cannot have more than ${PASSWORD_MAX_LENGTH} characters`, + password => { + expect(validatePassword(password)).toEqual({ + valid: false, + error: "Password invalid. Maximum 512 characters.", + }) + } + ) + }) +}) diff --git a/packages/backend-core/src/tenancy/tenancy.ts b/packages/backend-core/src/tenancy/tenancy.ts index 7b17bdbe18..8835960ca5 100644 --- a/packages/backend-core/src/tenancy/tenancy.ts +++ b/packages/backend-core/src/tenancy/tenancy.ts @@ -39,7 +39,7 @@ const ALL_STRATEGIES = Object.values(TenantResolutionStrategy) export const getTenantIDFromCtx = ( ctx: BBContext, opts: GetTenantIdOptions -): string | null => { +): string | undefined => { // exit early if not multi-tenant if (!isMultiTenant()) { return DEFAULT_TENANT_ID @@ -93,11 +93,19 @@ export const getTenantIDFromCtx = ( // subdomain if (isAllowed(TenantResolutionStrategy.SUBDOMAIN)) { // e.g. 
budibase.app or local.com:10000 - const platformHost = new URL(getPlatformURL()).host.split(":")[0] + let platformHost + try { + platformHost = new URL(getPlatformURL()).host.split(":")[0] + } catch (err: any) { + // if invalid URL, just don't try to process subdomain + if (err.code !== "ERR_INVALID_URL") { + throw err + } + } // e.g. tenant.budibase.app or tenant.local.com const requestHost = ctx.host // parse the tenant id from the difference - if (requestHost.includes(platformHost)) { + if (platformHost && requestHost.includes(platformHost)) { const tenantId = requestHost.substring( 0, requestHost.indexOf(`.${platformHost}`) @@ -136,5 +144,5 @@ export const getTenantIDFromCtx = ( ctx.throw(403, "Tenant id not set") } - return null + return undefined } diff --git a/packages/backend-core/src/tenancy/tests/tenancy.spec.ts b/packages/backend-core/src/tenancy/tests/tenancy.spec.ts index ebeaca074c..95dd76a6dd 100644 --- a/packages/backend-core/src/tenancy/tests/tenancy.spec.ts +++ b/packages/backend-core/src/tenancy/tests/tenancy.spec.ts @@ -157,12 +157,12 @@ describe("getTenantIDFromCtx", () => { TenantResolutionStrategy.PATH, ], } - expect(getTenantIDFromCtx(ctx, mockOpts)).toBeNull() + expect(getTenantIDFromCtx(ctx, mockOpts)).toBeUndefined() expect(ctx.throw).toBeCalledTimes(1) expect(ctx.throw).toBeCalledWith(403, "Tenant id not set") }) - it("returns null if allowNoTenant is true", () => { + it("returns undefined if allowNoTenant is true", () => { const ctx = createCtx({}) mockOpts = { allowNoTenant: true, @@ -172,7 +172,7 @@ describe("getTenantIDFromCtx", () => { TenantResolutionStrategy.PATH, ], } - expect(getTenantIDFromCtx(ctx, mockOpts)).toBeNull() + expect(getTenantIDFromCtx(ctx, mockOpts)).toBeUndefined() }) }) diff --git a/packages/backend-core/src/timers/timers.ts b/packages/backend-core/src/timers/timers.ts index 000be74821..9121c576cd 100644 --- a/packages/backend-core/src/timers/timers.ts +++ b/packages/backend-core/src/timers/timers.ts @@ -20,3 +20,41 @@ export function cleanup() { } intervals = [] } + +export class ExecutionTimeoutError extends Error { + public readonly name = "ExecutionTimeoutError" +} + +export class ExecutionTimeTracker { + static withLimit(limitMs: number) { + return new ExecutionTimeTracker(limitMs) + } + + constructor(readonly limitMs: number) {} + + private totalTimeMs = 0 + + track(f: () => T): T { + this.checkLimit() + const start = process.hrtime.bigint() + try { + return f() + } finally { + const end = process.hrtime.bigint() + this.totalTimeMs += Number(end - start) / 1e6 + this.checkLimit() + } + } + + get elapsedMS() { + return this.totalTimeMs + } + + checkLimit() { + if (this.totalTimeMs > this.limitMs) { + throw new ExecutionTimeoutError( + `Execution time limit of ${this.limitMs}ms exceeded: ${this.totalTimeMs}ms` + ) + } + } +} diff --git a/packages/backend-core/src/users/db.ts b/packages/backend-core/src/users/db.ts index c071064713..4d0d216603 100644 --- a/packages/backend-core/src/users/db.ts +++ b/packages/backend-core/src/users/db.ts @@ -1,6 +1,5 @@ import env from "../environment" import * as eventHelpers from "./events" -import * as accounts from "../accounts" import * as accountSdk from "../accounts" import * as cache from "../cache" import { getGlobalDB, getIdentity, getTenantId } from "../context" @@ -11,12 +10,10 @@ import * as sessions from "../security/sessions" import * as usersCore from "./users" import { Account, - AllDocsResponse, BulkUserCreated, BulkUserDeleted, isSSOAccount, isSSOUser, - RowResponse, SaveUserOpts, 
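`ExecutionTimeTracker` accumulates wall time across `track` calls via `process.hrtime.bigint()` and throws `ExecutionTimeoutError` once the running total exceeds the budget, checking both before and after each tracked call. A sketch of how a consumer might budget repeated work (assuming `timers` is exported from `@budibase/backend-core`; `runScript` is a stand-in):

```ts
import { timers } from "@budibase/backend-core"

const runScript = (script: string) => script.length // hypothetical work

const tracker = timers.ExecutionTimeTracker.withLimit(1000) // 1s total

for (const script of ["a", "b", "c"]) {
  // Each call adds its duration to the running total; once the total
  // passes the limit, the next track() throws ExecutionTimeoutError.
  tracker.track(() => runScript(script))
}
console.log(`used ${tracker.elapsedMS}ms`)
```

This pairs with the `jsExecutionTracker` slot added to `ContextMap` earlier in the diff, which is where a tracker can ride along on the current context.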
User, UserStatus, @@ -30,6 +27,7 @@ import { } from "./utils" import { searchExistingEmails } from "./lookup" import { hash } from "../utils" +import { validatePassword } from "../security" type QuotaUpdateFn = ( change: number, @@ -46,6 +44,12 @@ type GroupFns = { getBulk: GroupGetFn getGroupBuilderAppIds: GroupBuildersFn } +type CreateAdminUserOpts = { + ssoId?: string + hashPassword?: boolean + requirePassword?: boolean + skipPasswordValidation?: boolean +} type FeatureFns = { isSSOEnforced: FeatureFn; isAppBuildersEnabled: FeatureFn } const bulkDeleteProcessing = async (dbUser: User) => { @@ -113,6 +117,14 @@ export class UserDB { if (await UserDB.isPreventPasswordActions(user, account)) { throw new HTTPError("Password change is disabled for this user", 400) } + + if (!opts.skipPasswordValidation) { + const passwordValidation = validatePassword(password) + if (!passwordValidation.valid) { + throw new HTTPError(passwordValidation.error, 400) + } + } + hashedPassword = opts.hashPassword ? await hash(password) : password } else if (dbUser) { hashedPassword = dbUser.password @@ -149,12 +161,12 @@ export class UserDB { static async allUsers() { const db = getGlobalDB() - const response = await db.allDocs( + const response = await db.allDocs( dbUtils.getGlobalUserParams(null, { include_docs: true, }) ) - return response.rows.map((row: any) => row.doc) + return response.rows.map(row => row.doc!) } static async countUsersByApp(appId: string) { @@ -212,13 +224,6 @@ export class UserDB { throw new Error("_id or email is required") } - if ( - user.builder?.apps?.length && - !(await UserDB.features.isAppBuildersEnabled()) - ) { - throw new Error("Unable to update app builders, please check license") - } - let dbUser: User | undefined if (_id) { // try to get existing user from db @@ -303,7 +308,7 @@ export class UserDB { static async bulkCreate( newUsersRequested: User[], - groups: string[] + groups?: string[] ): Promise { const tenantId = getTenantId() @@ -328,7 +333,7 @@ export class UserDB { }) continue } - newUser.userGroups = groups + newUser.userGroups = groups || [] newUsers.push(newUser) if (isCreator(newUser)) { newCreators.push(newUser) @@ -413,15 +418,13 @@ export class UserDB { } // Get users and delete - const allDocsResponse: AllDocsResponse = await db.allDocs({ + const allDocsResponse = await db.allDocs({ include_docs: true, keys: userIds, }) - const usersToDelete: User[] = allDocsResponse.rows.map( - (user: RowResponse) => { - return user.doc - } - ) + const usersToDelete = allDocsResponse.rows.map(user => { + return user.doc! 
+ }) // Delete from DB const toDelete = usersToDelete.map(user => ({ @@ -469,7 +472,7 @@ export class UserDB { if (!env.SELF_HOSTED && !env.DISABLE_ACCOUNT_PORTAL) { // root account holder can't be deleted from inside budibase const email = dbUser.email - const account = await accounts.getAccount(email) + const account = await accountSdk.getAccount(email) if (account) { if (dbUser.userId === getIdentity()!._id) { throw new HTTPError('Please visit "Account" to delete this user', 400) @@ -490,6 +493,38 @@ export class UserDB { await sessions.invalidateSessions(userId, { reason: "deletion" }) } + static async createAdminUser( + email: string, + password: string, + tenantId: string, + opts?: CreateAdminUserOpts + ) { + const user: User = { + email: email, + password: password, + createdAt: Date.now(), + roles: {}, + builder: { + global: true, + }, + admin: { + global: true, + }, + tenantId, + } + if (opts?.ssoId) { + user.ssoId = opts.ssoId + } + // always bust checklist beforehand, if an error occurs but can proceed, don't get + // stuck in a cycle + await cache.bustCache(cache.CacheKey.CHECKLIST) + return await UserDB.save(user, { + hashPassword: opts?.hashPassword, + requirePassword: opts?.requirePassword, + skipPasswordValidation: opts?.skipPasswordValidation, + }) + } + static async getGroups(groupIds: string[]) { return await this.groups.getBulk(groupIds) } diff --git a/packages/backend-core/src/users/lookup.ts b/packages/backend-core/src/users/lookup.ts index 17d0e91d88..355be74dab 100644 --- a/packages/backend-core/src/users/lookup.ts +++ b/packages/backend-core/src/users/lookup.ts @@ -6,6 +6,7 @@ import { } from "@budibase/types" import * as dbUtils from "../db" import { ViewName } from "../constants" +import { getExistingInvites } from "../cache/invite" /** * Apply a system-wide search on emails: @@ -26,6 +27,9 @@ export async function searchExistingEmails(emails: string[]) { const existingAccounts = await getExistingAccounts(emails) matchedEmails.push(...existingAccounts.map(account => account.email)) + const invitedEmails = await getExistingInvites(emails) + matchedEmails.push(...invitedEmails.map(invite => invite.email)) + return [...new Set(matchedEmails.map(email => email.toLowerCase()))] } diff --git a/packages/backend-core/src/users/users.ts b/packages/backend-core/src/users/users.ts index 6dc8750b62..cc2b4fc27f 100644 --- a/packages/backend-core/src/users/users.ts +++ b/packages/backend-core/src/users/users.ts @@ -25,6 +25,7 @@ import { import { getGlobalDB } from "../context" import * as context from "../context" import { isCreator } from "./utils" +import { UserDB } from "./db" type GetOpts = { cleanup?: boolean } @@ -43,7 +44,7 @@ function removeUserPassword(users: User | User[]) { return users } -export const isSupportedUserSearch = (query: SearchQuery) => { +export function isSupportedUserSearch(query: SearchQuery) { const allowed = [ { op: SearchQueryOperators.STRING, key: "email" }, { op: SearchQueryOperators.EQUAL, key: "_id" }, @@ -68,10 +69,10 @@ export const isSupportedUserSearch = (query: SearchQuery) => { return true } -export const bulkGetGlobalUsersById = async ( +export async function bulkGetGlobalUsersById( userIds: string[], opts?: GetOpts -) => { +) { const db = getGlobalDB() let users = ( await db.allDocs({ @@ -85,7 +86,7 @@ export const bulkGetGlobalUsersById = async ( return users } -export const getAllUserIds = async () => { +export async function getAllUserIds() { const db = getGlobalDB() const startKey = `${DocumentType.USER}${SEPARATOR}` const 
response = await db.allDocs({ @@ -95,7 +96,7 @@ export const getAllUserIds = async () => { return response.rows.map(row => row.id) } -export const bulkUpdateGlobalUsers = async (users: User[]) => { +export async function bulkUpdateGlobalUsers(users: User[]) { const db = getGlobalDB() return (await db.bulkDocs(users)) as BulkDocsResponse } @@ -113,10 +114,10 @@ export async function getById(id: string, opts?: GetOpts): Promise { * Given an email address this will use a view to search through * all the users to find one with this email address. */ -export const getGlobalUserByEmail = async ( +export async function getGlobalUserByEmail( email: String, opts?: GetOpts -): Promise => { +): Promise { if (email == null) { throw "Must supply an email address to view" } @@ -139,11 +140,23 @@ export const getGlobalUserByEmail = async ( return user } -export const searchGlobalUsersByApp = async ( +export async function doesUserExist(email: string) { + try { + const user = await getGlobalUserByEmail(email) + if (Array.isArray(user) || user != null) { + return true + } + } catch (err) { + return false + } + return false +} + +export async function searchGlobalUsersByApp( appId: any, opts: DatabaseQueryOpts, getOpts?: GetOpts -) => { +) { if (typeof appId !== "string") { throw new Error("Must provide a string based app ID") } @@ -151,7 +164,7 @@ export const searchGlobalUsersByApp = async ( include_docs: true, }) params.startkey = opts && opts.startkey ? opts.startkey : params.startkey - let response = await queryGlobalView(ViewName.USER_BY_APP, params) + let response = await queryGlobalView(ViewName.USER_BY_APP, params) if (!response) { response = [] @@ -167,10 +180,10 @@ export const searchGlobalUsersByApp = async ( Return any user who potentially has access to the application Admins, developers and app users with the explicitly role. */ -export const searchGlobalUsersByAppAccess = async ( +export async function searchGlobalUsersByAppAccess( appId: any, opts?: { limit?: number } -) => { +) { const roleSelector = `roles.${appId}` let orQuery: any[] = [ @@ -205,7 +218,7 @@ export const searchGlobalUsersByAppAccess = async ( return resp.rows } -export const getGlobalUserByAppPage = (appId: string, user: User) => { +export function getGlobalUserByAppPage(appId: string, user: User) { if (!user) { return } @@ -215,11 +228,11 @@ export const getGlobalUserByAppPage = (appId: string, user: User) => { /** * Performs a starts with search on the global email view. */ -export const searchGlobalUsersByEmail = async ( +export async function searchGlobalUsersByEmail( email: string | unknown, opts: any, getOpts?: GetOpts -) => { +) { if (typeof email !== "string") { throw new Error("Must provide a string to search by") } @@ -242,12 +255,12 @@ export const searchGlobalUsersByEmail = async ( } const PAGE_LIMIT = 8 -export const paginatedUsers = async ({ +export async function paginatedUsers({ bookmark, query, appId, limit, -}: SearchUsersRequest = {}) => { +}: SearchUsersRequest = {}) { const db = getGlobalDB() const pageSize = limit ?? 
PAGE_LIMIT const pageLimit = pageSize + 1 @@ -324,3 +337,20 @@ export function cleanseUserObject(user: User | ContextUser, base?: User) { } return user } + +export async function addAppBuilder(user: User, appId: string) { + const prodAppId = getProdAppID(appId) + user.builder ??= {} + user.builder.creator = true + user.builder.apps ??= [] + user.builder.apps.push(prodAppId) + await UserDB.save(user, { hashPassword: false }) +} + +export async function removeAppBuilder(user: User, appId: string) { + const prodAppId = getProdAppID(appId) + if (user.builder && user.builder.apps?.includes(prodAppId)) { + user.builder.apps = user.builder.apps.filter(id => id !== prodAppId) + } + await UserDB.save(user, { hashPassword: false }) +} diff --git a/packages/backend-core/src/utils/Duration.ts b/packages/backend-core/src/utils/Duration.ts index f376c2f7c7..730b59d1dc 100644 --- a/packages/backend-core/src/utils/Duration.ts +++ b/packages/backend-core/src/utils/Duration.ts @@ -28,6 +28,9 @@ export class Duration { toMs: () => { return Duration.convert(from, DurationType.MILLISECONDS, duration) }, + toSeconds: () => { + return Duration.convert(from, DurationType.SECONDS, duration) + }, } } @@ -46,4 +49,8 @@ export class Duration { static fromDays(duration: number) { return Duration.from(DurationType.DAYS, duration) } + + static fromMilliseconds(duration: number) { + return Duration.from(DurationType.MILLISECONDS, duration) + } } diff --git a/packages/backend-core/src/utils/hashing.ts b/packages/backend-core/src/utils/hashing.ts index aba11f38e6..54d7de4aba 100644 --- a/packages/backend-core/src/utils/hashing.ts +++ b/packages/backend-core/src/utils/hashing.ts @@ -1,4 +1,5 @@ import env from "../environment" + export * from "../docIds/newid" const bcrypt = env.JS_BCRYPT ? require("bcryptjs") : require("bcrypt") diff --git a/packages/backend-core/src/utils/tests/utils.spec.ts b/packages/backend-core/src/utils/tests/utils.spec.ts index 5a0ac4f283..7b411e801c 100644 --- a/packages/backend-core/src/utils/tests/utils.spec.ts +++ b/packages/backend-core/src/utils/tests/utils.spec.ts @@ -188,4 +188,17 @@ describe("utils", () => { expectResult(false) }) }) + + describe("hasCircularStructure", () => { + it("should detect a circular structure", () => { + const a: any = { b: "b" } + const b = { a } + a.b = b + expect(utils.hasCircularStructure(b)).toBe(true) + }) + + it("should allow none circular structures", () => { + expect(utils.hasCircularStructure({ a: "b" })).toBe(false) + }) + }) }) diff --git a/packages/backend-core/src/utils/utils.ts b/packages/backend-core/src/utils/utils.ts index b92471a7a4..30cf55b149 100644 --- a/packages/backend-core/src/utils/utils.ts +++ b/packages/backend-core/src/utils/utils.ts @@ -11,7 +11,7 @@ import { TenantResolutionStrategy, } from "@budibase/types" import type { SetOption } from "cookies" -const jwt = require("jsonwebtoken") +import jwt, { Secret } from "jsonwebtoken" const APP_PREFIX = DocumentType.APP + SEPARATOR const PROD_APP_PREFIX = "/app/" @@ -31,8 +31,8 @@ export async function resolveAppUrl(ctx: Ctx) { const appUrl = ctx.path.split("/")[2] let possibleAppUrl = `/${appUrl.toLowerCase()}` - let tenantId: string | null = context.getTenantId() - if (env.MULTI_TENANCY) { + let tenantId: string | undefined = context.getTenantId() + if (!env.isDev() && env.MULTI_TENANCY) { // always use the tenant id from the subdomain in multi tenancy // this ensures the logged-in user tenant id doesn't overwrite // e.g. 
in the case of viewing a public app while already logged-in to another tenant @@ -41,7 +41,7 @@ export async function resolveAppUrl(ctx: Ctx) { }) } - // search prod apps for a url that matches + // search prod apps for an url that matches const apps: App[] = await context.doInTenant( tenantId, () => getAllApps({ dev: false }) as Promise @@ -59,10 +59,7 @@ export function isServingApp(ctx: Ctx) { return true } // prod app - if (ctx.path.startsWith(PROD_APP_PREFIX)) { - return true - } - return false + return ctx.path.startsWith(PROD_APP_PREFIX) } export function isServingBuilder(ctx: Ctx): boolean { @@ -99,7 +96,7 @@ export async function getAppIdFromCtx(ctx: Ctx) { } // look in the path - const pathId = parseAppIdFromUrl(ctx.path) + const pathId = parseAppIdFromUrlPath(ctx.path) if (!appId && pathId) { appId = confirmAppId(pathId) } @@ -119,34 +116,37 @@ export async function getAppIdFromCtx(ctx: Ctx) { // referer header is present from a builder redirect const referer = ctx.request.headers.referer if (!appId && referer?.includes(BUILDER_APP_PREFIX)) { - const refererId = parseAppIdFromUrl(ctx.request.headers.referer) + const refererId = parseAppIdFromUrlPath(ctx.request.headers.referer) appId = confirmAppId(refererId) } return appId } -function parseAppIdFromUrl(url?: string) { +function parseAppIdFromUrlPath(url?: string) { if (!url) { return } - return url.split("/").find(subPath => subPath.startsWith(APP_PREFIX)) + return url + .split("?")[0] // Remove any possible query string + .split("/") + .find(subPath => subPath.startsWith(APP_PREFIX)) } /** * opens the contents of the specified encrypted JWT. * @return the contents of the token. */ -export function openJwt(token: string) { +export function openJwt(token?: string): T | undefined { if (!token) { - return token + return undefined } try { - return jwt.verify(token, env.JWT_SECRET) + return jwt.verify(token, env.JWT_SECRET as Secret) as T } catch (e) { if (env.JWT_SECRET_FALLBACK) { // fallback to enable rotation - return jwt.verify(token, env.JWT_SECRET_FALLBACK) + return jwt.verify(token, env.JWT_SECRET_FALLBACK) as T } else { throw e } @@ -158,13 +158,9 @@ export function isValidInternalAPIKey(apiKey: string) { return true } // fallback to enable rotation - if ( - env.INTERNAL_API_KEY_FALLBACK && - env.INTERNAL_API_KEY_FALLBACK === apiKey - ) { - return true - } - return false + return !!( + env.INTERNAL_API_KEY_FALLBACK && env.INTERNAL_API_KEY_FALLBACK === apiKey + ) } /** @@ -172,14 +168,14 @@ export function isValidInternalAPIKey(apiKey: string) { * @param ctx The request which is to be manipulated. * @param name The name of the cookie to get. 
*/ -export function getCookie(ctx: Ctx, name: string) { +export function getCookie(ctx: Ctx, name: string) { const cookie = ctx.cookies.get(name) if (!cookie) { - return cookie + return undefined } - return openJwt(cookie) + return openJwt(cookie) } /** @@ -196,7 +192,7 @@ export function setCookie( opts = { sign: true } ) { if (value && opts && opts.sign) { - value = jwt.sign(value, env.JWT_SECRET) + value = jwt.sign(value, env.JWT_SECRET as Secret) } const config: SetOption = { @@ -237,3 +233,17 @@ export function timeout(timeMs: number) { export function isAudited(event: Event) { return !!AuditedEventFriendlyName[event] } + +export function hasCircularStructure(json: any) { + if (typeof json !== "object") { + return false + } + try { + JSON.stringify(json) + } catch (err) { + if (err instanceof Error && err?.message.includes("circular structure")) { + return true + } + } + return false +} diff --git a/packages/backend-core/tests/core/users/users.spec.js b/packages/backend-core/tests/core/users/users.spec.js index ae7109344a..f08c435b95 100644 --- a/packages/backend-core/tests/core/users/users.spec.js +++ b/packages/backend-core/tests/core/users/users.spec.js @@ -1,5 +1,5 @@ -const _ = require('lodash/fp') -const {structures} = require("../../../tests") +const _ = require("lodash/fp") +const { structures } = require("../../../tests") jest.mock("../../../src/context") jest.mock("../../../src/db") @@ -7,10 +7,9 @@ jest.mock("../../../src/db") const context = require("../../../src/context") const db = require("../../../src/db") -const {getCreatorCount} = require('../../../src/users/users') +const { getCreatorCount } = require("../../../src/users/users") describe("Users", () => { - let getGlobalDBMock let getGlobalUserParamsMock let paginationMock @@ -26,26 +25,26 @@ describe("Users", () => { it("Retrieves the number of creators", async () => { const getUsers = (offset, limit, creators = false) => { const range = _.range(offset, limit) - const opts = creators ? {builder: {global: true}} : undefined + const opts = creators ? 
{ builder: { global: true } } : undefined return range.map(() => structures.users.user(opts)) } const page1Data = getUsers(0, 8) const page2Data = getUsers(8, 12, true) getGlobalDBMock.mockImplementation(() => ({ - name : "fake-db", + name: "fake-db", allDocs: () => ({ - rows: [...page1Data, ...page2Data] - }) + rows: [...page1Data, ...page2Data], + }), })) paginationMock.mockImplementationOnce(() => ({ data: page1Data, hasNextPage: true, - nextPage: "1" + nextPage: "1", })) paginationMock.mockImplementation(() => ({ data: page2Data, hasNextPage: false, - nextPage: undefined + nextPage: undefined, })) const creatorsCount = await getCreatorCount() expect(creatorsCount).toBe(4) diff --git a/packages/backend-core/tests/core/utilities/mocks/alerts.ts b/packages/backend-core/tests/core/utilities/mocks/alerts.ts index 90c9759c92..0b26e98363 100644 --- a/packages/backend-core/tests/core/utilities/mocks/alerts.ts +++ b/packages/backend-core/tests/core/utilities/mocks/alerts.ts @@ -1,3 +1,4 @@ jest.mock("../../../../src/logging/alerts") import * as _alerts from "../../../../src/logging/alerts" + export const alerts = jest.mocked(_alerts) diff --git a/packages/backend-core/tests/core/utilities/mocks/index.ts b/packages/backend-core/tests/core/utilities/mocks/index.ts index 9a72b38ef5..8705e563cb 100644 --- a/packages/backend-core/tests/core/utilities/mocks/index.ts +++ b/packages/backend-core/tests/core/utilities/mocks/index.ts @@ -1,5 +1,6 @@ jest.mock("../../../../src/accounts") import * as _accounts from "../../../../src/accounts" + export const accounts = jest.mocked(_accounts) export * as date from "./date" diff --git a/packages/backend-core/tests/core/utilities/structures/generator.ts b/packages/backend-core/tests/core/utilities/structures/generator.ts index ed4dac8255..64eb5ecc97 100644 --- a/packages/backend-core/tests/core/utilities/structures/generator.ts +++ b/packages/backend-core/tests/core/utilities/structures/generator.ts @@ -1,2 +1,3 @@ import Chance from "./Chance" + export const generator = new Chance() diff --git a/packages/backend-core/tests/core/utilities/structures/users.ts b/packages/backend-core/tests/core/utilities/structures/users.ts index 66d23696e0..8f4096d401 100644 --- a/packages/backend-core/tests/core/utilities/structures/users.ts +++ b/packages/backend-core/tests/core/utilities/structures/users.ts @@ -12,7 +12,7 @@ import { generator } from "./generator" import { tenant } from "." 
export const newEmail = () => { - return `${uuid()}@test.com` + return `${uuid()}@example.com` } export const user = (userProps?: Partial>): User => { @@ -21,7 +21,7 @@ export const user = (userProps?: Partial>): User => { _id: userId, userId, email: newEmail(), - password: "test", + password: "password", roles: { app_test: "admin" }, firstName: generator.first(), lastName: generator.last(), diff --git a/packages/backend-core/tests/jestSetup.ts b/packages/backend-core/tests/jestSetup.ts index 42a24ce733..e5d144290b 100644 --- a/packages/backend-core/tests/jestSetup.ts +++ b/packages/backend-core/tests/jestSetup.ts @@ -9,6 +9,7 @@ mocks.fetch.enable() // mock all dates to 2020-01-01T00:00:00.000Z // use tk.reset() to use real dates in individual tests import tk from "timekeeper" + tk.freeze(mocks.date.MOCK_DATE) if (!process.env.DEBUG) { diff --git a/packages/bbui/src/ActionButton/ActionButton.svelte b/packages/bbui/src/ActionButton/ActionButton.svelte index 427a98f888..0e6ec3d155 100644 --- a/packages/bbui/src/ActionButton/ActionButton.svelte +++ b/packages/bbui/src/ActionButton/ActionButton.svelte @@ -130,5 +130,6 @@ max-width: 150px; transform: translateX(-50%); text-align: center; + z-index: 1; } diff --git a/packages/bbui/src/ActionGroup/ActionGroup.svelte b/packages/bbui/src/ActionGroup/ActionGroup.svelte index 43d8cd8de5..978e920c42 100644 --- a/packages/bbui/src/ActionGroup/ActionGroup.svelte +++ b/packages/bbui/src/ActionGroup/ActionGroup.svelte @@ -1,5 +1,6 @@ @@ -81,7 +78,7 @@ var(--spacing-xl); } .property-panel.no-title { - padding: var(--spacing-xl); + padding-top: var(--spacing-xl); } .show { diff --git a/packages/bbui/src/Divider/Divider.svelte b/packages/bbui/src/Divider/Divider.svelte index e4f0f2fb61..f6837ca97c 100644 --- a/packages/bbui/src/Divider/Divider.svelte +++ b/packages/bbui/src/Divider/Divider.svelte @@ -1,5 +1,6 @@ - + diff --git a/packages/bbui/src/Form/Combobox.svelte b/packages/bbui/src/Form/Combobox.svelte index 343af559cb..44854d949e 100644 --- a/packages/bbui/src/Form/Combobox.svelte +++ b/packages/bbui/src/Form/Combobox.svelte @@ -11,6 +11,7 @@ export let error = null export let placeholder = "Choose an option or type" export let options = [] + export let helpText = null export let getOptionLabel = option => extractProperty(option, "label") export let getOptionValue = option => extractProperty(option, "value") @@ -27,7 +28,7 @@ } - + option @@ -34,7 +33,6 @@
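
The tenancy change above makes subdomain resolution tolerant of a bad platform URL: `new URL(getPlatformURL())` is wrapped in a try/catch keyed on `ERR_INVALID_URL`, and tenant extraction only runs when a platform host was actually parsed. A standalone sketch of that strategy (host names here are illustrative, and the real implementation also consults the other resolution strategies):

```typescript
function tenantFromSubdomain(
  requestHost: string,
  platformUrl: string
): string | undefined {
  let platformHost: string | undefined
  try {
    platformHost = new URL(platformUrl).host.split(":")[0]
  } catch (err: any) {
    // an unset or malformed platform URL simply disables this strategy
    if (err.code !== "ERR_INVALID_URL") {
      throw err
    }
  }
  if (platformHost && requestHost.includes(platformHost)) {
    // parse the tenant id from the difference, e.g. tenant.budibase.app
    const tenantId = requestHost.substring(
      0,
      requestHost.indexOf(`.${platformHost}`)
    )
    return tenantId || undefined
  }
  return undefined
}

console.log(tenantFromSubdomain("acme.budibase.app", "https://budibase.app")) // "acme"
console.log(tenantFromSubdomain("acme.budibase.app", "not-a-url")) // undefined
```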
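The new `ExecutionTimeTracker` in timers.ts measures each `track()` call with `process.hrtime.bigint()`, accumulates the cost into a shared budget, and throws `ExecutionTimeoutError` once the total passes the limit, checking both before and after each tracked call. A usage sketch, assuming the class is importable as shown (the import path is illustrative):

```typescript
import {
  ExecutionTimeTracker,
  ExecutionTimeoutError,
} from "@budibase/backend-core/src/timers/timers" // illustrative path

const tracker = ExecutionTimeTracker.withLimit(50) // 50ms total budget

try {
  for (let i = 0; i < 10_000; i++) {
    // every tracked call draws down the same shared budget
    tracker.track(() => {
      let total = 0
      for (let j = 0; j < 1_000_000; j++) total += j
      return total
    })
  }
} catch (err) {
  if (err instanceof ExecutionTimeoutError) {
    console.log(`gave up after ${tracker.elapsedMS.toFixed(2)}ms`)
  } else {
    throw err
  }
}
```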
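`UserDB.save` now rejects weak passwords up front (unless `opts.skipPasswordValidation` is set, which `createAdminUser` threads through), and auth.spec.ts pins the contract: at least 8 characters, at most `PASSWORD_MAX_LENGTH` (512). A minimal re-implementation consistent with those tests, not the exact body of security/auth.ts:

```typescript
const PASSWORD_MIN_LENGTH = 8
const PASSWORD_MAX_LENGTH = 512

type ValidationResult = { valid: true } | { valid: false; error: string }

function validatePassword(password?: string | null): ValidationResult {
  if (!password || password.length < PASSWORD_MIN_LENGTH) {
    return {
      valid: false,
      error: `Password invalid. Minimum ${PASSWORD_MIN_LENGTH} characters.`,
    }
  }
  if (password.length > PASSWORD_MAX_LENGTH) {
    return {
      valid: false,
      error: `Password invalid. Maximum ${PASSWORD_MAX_LENGTH} characters.`,
    }
  }
  return { valid: true }
}

console.log(validatePassword("password")) // { valid: true }
console.log(validatePassword("")) // minimum-length error, as in the tests
```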
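`Duration` gains `fromMilliseconds` and `toSeconds`, so callers can round-trip between units without hand-rolled arithmetic. A small usage sketch (the import path is illustrative; the class lives in src/utils/Duration.ts):

```typescript
import { Duration } from "./Duration" // illustrative path

// the new entry points compose with the existing converters
console.log(Duration.fromMilliseconds(90_000).toSeconds()) // 90
console.log(Duration.fromDays(1).toMs()) // 86400000
```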
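`openJwt` is now generic, returning `T | undefined` instead of echoing the raw token back, and like `isValidInternalAPIKey` it keeps a fallback secret so credentials survive rotation. The pattern in isolation, using jsonwebtoken directly (secrets and payload here are illustrative):

```typescript
import jwt, { Secret } from "jsonwebtoken"

const JWT_SECRET: Secret = "new-secret"
const JWT_SECRET_FALLBACK: Secret | undefined = "old-secret"

function openToken<T>(token?: string): T | undefined {
  if (!token) {
    return undefined
  }
  try {
    return jwt.verify(token, JWT_SECRET) as T
  } catch (e) {
    // fallback to enable rotation: retry with the previous secret
    if (JWT_SECRET_FALLBACK) {
      return jwt.verify(token, JWT_SECRET_FALLBACK) as T
    }
    throw e
  }
}

// a token minted under the old secret still opens after rotation
const legacy = jwt.sign({ userId: "abc" }, "old-secret")
console.log(openToken<{ userId: string }>(legacy)) // { userId: "abc", iat: ... }
```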