Merge branch 'master' of github.com:Budibase/budibase into global-bindings

This commit is contained in: commit c3ebbb76e2

@@ -99,11 +99,6 @@ jobs:
           else
             yarn test --ignore=@budibase/worker --ignore=@budibase/server --ignore=@budibase/pro
           fi
-      - uses: codecov/codecov-action@v3
-        with:
-          token: ${{ secrets.CODECOV_TOKEN }} # not required for public repos
-          name: codecov-umbrella
-          verbose: true

   test-worker:
     runs-on: ubuntu-latest

@@ -129,12 +124,6 @@ jobs:
             yarn test --scope=@budibase/worker
           fi
-
-      - uses: codecov/codecov-action@v3
-        with:
-          token: ${{ secrets.CODECOV_TOKEN || github.token }} # not required for public repos
-          name: codecov-umbrella
-          verbose: true

   test-server:
     runs-on: ubuntu-latest
     steps:

@@ -159,12 +148,6 @@ jobs:
             yarn test --scope=@budibase/server
           fi
-
-      - uses: codecov/codecov-action@v3
-        with:
-          token: ${{ secrets.CODECOV_TOKEN || github.token }} # not required for public repos
-          name: codecov-umbrella
-          verbose: true

   test-pro:
     runs-on: ubuntu-latest
     if: github.event_name != 'pull_request' || github.event.pull_request.head.repo.full_name == 'Budibase/budibase'

@@ -10,6 +10,7 @@ jobs:
     steps:
       - uses: actions/stale@v8
         with:
+          days-before-stale: 330
           operations-per-run: 1
           # stale rules for PRs
           days-before-pr-stale: 7

@@ -0,0 +1,3 @@
+/packages/server @Budibase/backend
+/packages/worker @Budibase/backend
+/packages/backend-core @Budibase/backend
@@ -1,9 +1,6 @@
 dependencies:
 - name: couchdb
   repository: https://apache.github.io/couchdb-helm
-  version: 3.3.4
-- name: ingress-nginx
-  repository: https://kubernetes.github.io/ingress-nginx
-  version: 4.0.13
-digest: sha256:20892705c2d8e64c98257d181063a514ac55013e2b43399a6e54868a97f97845
-generated: "2021-12-30T18:55:30.878411Z"
+  version: 4.3.0
+digest: sha256:94449a7f195b186f5af33ec5aa66d58b36bede240fae710f021ca87837b30606
+generated: "2023-11-20T17:43:02.777596Z"

@@ -17,10 +17,6 @@ version: 0.0.0
 appVersion: 0.0.0
 dependencies:
   - name: couchdb
-    version: 3.3.4
+    version: 4.3.0
     repository: https://apache.github.io/couchdb-helm
     condition: services.couchdb.enabled
-  - name: ingress-nginx
-    version: 4.0.13
-    repository: https://kubernetes.github.io/ingress-nginx
-    condition: ingress.nginx

@@ -1,39 +1,217 @@
-# Budibase
+# budibase

-[Budibase](https://budibase.com/) Budibase is an open source low-code platform, helping thousands of teams build apps for their workplace in minutes.
+Budibase is an open source low-code platform, helping thousands of teams build apps for their workplace in minutes.

-## TL;DR;
-```console
-$ cd chart
-$ helm install budibase .
-```
-
-## Introduction
-
-This chart bootstraps a [Budibase](https://budibase.com/) deployment on a [Kubernetes](http://kubernetes.io) cluster using the [Helm](https://helm.sh) package manager.
-
 ## Prerequisites

-- helm v3 or above
+- `helm` v3 or above
 - Kubernetes 1.4+
-- PV provisioner support in the underlying infrastructure (with persistence storage enabled)
+- A storage controller (if you want to use persistent storage)
+- An ingress controller (if you want to define an `Ingress` resource)
+- `metrics-server` (if you want to make use of horizontal pod autoscaling)

-## Installing the Chart
+## Chart dependencies

-To install the chart with the release name `budi-release`:
+This chart depends on the official Apache CouchDB chart. You can see its
+documentation here:
+<https://github.com/apache/couchdb-helm/tree/couchdb-4.3.0/couchdb>.
+
+## Upgrading
+
+### `2.x` to `3.0.0`
+
+We made a number of breaking changes in this release to make the chart more
+idiomatic and easier to use.
+
+1. We no longer bundle `ingress-nginx`. If you were relying on this to supply
+   an ingress controller to your cluster, you will now need to deploy that
+   separately. You'll find guidance for that here:
+   <https://kubernetes.github.io/ingress-nginx/>.
+2. We've upgraded the version of the [CouchDB chart](https://github.com/apache/couchdb-helm)
+   we use from `3.3.4` to `4.3.0`. The primary motivation for this was to align
+   the CouchDB chart used with the CouchDB version used, which has also updated
+   from 3.1.1 to 3.2.1. Additionally, we're moving away from the official CouchDB
+   image to one we're building ourselves.
+3. We've separated out the supplied AWS ALB ingress resource for those deploying
+   into EKS. Where previously you enabled this by setting `ingress.enabled: false`
+   and `ingress.aws: true`, you now set `awsAlbIngress.enabled: true` and all
+   configuration for it is under `awsAlbIngress`.
+4. The `HorizontalPodAutoscaler` that was configured at `hpa.enabled: true` has
+   been split into 3 separate HPAs, one for each of `apps`, `worker`, and `proxy`.
+   They are configured at `services.{apps,worker,proxy}.autoscaling`.
+
+## Installing
+
+To install the chart from our repository:

 ```console
-$ helm install budi-release .
+$ helm repo add budibase https://budibase.github.io/budibase/
+$ helm repo update
+$ helm install --create-namespace --namespace budibase budibase budibase/budibase
 ```

-The command deploys Budibase on the Kubernetes cluster in the default configuration. The [configuration](#configuration) section lists the parameters that can be configured during installation.
-
-> **Tip**: List all releases using `helm list`
-
-## Uninstalling the Chart
-
-To uninstall/delete the `my-release` deployment:
+To install the chart from this repo:

 ```console
-$ helm delete my-release
+$ git clone git@github.com:budibase/budibase.git
+$ cd budibase/charts/budibase
+$ helm install --create-namespace --namespace budibase budibase .
 ```
+
+## Example minimal configuration
+
+Here's an example `values.yaml` that would get a Budibase instance running in a home
+cluster using an nginx ingress controller and NFS as cluster storage (basically one of our
+staff's homelabs).
+
+<details>
+
+```yaml
+ingress:
+  enabled: true
+  className: "nginx"
+  hosts:
+    - host: budibase.local # set this to whatever DNS name you'd use
+      paths:
+        - backend:
+            service:
+              name: proxy-service
+              port:
+                number: 10000
+          path: /
+          pathType: Prefix
+
+couchdb:
+  persistentVolume:
+    enabled: true
+    storageClass: "nfs-client"
+  adminPassword: admin
+
+services:
+  objectStore:
+    storageClass: "nfs-client"
+  redis:
+    storageClass: "nfs-client"
+```
+
+If you wanted to use this when bringing up Budibase in your own cluster, you could save it
+to your hard disk and run the following:
+
+```console
+$ helm install --create-namespace --namespace budibase budibase . -f values.yaml
+```
+
+</details>
+
+## Configuring
+
+| Key | Type | Default | Description |
+|-----|------|---------|-------------|
+| affinity | object | `{}` | Sets the affinity for all pods created by this chart. Should not ordinarily need to be changed. See <https://kubernetes.io/docs/tasks/configure-pod-container/assign-pods-nodes-using-node-affinity/> for more information on affinity. |
+| awsAlbIngress.certificateArn | string | `""` | If you're wanting to use HTTPS, you'll need to create an ACM certificate and specify the ARN here. |
+| awsAlbIngress.enabled | bool | `false` | Whether to create an ALB Ingress resource pointing to the Budibase proxy. Requires the AWS ALB Ingress Controller. |
+| couchdb.clusterSize | int | `1` | The number of replicas to run in the CouchDB cluster. We set this to 1 by default to make things simpler, but you can set it to 3 if you need a high-availability CouchDB cluster. |
+| couchdb.couchdbConfig.couchdb.uuid | string | `"budibase-couchdb"` | Unique identifier for this CouchDB server instance. You shouldn't need to change this. |
+| couchdb.image | object | `{}` | We use a custom CouchDB image for running Budibase and we don't support using any other CouchDB image. You shouldn't change this, and if you do we can't guarantee that Budibase will work. |
+| globals.apiEncryptionKey | string | `""` | Used for encrypting API keys and environment variables when stored in the database. You don't need to set this if `createSecrets` is true. |
+| globals.appVersion | string | `""` | The version of Budibase to deploy. Defaults to what's specified by {{ .Chart.AppVersion }}. Ends up being used as the image version tag for the apps, proxy, and worker images. |
+| globals.automationMaxIterations | string | `"200"` | The maximum number of iterations allowed for an automation loop step. You can read more about looping here: <https://docs.budibase.com/docs/looping>. |
+| globals.budibaseEnv | string | `"PRODUCTION"` | Sets the environment variable BUDIBASE_ENVIRONMENT for the apps and worker pods. Should not ordinarily need to be changed. |
+| globals.cookieDomain | string | `""` | Sets the domain attribute of the cookie that Budibase uses to store session information. See <https://developer.mozilla.org/en-US/docs/Web/HTTP/Cookies#define_where_cookies_are_sent> for details on why you might want to set this. |
+| globals.createSecrets | bool | `true` | Create an internal API key, JWT secret, object store access key and secret, and store them in a Kubernetes `Secret`. |
+| globals.enableAnalytics | string | `"1"` | Whether to enable analytics or not. You can read more about our analytics here: <https://docs.budibase.com/docs/analytics>. |
+| globals.google | object | `{"clientId":"","secret":""}` | Google OAuth settings. These can also be set in the Budibase UI, see <https://docs.budibase.com/docs/sso-with-google> for details. |
+| globals.google.clientId | string | `""` | Client ID of your Google OAuth app. |
+| globals.google.secret | string | `""` | Client secret of your Google OAuth app. |
+| globals.httpMigrations | string | `"0"` | Whether or not to enable doing data migrations over the HTTP API. If this is set to "0", migrations are run on startup. You shouldn't ordinarily need to change this. |
+| globals.internalApiKey | string | `""` | API key used for internal Budibase API calls. You don't need to set this if `createSecrets` is true. |
+| globals.internalApiKeyFallback | string | `""` | A fallback value for `internalApiKey`. If you're rotating your encryption key, you can set this to the old value for the duration of the rotation. |
+| globals.jwtSecret | string | `""` | Secret used for signing JWTs. You don't need to set this if `createSecrets` is true. |
+| globals.jwtSecretFallback | string | `""` | A fallback value for `jwtSecret`. If you're rotating your JWT secret, you can set this to the old value for the duration of the rotation. |
+| globals.platformUrl | string | `""` | Set the `platformUrl` binding. You can also do this in Settings > Organisation if you are self-hosting. |
+| globals.smtp.enabled | bool | `false` | Whether to enable SMTP or not. |
+| globals.smtp.from | string | `""` | The email address to use in the "From:" field of emails sent by Budibase. |
+| globals.smtp.host | string | `""` | The hostname of your SMTP server. |
+| globals.smtp.password | string | `""` | The password to use when authenticating with your SMTP server. |
+| globals.smtp.port | string | `"587"` | The port of your SMTP server. |
+| globals.smtp.user | string | `""` | The username to use when authenticating with your SMTP server. |
+| globals.tenantFeatureFlags | string | `"*:LICENSING,*:USER_GROUPS,*:ONBOARDING_TOUR"` | Sets what feature flags are enabled and for which tenants. Should not ordinarily need to be changed. |
+| imagePullSecrets | list | `[]` | Passed to all pods created by this chart. Should not ordinarily need to be changed. |
+| ingress.className | string | `""` | What ingress class to use. |
+| ingress.enabled | bool | `true` | Whether to create an Ingress resource pointing to the Budibase proxy. |
+| ingress.hosts | list | `[]` | Standard hosts block for the Ingress resource. Defaults to pointing to the Budibase proxy. |
+| nameOverride | string | `""` | Override the name of the deployment. Defaults to {{ .Chart.Name }}. |
+| service.port | int | `10000` | Port to expose on the service. |
+| service.type | string | `"ClusterIP"` | Service type for the service that points to the main Budibase proxy pod. |
+| serviceAccount.annotations | object | `{}` | Annotations to add to the service account |
+| serviceAccount.create | bool | `true` | Specifies whether a service account should be created |
+| serviceAccount.name | string | `""` | The name of the service account to use. If not set and create is true, a name is generated using the fullname template |
+| services.apps.autoscaling.enabled | bool | `false` | Whether to enable horizontal pod autoscaling for the apps service. |
+| services.apps.autoscaling.maxReplicas | int | `10` | |
+| services.apps.autoscaling.minReplicas | int | `1` | |
+| services.apps.autoscaling.targetCPUUtilizationPercentage | int | `80` | Target CPU utilization percentage for the apps service. Note that for autoscaling to work, you will need to have metrics-server configured, and resources set for the apps pods. |
+| services.apps.httpLogging | int | `1` | Whether or not to log HTTP requests to the apps service. |
+| services.apps.livenessProbe | object | HTTP health checks. | Liveness probe configuration for apps pods. You shouldn't need to change this, but if you want to you can find more information here: <https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/> |
+| services.apps.logLevel | string | `"info"` | The log level for the apps service. |
+| services.apps.readinessProbe | object | HTTP health checks. | Readiness probe configuration for apps pods. You shouldn't need to change this, but if you want to you can find more information here: <https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/> |
+| services.apps.replicaCount | int | `1` | The number of apps replicas to run. |
+| services.apps.resources | object | `{}` | The resources to use for apps pods. See <https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/> for more information on how to set these. |
+| services.apps.startupProbe | object | HTTP health checks. | Startup probe configuration for apps pods. You shouldn't need to change this, but if you want to you can find more information here: <https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/> |
+| services.couchdb.backup.enabled | bool | `false` | Whether or not to enable periodic CouchDB backups. This works by replicating to another CouchDB instance. |
+| services.couchdb.backup.interval | string | `""` | Backup interval in seconds |
+| services.couchdb.backup.resources | object | `{}` | The resources to use for CouchDB backup pods. See <https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/> for more information on how to set these. |
+| services.couchdb.backup.target | string | `""` | Target CouchDB instance to back up to, either a hostname or an IP address. |
+| services.couchdb.enabled | bool | `true` | Whether or not to spin up a CouchDB instance in your cluster. True by default, and the configuration for the CouchDB instance is under the `couchdb` key at the root of this file. You can see what options are available to you by looking at the official CouchDB Helm chart: <https://github.com/apache/couchdb-helm/tree/couchdb-4.3.0/couchdb>. |
+| services.couchdb.port | int | `5984` | |
+| services.dns | string | `"cluster.local"` | The DNS suffix to use for service discovery. You only need to change this if you've configured your cluster to use a different DNS suffix. |
+| services.objectStore.accessKey | string | `""` | AWS_ACCESS_KEY if using S3 |
+| services.objectStore.browser | bool | `true` | Whether to enable the Minio web console or not. If you're exposing Minio to the Internet (via a custom Ingress record, for example), you should set this to false. If you're only exposing Minio to your cluster, you can leave this as true. |
+| services.objectStore.cloudfront.cdn | string | `""` | Set the url of a distribution to enable cloudfront. |
+| services.objectStore.cloudfront.privateKey64 | string | `""` | Base64 encoded private key for the above public key. |
+| services.objectStore.cloudfront.publicKeyId | string | `""` | ID of public key stored in cloudfront. |
+| services.objectStore.minio | bool | `true` | Set to false if using another object store, such as S3. You will need to set `services.objectStore.url` to point to your bucket if you do this. |
+| services.objectStore.region | string | `""` | AWS_REGION if using S3 |
+| services.objectStore.resources | object | `{}` | The resources to use for Minio pods. See <https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/> for more information on how to set these. |
+| services.objectStore.secretKey | string | `""` | AWS_SECRET_ACCESS_KEY if using S3 |
+| services.objectStore.storage | string | `"100Mi"` | How much storage to give Minio in its PersistentVolumeClaim. |
+| services.objectStore.storageClass | string | `""` | If defined, storageClassName: <storageClass> If set to "-", storageClassName: "", which disables dynamic provisioning If undefined (the default) or set to null, no storageClassName spec is set, choosing the default provisioner. |
+| services.objectStore.url | string | `"http://minio-service:9000"` | URL to use for object storage. Only change this if you're using an external object store, such as S3. Remember to set `minio: false` if you do this. |
+| services.proxy.autoscaling.enabled | bool | `false` | Whether to enable horizontal pod autoscaling for the proxy service. |
+| services.proxy.autoscaling.maxReplicas | int | `10` | |
+| services.proxy.autoscaling.minReplicas | int | `1` | |
+| services.proxy.autoscaling.targetCPUUtilizationPercentage | int | `80` | Target CPU utilization percentage for the proxy service. Note that for autoscaling to work, you will need to have metrics-server configured, and resources set for the proxy pods. |
+| services.proxy.livenessProbe | object | HTTP health checks. | Liveness probe configuration for proxy pods. You shouldn't need to change this, but if you want to you can find more information here: <https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/> |
+| services.proxy.readinessProbe | object | HTTP health checks. | Readiness probe configuration for proxy pods. You shouldn't need to change this, but if you want to you can find more information here: <https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/> |
+| services.proxy.replicaCount | int | `1` | The number of proxy replicas to run. |
+| services.proxy.resources | object | `{}` | The resources to use for proxy pods. See <https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/> for more information on how to set these. |
+| services.proxy.startupProbe | object | HTTP health checks. | Startup probe configuration for proxy pods. You shouldn't need to change this, but if you want to you can find more information here: <https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/> |
+| services.redis.enabled | bool | `true` | Whether or not to deploy a Redis pod into your cluster. |
+| services.redis.password | string | `"budibase"` | The password to use when connecting to Redis. It's recommended that you change this from the default if you're running Redis in-cluster. |
+| services.redis.port | int | `6379` | Port to expose Redis on. |
+| services.redis.resources | object | `{}` | The resources to use for Redis pods. See <https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/> for more information on how to set these. |
+| services.redis.storage | string | `"100Mi"` | How much persistent storage to allocate to Redis. |
+| services.redis.storageClass | string | `""` | If defined, storageClassName: <storageClass> If set to "-", storageClassName: "", which disables dynamic provisioning If undefined (the default) or set to null, no storageClassName spec is set, choosing the default provisioner. |
+| services.redis.url | string | `""` | If you choose to run Redis externally to this chart, you can specify the connection details here. |
+| services.worker.autoscaling.enabled | bool | `false` | Whether to enable horizontal pod autoscaling for the worker service. |
+| services.worker.autoscaling.maxReplicas | int | `10` | |
+| services.worker.autoscaling.minReplicas | int | `1` | |
+| services.worker.autoscaling.targetCPUUtilizationPercentage | int | `80` | Target CPU utilization percentage for the worker service. Note that for autoscaling to work, you will need to have metrics-server configured, and resources set for the worker pods. |
+| services.worker.httpLogging | int | `1` | Whether or not to log HTTP requests to the worker service. |
+| services.worker.livenessProbe | object | HTTP health checks. | Liveness probe configuration for worker pods. You shouldn't need to change this, but if you want to you can find more information here: <https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/> |
+| services.worker.logLevel | string | `"info"` | The log level for the worker service. |
+| services.worker.readinessProbe | object | HTTP health checks. | Readiness probe configuration for worker pods. You shouldn't need to change this, but if you want to you can find more information here: <https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/> |
+| services.worker.replicaCount | int | `1` | The number of worker replicas to run. |
+| services.worker.resources | object | `{}` | The resources to use for worker pods. See <https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/> for more information on how to set these. |
+| services.worker.startupProbe | object | HTTP health checks. | Startup probe configuration for worker pods. You shouldn't need to change this, but if you want to you can find more information here: <https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/> |
+| tolerations | list | `[]` | Sets the tolerations for all pods created by this chart. Should not ordinarily need to be changed. See <https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/> for more information on tolerations. |
+
+## Uninstalling
+
+To uninstall the chart, assuming you named the release `budibase` (both commands in the installation section do so):
+
+```console
+$ helm uninstall --namespace budibase budibase
+```
+
+----------------------------------------------
+Autogenerated from chart metadata using [helm-docs v1.11.3](https://github.com/norwoodj/helm-docs/releases/v1.11.3)
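
To make the `2.x` to `3.0.0` upgrade notes above concrete, here is a minimal sketch of how the old ALB settings map onto the new `awsAlbIngress` block. The key names come straight from this diff; the ARN value is a hypothetical placeholder:

```yaml
# 2.x layout (removed in 3.0.0)
ingress:
  enabled: false
  aws: true
  certificateArn: "arn:aws:acm:eu-west-1:111122223333:certificate/example" # hypothetical ARN

# 3.0.0 layout
awsAlbIngress:
  enabled: true
  certificateArn: "arn:aws:acm:eu-west-1:111122223333:certificate/example" # hypothetical ARN
```

The other keys that moved (`sslPolicy`, `securityGroups`) follow the same `ingress.*` to `awsAlbIngress.*` rename, as the template changes later in this diff show.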

@@ -0,0 +1,117 @@
+{{ template "chart.header" . }}
+{{ template "chart.description" . }}
+
+## Prerequisites
+
+- `helm` v3 or above
+- Kubernetes 1.4+
+- A storage controller (if you want to use persistent storage)
+- An ingress controller (if you want to define an `Ingress` resource)
+- `metrics-server` (if you want to make use of horizontal pod autoscaling)
+
+## Chart dependencies
+
+This chart depends on the official Apache CouchDB chart. You can see its
+documentation here:
+<https://github.com/apache/couchdb-helm/tree/couchdb-4.3.0/couchdb>.
+
+## Upgrading
+
+### `2.x` to `3.0.0`
+
+We made a number of breaking changes in this release to make the chart more
+idiomatic and easier to use.
+
+1. We no longer bundle `ingress-nginx`. If you were relying on this to supply
+   an ingress controller to your cluster, you will now need to deploy that
+   separately. You'll find guidance for that here:
+   <https://kubernetes.github.io/ingress-nginx/>.
+2. We've upgraded the version of the [CouchDB chart](https://github.com/apache/couchdb-helm)
+   we use from `3.3.4` to `4.3.0`. The primary motivation for this was to align
+   the CouchDB chart used with the CouchDB version used, which has also updated
+   from 3.1.1 to 3.2.1. Additionally, we're moving away from the official CouchDB
+   image to one we're building ourselves.
+3. We've separated out the supplied AWS ALB ingress resource for those deploying
+   into EKS. Where previously you enabled this by setting `ingress.enabled: false`
+   and `ingress.aws: true`, you now set `awsAlbIngress.enabled: true` and all
+   configuration for it is under `awsAlbIngress`.
+4. The `HorizontalPodAutoscaler` that was configured at `hpa.enabled: true` has
+   been split into 3 separate HPAs, one for each of `apps`, `worker`, and `proxy`.
+   They are configured at `services.{apps,worker,proxy}.autoscaling`.
+
+## Installing
+
+To install the chart from our repository:
+
+```console
+$ helm repo add budibase https://budibase.github.io/budibase/
+$ helm repo update
+$ helm install --create-namespace --namespace budibase budibase budibase/budibase
+```
+
+To install the chart from this repo:
+
+```console
+$ git clone git@github.com:budibase/budibase.git
+$ cd budibase/charts/budibase
+$ helm install --create-namespace --namespace budibase budibase .
+```
+
+## Example minimal configuration
+
+Here's an example `values.yaml` that would get a Budibase instance running in a home
+cluster using an nginx ingress controller and NFS as cluster storage (basically one of our
+staff's homelabs).
+
+<details>
+
+```yaml
+ingress:
+  enabled: true
+  className: "nginx"
+  hosts:
+    - host: budibase.local # set this to whatever DNS name you'd use
+      paths:
+        - backend:
+            service:
+              name: proxy-service
+              port:
+                number: 10000
+          path: /
+          pathType: Prefix
+
+couchdb:
+  persistentVolume:
+    enabled: true
+    storageClass: "nfs-client"
+  adminPassword: admin
+
+services:
+  objectStore:
+    storageClass: "nfs-client"
+  redis:
+    storageClass: "nfs-client"
+```
+
+If you wanted to use this when bringing up Budibase in your own cluster, you could save it
+to your hard disk and run the following:
+
+```console
+$ helm install --create-namespace --namespace budibase budibase . -f values.yaml
+```
+
+</details>
+
+## Configuring
+
+{{ template "chart.valuesTable" . }}
+
+## Uninstalling
+
+To uninstall the chart, assuming you named the release `budibase` (both commands in the installation section do so):
+
+```console
+$ helm uninstall --namespace budibase budibase
+```
+
+{{ template "helm-docs.versionFooter" . }}
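
A note on how the template above relates to the README earlier in this diff: helm-docs expands `{{ template "chart.valuesTable" . }}` from the `# --` doc comments in `values.yaml`. As a sketch built from keys that appear later in this diff, a value documented like this:

```yaml
service:
  # -- Port to expose on the service.
  port: 10000
```

renders as the `service.port` row of the Configuring table, with the type and default inferred from the value itself.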
Binary file not shown.
Binary file not shown.
Binary file not shown.

@@ -1,4 +1,4 @@
-{{- if .Values.ingress.aws }}
+{{- if .Values.awsAlbIngress.enabled }}
 apiVersion: networking.k8s.io/v1
 kind: Ingress
 metadata:

@@ -7,24 +7,24 @@ metadata:
     kubernetes.io/ingress.class: alb
     alb.ingress.kubernetes.io/scheme: internet-facing
     alb.ingress.kubernetes.io/target-type: ip
-    alb.ingress.kubernetes.io/success-codes: 200,301
-    alb.ingress.kubernetes.io/healthcheck-path: /
-    {{- if .Values.ingress.certificateArn }}
+    alb.ingress.kubernetes.io/success-codes: '200'
+    alb.ingress.kubernetes.io/healthcheck-path: '/health'
+    {{- if .Values.awsAlbIngress.certificateArn }}
     alb.ingress.kubernetes.io/actions.ssl-redirect: '{"Type": "redirect", "RedirectConfig": { "Protocol": "HTTPS", "Port": "443", "StatusCode": "HTTP_301"}}'
     alb.ingress.kubernetes.io/listen-ports: '[{"HTTP": 80}, {"HTTPS":443}]'
-    alb.ingress.kubernetes.io/certificate-arn: {{ .Values.ingress.certificateArn }}
+    alb.ingress.kubernetes.io/certificate-arn: {{ .Values.awsAlbIngress.certificateArn }}
     {{- end }}
-    {{- if .Values.ingress.sslPolicy }}
-    alb.ingress.kubernetes.io/actions.ssl-policy: {{ .Values.ingress.sslPolicy }}
+    {{- if .Values.awsAlbIngress.sslPolicy }}
+    alb.ingress.kubernetes.io/actions.ssl-policy: {{ .Values.awsAlbIngress.sslPolicy }}
     {{- end }}
-    {{- if .Values.ingress.securityGroups }}
-    alb.ingress.kubernetes.io/security-groups: {{ .Values.ingress.securityGroups }}
+    {{- if .Values.awsAlbIngress.securityGroups }}
+    alb.ingress.kubernetes.io/security-groups: {{ .Values.awsAlbIngress.securityGroups }}
     {{- end }}
 spec:
   rules:
     - http:
         paths:
-          {{- if .Values.ingress.certificateArn }}
+          {{- if .Values.awsAlbIngress.certificateArn }}
          - path: /
            pathType: Prefix
            backend:

@@ -2,12 +2,9 @@ apiVersion: apps/v1
 kind: Deployment
 metadata:
   annotations:
-    kompose.cmd: kompose convert
-    kompose.version: 1.21.0 (992df58d8)
 {{ if .Values.services.apps.deploymentAnnotations }}
 {{- toYaml .Values.services.apps.deploymentAnnotations | indent 4 -}}
 {{ end }}
-  creationTimestamp: null
   labels:
     io.kompose.service: app-service
 {{ if .Values.services.apps.deploymentLabels }}

@@ -24,12 +21,9 @@ spec:
   template:
     metadata:
       annotations:
-        kompose.cmd: kompose convert
-        kompose.version: 1.21.0 (992df58d8)
 {{ if .Values.services.apps.templateAnnotations }}
 {{- toYaml .Values.services.apps.templateAnnotations | indent 8 -}}
 {{ end }}
-      creationTimestamp: null
       labels:
         io.kompose.service: app-service
 {{ if .Values.services.apps.templateLabels }}

@@ -0,0 +1,32 @@
+{{- if .Values.services.apps.autoscaling.enabled }}
+apiVersion: {{ ternary "autoscaling/v2" "autoscaling/v2beta2" (.Capabilities.APIVersions.Has "autoscaling/v2") }}
+kind: HorizontalPodAutoscaler
+metadata:
+  name: {{ include "budibase.fullname" . }}-apps
+  labels:
+    {{- include "budibase.labels" . | nindent 4 }}
+spec:
+  scaleTargetRef:
+    apiVersion: apps/v1
+    kind: Deployment
+    name: app-service
+  minReplicas: {{ .Values.services.apps.autoscaling.minReplicas }}
+  maxReplicas: {{ .Values.services.apps.autoscaling.maxReplicas }}
+  metrics:
+    {{- if .Values.services.apps.autoscaling.targetCPUUtilizationPercentage }}
+    - type: Resource
+      resource:
+        name: cpu
+        target:
+          type: Utilization
+          averageUtilization: {{ .Values.services.apps.autoscaling.targetCPUUtilizationPercentage }}
+    {{- end }}
+    {{- if .Values.services.apps.autoscaling.targetMemoryUtilizationPercentage }}
+    - type: Resource
+      resource:
+        name: memory
+        target:
+          type: Utilization
+          averageUtilization: {{ .Values.services.apps.autoscaling.targetMemoryUtilizationPercentage }}
+    {{- end }}
+{{- end }}
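
The new template above is driven entirely by `services.apps.autoscaling`. A minimal sketch of values that would turn it on (the numbers are the defaults listed in the README table, not a recommendation):

```yaml
services:
  apps:
    autoscaling:
      enabled: true                        # renders the HPA above
      minReplicas: 1
      maxReplicas: 10
      targetCPUUtilizationPercentage: 80   # needs metrics-server and pod resources set
```

The `proxy` and `worker` HPA templates later in this diff read the same shape from `services.proxy.autoscaling` and `services.worker.autoscaling`, and none of them will scale anything unless `metrics-server` is running and the target pods have `resources` configured.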

@@ -1,10 +1,6 @@
 apiVersion: v1
 kind: Service
 metadata:
-  annotations:
-    kompose.cmd: kompose convert
-    kompose.version: 1.21.0 (992df58d8)
-  creationTimestamp: null
   labels:
     io.kompose.service: app-service
   name: app-service

@@ -2,10 +2,6 @@
 apiVersion: apps/v1
 kind: Deployment
 metadata:
-  annotations:
-    kompose.cmd: kompose convert
-    kompose.version: 1.21.0 (992df58d8)
-  creationTimestamp: null
   labels:
     app.kubernetes.io/name: couchdb-backup
   name: couchdb-backup

@@ -18,10 +14,6 @@ spec:
     type: Recreate
   template:
     metadata:
-      annotations:
-        kompose.cmd: kompose convert
-        kompose.version: 1.21.0 (992df58d8)
-      creationTimestamp: null
       labels:
         app.kubernetes.io/name: couchdb-backup
     spec:

@@ -1,28 +0,0 @@
-{{- if .Values.autoscaling.enabled }}
-apiVersion: autoscaling/v2beta1
-kind: HorizontalPodAutoscaler
-metadata:
-  name: {{ include "budibase.fullname" . }}
-  labels:
-    {{- include "budibase.labels" . | nindent 4 }}
-spec:
-  scaleTargetRef:
-    apiVersion: apps/v1
-    kind: Deployment
-    name: {{ include "budibase.fullname" . }}
-  minReplicas: {{ .Values.autoscaling.minReplicas }}
-  maxReplicas: {{ .Values.autoscaling.maxReplicas }}
-  metrics:
-    {{- if .Values.autoscaling.targetCPUUtilizationPercentage }}
-    - type: Resource
-      resource:
-        name: cpu
-        targetAverageUtilization: {{ .Values.autoscaling.targetCPUUtilizationPercentage }}
-    {{- end }}
-    {{- if .Values.autoscaling.targetMemoryUtilizationPercentage }}
-    - type: Resource
-      resource:
-        name: memory
-        targetAverageUtilization: {{ .Values.autoscaling.targetMemoryUtilizationPercentage }}
-    {{- end }}
-{{- end }}
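
The file removed above targeted the long-deprecated `autoscaling/v2beta1` API; the per-service HPA templates that replace it emit the `autoscaling/v2` metric shape instead (falling back to `v2beta2` via `ternary` on clusters that lack `v2`). Per metric, the change is roughly this, with both shapes taken from the templates in this diff:

```yaml
# autoscaling/v2beta1 (old, removed template)
- type: Resource
  resource:
    name: cpu
    targetAverageUtilization: 80

# autoscaling/v2 (new templates)
- type: Resource
  resource:
    name: cpu
    target:
      type: Utilization
      averageUtilization: 80
```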

@@ -2,7 +2,6 @@
 apiVersion: v1
 kind: PersistentVolumeClaim
 metadata:
-  creationTimestamp: null
   labels:
     io.kompose.service: minio-data
   name: minio-data

@@ -2,10 +2,6 @@
 apiVersion: apps/v1
 kind: Deployment
 metadata:
-  annotations:
-    kompose.cmd: kompose convert
-    kompose.version: 1.21.0 (992df58d8)
-  creationTimestamp: null
   labels:
     io.kompose.service: minio-service
   name: minio-service

@@ -18,10 +14,6 @@ spec:
     type: Recreate
   template:
     metadata:
-      annotations:
-        kompose.cmd: kompose convert
-        kompose.version: 1.21.0 (992df58d8)
-      creationTimestamp: null
       labels:
         io.kompose.service: minio-service
     spec:

@@ -2,10 +2,6 @@
 apiVersion: v1
 kind: Service
 metadata:
-  annotations:
-    kompose.cmd: kompose convert
-    kompose.version: 1.21.0 (992df58d8)
-  creationTimestamp: null
   labels:
     io.kompose.service: minio-service
   name: minio-service

@@ -2,12 +2,9 @@ apiVersion: apps/v1
 kind: Deployment
 metadata:
   annotations:
-    kompose.cmd: kompose convert
-    kompose.version: 1.21.0 (992df58d8)
 {{ if .Values.services.proxy.deploymentAnnotations }}
 {{- toYaml .Values.services.proxy.deploymentAnnotations | indent 4 -}}
 {{ end }}
-  creationTimestamp: null
   labels:
     app.kubernetes.io/name: budibase-proxy
 {{ if .Values.services.proxy.deploymentLabels }}

@@ -19,17 +16,15 @@ spec:
   selector:
     matchLabels:
       app.kubernetes.io/name: budibase-proxy
+  minReadySeconds: 10
   strategy:
     type: RollingUpdate
   template:
     metadata:
       annotations:
-        kompose.cmd: kompose convert
-        kompose.version: 1.21.0 (992df58d8)
 {{ if .Values.services.proxy.templateAnnotations }}
 {{- toYaml .Values.services.proxy.templateAnnotations | indent 8 -}}
 {{ end }}
-      creationTimestamp: null
       labels:
         app.kubernetes.io/name: budibase-proxy
 {{ if .Values.services.proxy.templateLabels }}

@@ -0,0 +1,32 @@
+{{- if .Values.services.proxy.autoscaling.enabled }}
+apiVersion: {{ ternary "autoscaling/v2" "autoscaling/v2beta2" (.Capabilities.APIVersions.Has "autoscaling/v2") }}
+kind: HorizontalPodAutoscaler
+metadata:
+  name: {{ include "budibase.fullname" . }}-proxy
+  labels:
+    {{- include "budibase.labels" . | nindent 4 }}
+spec:
+  scaleTargetRef:
+    apiVersion: apps/v1
+    kind: Deployment
+    name: proxy-service
+  minReplicas: {{ .Values.services.proxy.autoscaling.minReplicas }}
+  maxReplicas: {{ .Values.services.proxy.autoscaling.maxReplicas }}
+  metrics:
+    {{- if .Values.services.proxy.autoscaling.targetCPUUtilizationPercentage }}
+    - type: Resource
+      resource:
+        name: cpu
+        target:
+          type: Utilization
+          averageUtilization: {{ .Values.services.proxy.autoscaling.targetCPUUtilizationPercentage }}
+    {{- end }}
+    {{- if .Values.services.proxy.autoscaling.targetMemoryUtilizationPercentage }}
+    - type: Resource
+      resource:
+        name: memory
+        target:
+          type: Utilization
+          averageUtilization: {{ .Values.services.proxy.autoscaling.targetMemoryUtilizationPercentage }}
+    {{- end }}
+{{- end }}
|
@ -1,10 +1,6 @@
|
||||||
apiVersion: v1
|
apiVersion: v1
|
||||||
kind: Service
|
kind: Service
|
||||||
metadata:
|
metadata:
|
||||||
annotations:
|
|
||||||
kompose.cmd: kompose convert
|
|
||||||
kompose.version: 1.21.0 (992df58d8)
|
|
||||||
creationTimestamp: null
|
|
||||||
labels:
|
labels:
|
||||||
app.kubernetes.io/name: budibase-proxy
|
app.kubernetes.io/name: budibase-proxy
|
||||||
name: proxy-service
|
name: proxy-service
|
||||||
|
@ -16,4 +12,4 @@ spec:
|
||||||
selector:
|
selector:
|
||||||
app.kubernetes.io/name: budibase-proxy
|
app.kubernetes.io/name: budibase-proxy
|
||||||
status:
|
status:
|
||||||
loadBalancer: {}
|
loadBalancer: {}
|
||||||
|
|
|

@@ -2,7 +2,6 @@
 apiVersion: v1
 kind: PersistentVolumeClaim
 metadata:
-  creationTimestamp: null
   labels:
     io.kompose.service: redis-data
   name: redis-data

@@ -2,10 +2,6 @@
 apiVersion: apps/v1
 kind: Deployment
 metadata:
-  annotations:
-    kompose.cmd: kompose convert
-    kompose.version: 1.21.0 (992df58d8)
-  creationTimestamp: null
   labels:
     io.kompose.service: redis-service
   name: redis-service

@@ -18,10 +14,6 @@ spec:
     type: Recreate
   template:
     metadata:
-      annotations:
-        kompose.cmd: kompose convert
-        kompose.version: 1.21.0 (992df58d8)
-      creationTimestamp: null
       labels:
         io.kompose.service: redis-service
     spec:

@@ -2,10 +2,6 @@
 apiVersion: v1
 kind: Service
 metadata:
-  annotations:
-    kompose.cmd: kompose convert
-    kompose.version: 1.21.0 (992df58d8)
-  creationTimestamp: null
   labels:
     io.kompose.service: redis-service
   name: redis-service

@@ -2,12 +2,9 @@ apiVersion: apps/v1
 kind: Deployment
 metadata:
   annotations:
-    kompose.cmd: kompose convert
-    kompose.version: 1.21.0 (992df58d8)
 {{ if .Values.services.worker.deploymentAnnotations }}
 {{- toYaml .Values.services.worker.deploymentAnnotations | indent 4 -}}
 {{ end }}
-  creationTimestamp: null
   labels:
     io.kompose.service: worker-service
 {{ if .Values.services.worker.deploymentLabels }}

@@ -24,12 +21,9 @@ spec:
   template:
     metadata:
       annotations:
-        kompose.cmd: kompose convert
-        kompose.version: 1.21.0 (992df58d8)
 {{ if .Values.services.worker.templateAnnotations }}
 {{- toYaml .Values.services.worker.templateAnnotations | indent 8 -}}
 {{ end }}
-      creationTimestamp: null
       labels:
         io.kompose.service: worker-service
 {{ if .Values.services.worker.templateLabels }}

@@ -0,0 +1,32 @@
+{{- if .Values.services.worker.autoscaling.enabled }}
+apiVersion: {{ ternary "autoscaling/v2" "autoscaling/v2beta2" (.Capabilities.APIVersions.Has "autoscaling/v2") }}
+kind: HorizontalPodAutoscaler
+metadata:
+  name: {{ include "budibase.fullname" . }}-worker
+  labels:
+    {{- include "budibase.labels" . | nindent 4 }}
+spec:
+  scaleTargetRef:
+    apiVersion: apps/v1
+    kind: Deployment
+    name: worker-service
+  minReplicas: {{ .Values.services.worker.autoscaling.minReplicas }}
+  maxReplicas: {{ .Values.services.worker.autoscaling.maxReplicas }}
+  metrics:
+    {{- if .Values.services.worker.autoscaling.targetCPUUtilizationPercentage }}
+    - type: Resource
+      resource:
+        name: cpu
+        target:
+          type: Utilization
+          averageUtilization: {{ .Values.services.worker.autoscaling.targetCPUUtilizationPercentage }}
+    {{- end }}
+    {{- if .Values.services.worker.autoscaling.targetMemoryUtilizationPercentage }}
+    - type: Resource
+      resource:
+        name: memory
+        target:
+          type: Utilization
+          averageUtilization: {{ .Values.services.worker.autoscaling.targetMemoryUtilizationPercentage }}
+    {{- end }}
+{{- end }}
|
@ -1,10 +1,6 @@
|
||||||
apiVersion: v1
|
apiVersion: v1
|
||||||
kind: Service
|
kind: Service
|
||||||
metadata:
|
metadata:
|
||||||
annotations:
|
|
||||||
kompose.cmd: kompose convert
|
|
||||||
kompose.version: 1.21.0 (992df58d8)
|
|
||||||
creationTimestamp: null
|
|
||||||
labels:
|
labels:
|
||||||
io.kompose.service: worker-service
|
io.kompose.service: worker-service
|
||||||
name: worker-service
|
name: worker-service
|
||||||
|
|
|

@@ -1,56 +1,32 @@
-# Default values for budibase.
-# This is a YAML-formatted file.
-# Declare variables to be passed into your templates.
-
-image:
-  pullPolicy: IfNotPresent
-  # Overrides the image tag whose default is the chart appVersion.
-  tag: ""
-
+# -- Passed to all pods created by this chart. Should not ordinarily need to be changed.
 imagePullSecrets: []
+# -- Override the name of the deployment. Defaults to {{ .Chart.Name }}.
 nameOverride: ""
-# fullnameOverride: ""

 serviceAccount:
-  # Specifies whether a service account should be created
+  # -- Specifies whether a service account should be created
   create: true
-  # Annotations to add to the service account
+  # -- Annotations to add to the service account
   annotations: {}
-  # The name of the service account to use.
+  # -- The name of the service account to use.
   # If not set and create is true, a name is generated using the fullname template
   name: ""

-podAnnotations: {}
-
-podSecurityContext:
-  {}
-  # fsGroup: 2000
-
-securityContext:
-  {}
-  # capabilities:
-  #   drop:
-  #     - ALL
-  # readOnlyRootFilesystem: true
-  # runAsNonRoot: true
-  # runAsUser: 1000
-
 service:
+  # -- Service type for the service that points to the main Budibase proxy pod.
   type: ClusterIP
+  # -- Port to expose on the service.
   port: 10000

 ingress:
+  # -- Whether to create an Ingress resource pointing to the Budibase proxy.
   enabled: true
-  aws: false
-  nginx: true
-  certificateArn: ""
+  # -- What ingress class to use.
   className: ""
-  annotations:
-    kubernetes.io/ingress.class: nginx
-    nginx.ingress.kubernetes.io/client-max-body-size: 150M
-    nginx.ingress.kubernetes.io/proxy-body-size: 50m
+  # -- Standard hosts block for the Ingress resource. Defaults to pointing to the Budibase proxy.
   hosts:
-    - host: # change if using custom domain
+    # @ignore
+    - host:
       paths:
         - path: /
           pathType: Prefix

@@ -60,361 +36,426 @@ ingress:
               port:
                 number: 10000

-autoscaling:
+awsAlbIngress:
+  # -- Whether to create an ALB Ingress resource pointing to the Budibase proxy. Requires the AWS ALB Ingress Controller.
   enabled: false
-  minReplicas: 1
-  maxReplicas: 100
-  targetCPUUtilizationPercentage: 80
-  # targetMemoryUtilizationPercentage: 80
+  # -- If you're wanting to use HTTPS, you'll need to create an ACM certificate and specify the ARN here.
+  certificateArn: ""

-nodeSelector: {}
-
+# -- Sets the tolerations for all pods created by this chart. Should not ordinarily need to be changed.
+# See <https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/> for more information
+# on tolerations.
 tolerations: []

+# -- Sets the affinity for all pods created by this chart. Should not ordinarily
+# need to be changed. See
+# <https://kubernetes.io/docs/tasks/configure-pod-container/assign-pods-nodes-using-node-affinity/>
+# for more information on affinity.
 affinity: {}

 globals:
-  appVersion: "" # Use as an override to .Chart.AppVersion
+  # -- The version of Budibase to deploy. Defaults to what's specified by {{ .Chart.AppVersion }}.
+  # Ends up being used as the image version tag for the apps, proxy, and worker images.
+  appVersion: ""
+  # -- Sets the environment variable BUDIBASE_ENVIRONMENT for the apps and worker pods. Should not
+  # ordinarily need to be changed.
   budibaseEnv: PRODUCTION
+  # -- Sets what feature flags are enabled and for which tenants. Should not ordinarily need to be
+  # changed.
   tenantFeatureFlags: "*:LICENSING,*:USER_GROUPS,*:ONBOARDING_TOUR"
+  # -- Whether to enable analytics or not. You can read more about our analytics here:
+  # <https://docs.budibase.com/docs/analytics>.
   enableAnalytics: "1"
+  # @ignore (only used if enableAnalytics is set to 1)
   posthogToken: "phc_bIjZL7oh2GEUd2vqvTBH8WvrX0fWTFQMs6H5KQxiUxU"
-  selfHosted: "1" # set to 0 for budibase cloud environment, set to 1 for self-hosted setup
-  multiTenancy: "0" # set to 0 to disable multiple orgs, set to 1 to enable multiple orgs
-  offlineMode: "0" # set to 1 to enable offline mode
+  # @ignore (should not normally need to be changed, we only set this to "0"
+  # when deploying to our Cloud environment)
+  selfHosted: "1"
+  # @ignore (doesn't work out of the box for self-hosted users, only meant for Budicloud)
+  multiTenancy: "0"
+  # @ignore (only currently used to determine whether to fetch licenses offline or not, should
+  # not normally need to be changed, and only applies to Enterprise customers)
+  offlineMode: "0"
+  # @ignore (only needs to be set in our cloud environment)
   accountPortalUrl: ""
+  # @ignore (only needs to be set in our cloud environment)
   accountPortalApiKey: ""
+  # -- Sets the domain attribute of the cookie that Budibase uses to store session information.
+  # See <https://developer.mozilla.org/en-US/docs/Web/HTTP/Cookies#define_where_cookies_are_sent>
+  # for details on why you might want to set this.
   cookieDomain: ""
+  # -- Set the `platformUrl` binding. You can also do this in Settings > Organisation if you are
+  # self-hosting.
   platformUrl: ""
+  # -- Whether or not to enable doing data migrations over the HTTP API. If this is set to "0",
+  # migrations are run on startup. You shouldn't ordinarily need to change this.
   httpMigrations: "0"
+  # -- Google OAuth settings. These can also be set in the Budibase UI, see
+  # <https://docs.budibase.com/docs/sso-with-google> for details.
   google:
+    # -- Client ID of your Google OAuth app.
     clientId: ""
+    # -- Client secret of your Google OAuth app.
     secret: ""
+  # -- The maximum number of iterations allowed for an automation loop step. You can read more about
+  # looping here: <https://docs.budibase.com/docs/looping>.
   automationMaxIterations: "200"

-  createSecrets: true # creates an internal API key, JWT secrets and redis password for you
+  # -- Create an internal API key, JWT secret, object store access key and
+  # secret, and store them in a Kubernetes `Secret`.
+  createSecrets: true

-  # if createSecrets is set to false, you can hard-code your secrets here
+  # -- Used for encrypting API keys and environment variables when stored in the database.
+  # You don't need to set this if `createSecrets` is true.
   apiEncryptionKey: ""
+  # -- API key used for internal Budibase API calls. You don't need to set this
+  # if `createSecrets` is true.
   internalApiKey: ""
+  # -- Secret used for signing JWTs. You don't need to set this if `createSecrets` is true.
   jwtSecret: ""
-  cdnUrl: ""
-  # fallback values used during live rotation
+  # -- A fallback value for `internalApiKey`. If you're rotating your encryption key, you can
+  # set this to the old value for the duration of the rotation.
   internalApiKeyFallback: ""
+  # -- A fallback value for `jwtSecret`. If you're rotating your JWT secret, you can set this
+  # to the old value for the duration of the rotation.
   jwtSecretFallback: ""

   smtp:
+    # -- Whether to enable SMTP or not.
     enabled: false
-
-# globalAgentHttpProxy:
-# globalAgentHttpsProxy:
-# globalAgentNoProxy:
+    # -- The hostname of your SMTP server.
+    host: ""
+    # -- The port of your SMTP server.
+    port: "587"
+    # -- The email address to use in the "From:" field of emails sent by Budibase.
+    from: ""
+    # -- The username to use when authenticating with your SMTP server.
+    user: ""
+    # -- The password to use when authenticating with your SMTP server.
+    password: ""

 services:
-  budibaseVersion: latest
+  # -- The DNS suffix to use for service discovery. You only need to change this
+  # if you've configured your cluster to use a different DNS suffix.
   dns: cluster.local
-  # tlsRejectUnauthorized: 0

   proxy:
+    # @ignore (you shouldn't need to change this)
     port: 10000
+    # -- The number of proxy replicas to run.
     replicaCount: 1
+    # @ignore (you should never need to change this)
     upstreams:
       apps: "http://app-service.{{ .Release.Namespace }}.svc.{{ .Values.services.dns }}:{{ .Values.services.apps.port }}"
       worker: "http://worker-service.{{ .Release.Namespace }}.svc.{{ .Values.services.dns }}:{{ .Values.services.worker.port }}"
|
worker: "http://worker-service.{{ .Release.Namespace }}.svc.{{ .Values.services.dns }}:{{ .Values.services.worker.port }}"
|
||||||
minio: "http://minio-service.{{ .Release.Namespace }}.svc.{{ .Values.services.dns }}:{{ .Values.services.objectStore.port }}"
|
minio: "http://minio-service.{{ .Release.Namespace }}.svc.{{ .Values.services.dns }}:{{ .Values.services.objectStore.port }}"
|
||||||
couchdb: "http://{{ .Release.Name }}-svc-couchdb:{{ .Values.services.couchdb.port }}"
|
couchdb: "http://{{ .Release.Name }}-svc-couchdb:{{ .Values.services.couchdb.port }}"
|
||||||
|
# -- The resources to use for proxy pods. See
|
||||||
|
# <https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/>
|
||||||
|
# for more information on how to set these.
|
||||||
resources: {}
|
resources: {}
|
||||||
|
# -- Startup probe configuration for proxy pods. You shouldn't need to
|
||||||
|
# change this, but if you want to you can find more information here:
|
||||||
|
# <https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/>
|
||||||
|
# @default -- HTTP health checks.
|
||||||
startupProbe:
|
startupProbe:
|
||||||
|
# @ignore
|
||||||
httpGet:
|
httpGet:
|
||||||
path: /health
|
path: /health
|
||||||
port: 10000
|
port: 10000
|
||||||
scheme: HTTP
|
scheme: HTTP
|
||||||
|
# @ignore
|
||||||
failureThreshold: 30
|
failureThreshold: 30
|
||||||
|
# @ignore
|
||||||
periodSeconds: 3
|
periodSeconds: 3
|
||||||
|
# -- Readiness probe configuration for proxy pods. You shouldn't need to
|
||||||
|
# change this, but if you want to you can find more information here:
|
||||||
|
# <https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/>
|
||||||
|
# @default -- HTTP health checks.
|
||||||
readinessProbe:
|
readinessProbe:
|
||||||
|
# @ignore
|
||||||
httpGet:
|
httpGet:
|
||||||
path: /health
|
path: /health
|
||||||
port: 10000
|
port: 10000
|
||||||
scheme: HTTP
|
scheme: HTTP
|
||||||
|
# @ignore
|
||||||
periodSeconds: 3
|
periodSeconds: 3
|
||||||
|
# @ignore
|
||||||
failureThreshold: 1
|
failureThreshold: 1
|
||||||
|
# -- Liveness probe configuration for proxy pods. You shouldn't need to
|
||||||
|
# change this, but if you want to you can find more information here:
|
||||||
|
# <https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/>
|
||||||
|
# @default -- HTTP health checks.
|
||||||
livenessProbe:
|
livenessProbe:
|
||||||
|
# @ignore
|
||||||
httpGet:
|
httpGet:
|
||||||
path: /health
|
path: /health
|
||||||
port: 10000
|
port: 10000
|
||||||
scheme: HTTP
|
scheme: HTTP
|
||||||
|
# @ignore
|
||||||
failureThreshold: 3
|
failureThreshold: 3
|
||||||
|
# @ignore
|
||||||
periodSeconds: 5
|
periodSeconds: 5
|
||||||
# annotations:
|
autoscaling:
|
||||||
# co.elastic.logs/module: nginx
|
# -- Whether to enable horizontal pod autoscaling for the proxy service.
|
||||||
# co.elastic.logs/fileset.stdout: access
|
enabled: false
|
||||||
# co.elastic.logs/fileset.stderr: error
|
minReplicas: 1
|
||||||
|
maxReplicas: 10
|
||||||
|
# -- Target CPU utilization percentage for the proxy service. Note that
|
||||||
|
# for autoscaling to work, you will need to have metrics-server
|
||||||
|
# configured, and resources set for the proxy pods.
|
||||||
|
targetCPUUtilizationPercentage: 80
|
||||||
|
|
||||||
apps:
|
apps:
|
||||||
|
# @ignore (you shouldn't need to change this)
|
||||||
port: 4002
|
port: 4002
|
||||||
|
# -- The number of apps replicas to run.
|
||||||
replicaCount: 1
|
replicaCount: 1
|
||||||
|
# -- The log level for the apps service.
|
||||||
logLevel: info
|
logLevel: info
|
||||||
|
# -- Whether or not to log HTTP requests to the apps service.
|
||||||
httpLogging: 1
|
httpLogging: 1
|
||||||
|
# -- The resources to use for apps pods. See
|
||||||
|
# <https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/>
|
||||||
|
# for more information on how to set these.
|
||||||
resources: {}
|
resources: {}
|
||||||
|
# -- Startup probe configuration for apps pods. You shouldn't need to
|
||||||
|
# change this, but if you want to you can find more information here:
|
||||||
|
# <https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/>
|
||||||
|
# @default -- HTTP health checks.
|
||||||
startupProbe:
|
startupProbe:
|
||||||
|
# @ignore
|
||||||
httpGet:
|
httpGet:
|
||||||
path: /health
|
path: /health
|
||||||
port: 4002
|
port: 4002
|
||||||
scheme: HTTP
|
scheme: HTTP
|
||||||
|
# @ignore
|
||||||
failureThreshold: 30
|
failureThreshold: 30
|
||||||
|
# @ignore
|
||||||
periodSeconds: 3
|
periodSeconds: 3
|
||||||
|
# -- Readiness probe configuration for apps pods. You shouldn't need to
|
||||||
|
# change this, but if you want to you can find more information here:
|
||||||
|
# <https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/>
|
||||||
|
# @default -- HTTP health checks.
|
||||||
readinessProbe:
|
readinessProbe:
|
||||||
|
# @ignore
|
||||||
httpGet:
|
httpGet:
|
||||||
path: /health
|
path: /health
|
||||||
port: 4002
|
port: 4002
|
||||||
scheme: HTTP
|
scheme: HTTP
|
||||||
|
# @ignore
|
||||||
periodSeconds: 3
|
periodSeconds: 3
|
||||||
|
# @ignore
|
||||||
failureThreshold: 1
|
failureThreshold: 1
|
||||||
|
# -- Liveness probe configuration for apps pods. You shouldn't need to
|
||||||
|
# change this, but if you want to you can find more information here:
|
||||||
|
# <https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/>
|
||||||
|
# @default -- HTTP health checks.
|
||||||
livenessProbe:
|
livenessProbe:
|
||||||
|
# @ignore
|
||||||
httpGet:
|
httpGet:
|
||||||
path: /health
|
path: /health
|
||||||
port: 4002
|
port: 4002
|
||||||
scheme: HTTP
|
scheme: HTTP
|
||||||
|
# @ignore
|
||||||
failureThreshold: 3
|
failureThreshold: 3
|
||||||
|
# @ignore
|
||||||
periodSeconds: 5
|
periodSeconds: 5
|
||||||
# nodeDebug: "" # set the value of NODE_DEBUG
|
autoscaling:
|
||||||
# annotations:
|
# -- Whether to enable horizontal pod autoscaling for the apps service.
|
||||||
# co.elastic.logs/multiline.type: pattern
|
enabled: false
|
||||||
# co.elastic.logs/multiline.pattern: '^[[:space:]]'
|
minReplicas: 1
|
||||||
# co.elastic.logs/multiline.negate: false
|
maxReplicas: 10
|
||||||
# co.elastic.logs/multiline.match: after
|
# -- Target CPU utilization percentage for the apps service. Note that for
|
||||||
|
# autoscaling to work, you will need to have metrics-server configured,
|
||||||
|
# and resources set for the apps pods.
|
||||||
|
targetCPUUtilizationPercentage: 80
|
||||||
|
|
||||||
worker:
|
worker:
|
||||||
|
# @ignore (you shouldn't need to change this)
|
||||||
port: 4003
|
port: 4003
|
||||||
|
# -- The number of worker replicas to run.
|
||||||
replicaCount: 1
|
replicaCount: 1
|
||||||
|
# -- The log level for the worker service.
|
||||||
logLevel: info
|
logLevel: info
|
||||||
|
# -- Whether or not to log HTTP requests to the worker service.
|
||||||
httpLogging: 1
|
httpLogging: 1
|
||||||
|
# -- The resources to use for worker pods. See
|
||||||
|
# <https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/>
|
||||||
|
# for more information on how to set these.
|
||||||
resources: {}
|
resources: {}
|
||||||
|
# -- Startup probe configuration for worker pods. You shouldn't need to
|
||||||
|
# change this, but if you want to you can find more information here:
|
||||||
|
# <https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/>
|
||||||
|
# @default -- HTTP health checks.
|
||||||
startupProbe:
|
startupProbe:
|
||||||
|
# @ignore
|
||||||
httpGet:
|
httpGet:
|
||||||
path: /health
|
path: /health
|
||||||
port: 4003
|
port: 4003
|
||||||
scheme: HTTP
|
scheme: HTTP
|
||||||
|
# @ignore
|
||||||
failureThreshold: 30
|
failureThreshold: 30
|
||||||
|
# @ignore
|
||||||
periodSeconds: 3
|
periodSeconds: 3
|
||||||
|
# -- Readiness probe configuration for worker pods. You shouldn't need to
|
||||||
|
# change this, but if you want to you can find more information here:
|
||||||
|
# <https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/>
|
||||||
|
# @default -- HTTP health checks.
|
||||||
readinessProbe:
|
readinessProbe:
|
||||||
|
# @ignore
|
||||||
httpGet:
|
httpGet:
|
||||||
path: /health
|
path: /health
|
||||||
port: 4003
|
port: 4003
|
||||||
scheme: HTTP
|
scheme: HTTP
|
||||||
|
# @ignore
|
||||||
periodSeconds: 3
|
periodSeconds: 3
|
||||||
|
# @ignore
|
||||||
failureThreshold: 1
|
failureThreshold: 1
|
||||||
|
# -- Liveness probe configuration for worker pods. You shouldn't need to
|
||||||
|
# change this, but if you want to you can find more information here:
|
||||||
|
# <https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/>
|
||||||
|
# @default -- HTTP health checks.
|
||||||
livenessProbe:
|
livenessProbe:
|
||||||
|
# @ignore
|
||||||
httpGet:
|
httpGet:
|
||||||
path: /health
|
path: /health
|
||||||
port: 4003
|
port: 4003
|
||||||
scheme: HTTP
|
scheme: HTTP
|
||||||
|
# @ignore
|
||||||
failureThreshold: 3
|
failureThreshold: 3
|
||||||
|
# @ignore
|
||||||
periodSeconds: 5
|
periodSeconds: 5
|
||||||
# annotations:
|
autoscaling:
|
||||||
# co.elastic.logs/multiline.type: pattern
|
# -- Whether to enable horizontal pod autoscaling for the worker service.
|
||||||
# co.elastic.logs/multiline.pattern: '^[[:space:]]'
|
enabled: false
|
||||||
# co.elastic.logs/multiline.negate: false
|
minReplicas: 1
|
||||||
# co.elastic.logs/multiline.match: after
|
maxReplicas: 10
|
||||||
|
# -- Target CPU utilization percentage for the worker service. Note that
|
||||||
|
# for autoscaling to work, you will need to have metrics-server
|
||||||
|
# configured, and resources set for the worker pods.
|
||||||
|
targetCPUUtilizationPercentage: 80
|
||||||
|
|
||||||
couchdb:
|
couchdb:
|
||||||
|
# -- Whether or not to spin up a CouchDB instance in your cluster. True by
|
||||||
|
# default, and the configuration for the CouchDB instance is under the
|
||||||
|
# `couchdb` key at the root of this file. You can see what options are
|
||||||
|
# available to you by looking at the official CouchDB Helm chart:
|
||||||
|
# <https://github.com/apache/couchdb-helm/tree/couchdb-4.3.0/couchdb>.
|
||||||
enabled: true
|
enabled: true
|
||||||
# url: "" # only change if pointing to existing couch server
|
# url: "" # only change if pointing to existing couch server
|
||||||
# user: "" # only change if pointing to existing couch server
|
# user: "" # only change if pointing to existing couch server
|
||||||
# password: "" # only change if pointing to existing couch server
|
# password: "" # only change if pointing to existing couch server
|
||||||
port: 5984
|
port: 5984
|
||||||
backup:
|
backup:
|
||||||
|
# -- Whether or not to enable periodic CouchDB backups. This works by replicating
|
||||||
|
# to another CouchDB instance.
|
||||||
enabled: false
|
enabled: false
|
||||||
# target couchDB instance to back up to
|
# -- Target couchDB instance to back up to, either a hostname or an IP address.
|
||||||
target: ""
|
target: ""
|
||||||
# backup interval in seconds
|
# -- Backup interval in seconds
|
||||||
interval: ""
|
interval: ""
|
||||||
|
# -- The resources to use for CouchDB backup pods. See
|
||||||
|
# <https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/>
|
||||||
|
# for more information on how to set these.
|
||||||
resources: {}
|
resources: {}
|
||||||
|
|
||||||
redis:
|
redis:
|
||||||
enabled: true # disable if using external redis
|
# -- Whether or not to deploy a Redis pod into your cluster.
|
||||||
|
enabled: true
|
||||||
|
# -- Port to expose Redis on.
|
||||||
port: 6379
|
port: 6379
|
||||||
|
# @ignore (you should leave this as 1, we don't support clustering Redis)
|
||||||
replicaCount: 1
|
replicaCount: 1
|
||||||
url: "" # only change if pointing to existing redis cluster and enabled: false
|
# -- If you choose to run Redis externally to this chart, you can specify the
|
||||||
password: "budibase" # recommended to override if using built-in redis
|
# connection details here.
|
||||||
|
url: ""
|
||||||
|
# -- The password to use when connecting to Redis. It's recommended that you change
|
||||||
|
# this from the default if you're running Redis in-cluster.
|
||||||
|
password: "budibase"
|
||||||
|
# -- How much persistent storage to allocate to Redis.
|
||||||
storage: 100Mi
|
storage: 100Mi
|
||||||
## If defined, storageClassName: <storageClass>
|
# -- If defined, storageClassName: <storageClass> If set to "-",
|
||||||
## If set to "-", storageClassName: "", which disables dynamic provisioning
|
# storageClassName: "", which disables dynamic provisioning If undefined
|
||||||
## If undefined (the default) or set to null, no storageClassName spec is
|
# (the default) or set to null, no storageClassName spec is set, choosing
|
||||||
## set, choosing the default provisioner.
|
# the default provisioner.
|
||||||
storageClass: ""
|
storageClass: ""
|
||||||
|
# -- The resources to use for Redis pods. See
|
||||||
|
# <https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/>
|
||||||
|
# for more information on how to set these.
|
||||||
resources: {}
|
resources: {}
|
||||||
|
|
||||||
objectStore:
|
objectStore:
|
||||||
# Set to false if using another object store such as S3
|
# -- Set to false if using another object store, such as S3. You will need
|
||||||
|
# to set `services.objectStore.url` to point to your bucket if you do this.
|
||||||
minio: true
|
minio: true
|
||||||
|
# -- Whether to enable the Minio web console or not. If you're exposing
|
||||||
|
# Minio to the Internet (via a custom Ingress record, for example), you
|
||||||
|
# should set this to false. If you're only exposing Minio to your cluster,
|
||||||
|
# you can leave this as true.
|
||||||
browser: true
|
browser: true
|
||||||
|
# @ignore
|
||||||
port: 9000
|
port: 9000
|
||||||
|
# @ignore (you should leave this as 1, we don't support clustering Minio)
|
||||||
replicaCount: 1
|
replicaCount: 1
|
||||||
accessKey: "" # AWS_ACCESS_KEY if using S3 or existing minio access key
|
# -- AWS_ACCESS_KEY if using S3
|
||||||
secretKey: "" # AWS_SECRET_ACCESS_KEY if using S3 or existing minio secret
|
accessKey: ""
|
||||||
region: "" # AWS_REGION if using S3 or existing minio secret
|
# -- AWS_SECRET_ACCESS_KEY if using S3
|
||||||
url: "http://minio-service:9000" # only change if pointing to existing minio cluster or S3 and minio: false
|
secretKey: ""
|
||||||
|
# -- AWS_REGION if using S3
|
||||||
|
region: ""
|
||||||
|
# -- URL to use for object storage. Only change this if you're using an
|
||||||
|
# external object store, such as S3. Remember to set `minio: false` if you
|
||||||
|
# do this.
|
||||||
|
url: "http://minio-service:9000"
|
||||||
|
# -- How much storage to give Minio in its PersistentVolumeClaim.
|
||||||
storage: 100Mi
|
storage: 100Mi
|
||||||
## If defined, storageClassName: <storageClass>
|
# -- If defined, storageClassName: <storageClass> If set to "-",
|
||||||
## If set to "-", storageClassName: "", which disables dynamic provisioning
|
# storageClassName: "", which disables dynamic provisioning If undefined
|
||||||
## If undefined (the default) or set to null, no storageClassName spec is
|
# (the default) or set to null, no storageClassName spec is set, choosing
|
||||||
## set, choosing the default provisioner.
|
# the default provisioner.
|
||||||
storageClass: ""
|
storageClass: ""
|
||||||
|
# -- The resources to use for Minio pods. See
|
||||||
|
# <https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/>
|
||||||
|
# for more information on how to set these.
|
||||||
resources: {}
|
resources: {}
|
||||||
cloudfront:
|
cloudfront:
|
||||||
# Set the url of a distribution to enable cloudfront
|
# -- Set the url of a distribution to enable cloudfront.
|
||||||
cdn: ""
|
cdn: ""
|
||||||
# ID of public key stored in cloudfront
|
# -- ID of public key stored in cloudfront.
|
||||||
publicKeyId: ""
|
publicKeyId: ""
|
||||||
# Base64 encoded private key for the above public key
|
# -- Base64 encoded private key for the above public key.
|
||||||
privateKey64: ""
|
privateKey64: ""
|
||||||
|
|
||||||
# Override values in couchDB subchart
|
# Override values in couchDB subchart. We're only specifying the values we're changing.
|
||||||
|
# If you want to see all of the available values, see:
|
||||||
|
# https://github.com/apache/couchdb-helm/tree/couchdb-4.3.0/couchdb
|
||||||
couchdb:
|
couchdb:
|
||||||
## clusterSize is the initial size of the CouchDB cluster.
|
# -- The number of replicas to run in the CouchDB cluster. We set this to
|
||||||
|
# 1 by default to make things simpler, but you can set it to 3 if you need
|
||||||
|
# a high-availability CouchDB cluster.
|
||||||
clusterSize: 1
|
clusterSize: 1
|
||||||
allowAdminParty: false
|
|
||||||
|
|
||||||
# Secret Management
|
# -- We use a custom CouchDB image for running Budibase and we don't support
|
||||||
createAdminSecret: true
|
# using any other CouchDB image. You shouldn't change this, and if you do we
|
||||||
|
# can't guarantee that Budibase will work.
|
||||||
# adminUsername: budibase
|
|
||||||
# adminPassword: budibase
|
|
||||||
# adminHash: -pbkdf2-this_is_not_necessarily_secure_either
|
|
||||||
# cookieAuthSecret: admin
|
|
||||||
|
|
||||||
## When enabled, will deploy a networkpolicy that allows CouchDB pods to
|
|
||||||
## communicate with each other for clustering and ingress on port 5984
|
|
||||||
networkPolicy:
|
|
||||||
enabled: true
|
|
||||||
|
|
||||||
# Use a service account
|
|
||||||
serviceAccount:
|
|
||||||
enabled: true
|
|
||||||
create: true
|
|
||||||
# name:
|
|
||||||
# imagePullSecrets:
|
|
||||||
# - name: myimagepullsecret
|
|
||||||
|
|
||||||
## The storage volume used by each Pod in the StatefulSet. If a
|
|
||||||
## persistentVolume is not enabled, the Pods will use `emptyDir` ephemeral
|
|
||||||
## local storage. Setting the storageClass attribute to "-" disables dynamic
|
|
||||||
## provisioning of Persistent Volumes; leaving it unset will invoke the default
|
|
||||||
## provisioner.
|
|
||||||
persistentVolume:
|
|
||||||
enabled: false
|
|
||||||
accessModes:
|
|
||||||
- ReadWriteOnce
|
|
||||||
size: 10Gi
|
|
||||||
storageClass: ""
|
|
||||||
|
|
||||||
## The CouchDB image
|
|
||||||
image:
|
image:
|
||||||
repository: couchdb
|
# @ignore
|
||||||
tag: 3.1.1
|
repository: budibase/couchdb
|
||||||
pullPolicy: IfNotPresent
|
# @ignore
|
||||||
|
tag: v3.2.1
|
||||||
## Experimental integration with Lucene-powered fulltext search
|
# @ignore
|
||||||
enableSearch: true
|
|
||||||
searchImage:
|
|
||||||
repository: kocolosk/couchdb-search
|
|
||||||
tag: 0.2.0
|
|
||||||
pullPolicy: IfNotPresent
|
|
||||||
|
|
||||||
initImage:
|
|
||||||
repository: busybox
|
|
||||||
tag: latest
|
|
||||||
pullPolicy: Always
|
pullPolicy: Always
|
||||||
|
|
||||||
## CouchDB is happy to spin up cluster nodes in parallel, but if you encounter
|
# @ignore
|
||||||
## problems you can try setting podManagementPolicy to the StatefulSet default
|
# This should remain false. We ship Clouseau ourselves as part of the
|
||||||
## `OrderedReady`
|
# budibase/couchdb image, and it's not possible to disable it because it's a
|
||||||
podManagementPolicy: Parallel
|
# core part of the Budibase experience.
|
||||||
|
enableSearch: false
|
||||||
|
|
||||||
## Optional pod annotations
|
|
||||||
annotations: {}
|
|
||||||
|
|
||||||
## Optional tolerations
|
|
||||||
tolerations: []
|
|
||||||
|
|
||||||
affinity: {}
|
|
||||||
|
|
||||||
service:
|
|
||||||
# annotations:
|
|
||||||
enabled: true
|
|
||||||
type: ClusterIP
|
|
||||||
externalPort: 5984
|
|
||||||
|
|
||||||
## An Ingress resource can provide name-based virtual hosting and TLS
|
|
||||||
## termination among other things for CouchDB deployments which are accessed
|
|
||||||
## from outside the Kubernetes cluster.
|
|
||||||
## ref: https://kubernetes.io/docs/concepts/services-networking/ingress/
|
|
||||||
ingress:
|
|
||||||
enabled: false
|
|
||||||
hosts:
|
|
||||||
- chart-example.local
|
|
||||||
path: /
|
|
||||||
annotations:
|
|
||||||
[]
|
|
||||||
# kubernetes.io/ingress.class: nginx
|
|
||||||
# kubernetes.io/tls-acme: "true"
|
|
||||||
tls:
|
|
||||||
# Secrets must be manually created in the namespace.
|
|
||||||
# - secretName: chart-example-tls
|
|
||||||
# hosts:
|
|
||||||
# - chart-example.local
|
|
||||||
|
|
||||||
## Optional resource requests and limits for the CouchDB container
|
|
||||||
## ref: http://kubernetes.io/docs/user-guide/compute-resources/
|
|
||||||
resources:
|
|
||||||
{}
|
|
||||||
# requests:
|
|
||||||
# cpu: 100m
|
|
||||||
# memory: 128Mi
|
|
||||||
# limits:
|
|
||||||
# cpu: 56
|
|
||||||
# memory: 256Gi
|
|
||||||
|
|
||||||
## erlangFlags is a map that is passed to the Erlang VM as flags using the
|
|
||||||
## ERL_FLAGS env. `name` and `setcookie` flags are minimally required to
|
|
||||||
## establish connectivity between cluster nodes.
|
|
||||||
## ref: http://erlang.org/doc/man/erl.html#init_flags
|
|
||||||
erlangFlags:
|
|
||||||
name: couchdb
|
|
||||||
setcookie: monster
|
|
||||||
|
|
||||||
## couchdbConfig will override default CouchDB configuration settings.
|
|
||||||
## The contents of this map are reformatted into a .ini file laid down
|
|
||||||
## by a ConfigMap object.
|
|
||||||
## ref: http://docs.couchdb.org/en/latest/config/index.html
|
|
||||||
couchdbConfig:
|
couchdbConfig:
|
||||||
couchdb:
|
couchdb:
|
||||||
uuid: budibase-couchdb # REQUIRED: Unique identifier for this CouchDB server instance
|
# -- Unique identifier for this CouchDB server instance. You shouldn't need
|
||||||
# cluster:
|
# to change this.
|
||||||
# q: 8 # Create 8 shards for each database
|
uuid: budibase-couchdb
|
||||||
chttpd:
|
|
||||||
bind_address: any
|
|
||||||
# chttpd.require_valid_user disables all the anonymous requests to the port
|
|
||||||
# 5984 when is set to true.
|
|
||||||
require_valid_user: false
|
|
||||||
|
|
||||||
# Kubernetes local cluster domain.
|
|
||||||
# This is used to generate FQDNs for peers when joining the CouchDB cluster.
|
|
||||||
dns:
|
|
||||||
clusterDomainSuffix: cluster.local
|
|
||||||
|
|
||||||
## Configure liveness and readiness probe values
|
|
||||||
## Ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes
|
|
||||||
# FOR COUCHDB
|
|
||||||
livenessProbe:
|
|
||||||
failureThreshold: 3
|
|
||||||
initialDelaySeconds: 0
|
|
||||||
periodSeconds: 10
|
|
||||||
successThreshold: 1
|
|
||||||
timeoutSeconds: 1
|
|
||||||
readinessProbe:
|
|
||||||
failureThreshold: 3
|
|
||||||
initialDelaySeconds: 0
|
|
||||||
periodSeconds: 10
|
|
||||||
successThreshold: 1
|
|
||||||
timeoutSeconds: 1
|
|
||||||
|
|
|
@@ -42,7 +42,7 @@ http {
   server {
     listen 10000 default_server;
     server_name _;
-    client_max_body_size 1000m;
+    client_max_body_size 50000m;
     ignore_invalid_headers off;
     proxy_buffering off;
@@ -249,4 +249,30 @@ http {
     gzip_comp_level 6;
     gzip_types text/plain text/css text/xml application/json application/javascript application/rss+xml application/atom+xml image/svg+xml;
   }
+
+  # From https://docs.datadoghq.com/integrations/nginx/?tab=kubernetes
+  server {
+    listen 81;
+    server_name localhost;
+
+    access_log off;
+    allow 127.0.0.1;
+    deny all;
+
+    location /nginx_status {
+      # Choose your status module
+
+      # freely available with open source NGINX
+      stub_status;
+
+      # for open source NGINX < version 1.7.5
+      # stub_status on;
+
+      # available only with NGINX Plus
+      # status;
+
+      # ensures the version information can be retrieved
+      server_tokens on;
+    }
+  }
 }
@@ -1,5 +1,5 @@
 {
-  "version": "2.13.19",
+  "version": "2.13.35",
   "npmClient": "yarn",
   "packages": [
     "packages/*"
@@ -72,7 +72,7 @@
     "@types/tar-fs": "2.0.1",
     "@types/uuid": "8.3.4",
     "chance": "1.1.8",
-    "ioredis-mock": "8.7.0",
+    "ioredis-mock": "8.9.0",
     "jest": "29.6.2",
     "jest-environment-node": "29.6.2",
     "jest-serial-runner": "1.2.1",
@@ -19,6 +19,7 @@ import {
   GoogleInnerConfig,
   OIDCInnerConfig,
   PlatformLogoutOpts,
+  SessionCookie,
   SSOProviderType,
 } from "@budibase/types"
 import * as events from "../events"
@@ -44,7 +45,6 @@ export const buildAuthMiddleware = authenticated
 export const buildTenancyMiddleware = tenancy
 export const buildCsrfMiddleware = csrf
 export const passport = _passport
-export const jwt = require("jsonwebtoken")

 // Strategies
 _passport.use(new LocalStrategy(local.options, local.authenticate))
@@ -191,10 +191,10 @@ export async function platformLogout(opts: PlatformLogoutOpts) {

   if (!ctx) throw new Error("Koa context must be supplied to logout.")

-  const currentSession = getCookie(ctx, Cookie.Auth)
+  const currentSession = getCookie<SessionCookie>(ctx, Cookie.Auth)
   let sessions = await getSessionsForUser(userId)

-  if (keepActiveSession) {
+  if (currentSession && keepActiveSession) {
     sessions = sessions.filter(
       session => session.sessionId !== currentSession.sessionId
     )
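The hunk above makes the auth cookie come back as `SessionCookie | undefined` rather than an untyped value, and the added `currentSession &&` check is what lets the filter dereference it safely. A minimal TypeScript sketch of the pattern, with hypothetical names (the cookie name and `getCookie` declaration below stand in for the backend-core internals):

```typescript
interface SessionCookie {
  sessionId: string
  userId: string
}

// Assumed shape of the generic helper introduced later in this diff.
declare function getCookie<T>(ctx: unknown, name: string): T | undefined

function sessionsToKeep(
  ctx: unknown,
  sessions: SessionCookie[],
  keepActiveSession: boolean
): SessionCookie[] {
  const current = getCookie<SessionCookie>(ctx, "budibase:auth") // illustrative cookie name
  // Without the `current &&` narrowing this would not type-check, because
  // `current` may be undefined when no auth cookie is present.
  if (current && keepActiveSession) {
    return sessions.filter(s => s.sessionId !== current.sessionId)
  }
  return sessions
}
```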
@@ -99,6 +99,8 @@ function updateContext(updates: ContextMap): ContextMap {
 }

 async function newContext<T>(updates: ContextMap, task: () => T) {
+  guardMigration()
+
   // see if there already is a context setup
   let context: ContextMap = updateContext(updates)
   return Context.run(context, task)
@@ -145,23 +147,27 @@ export async function doInTenant<T>(
 }

 export async function doInAppContext<T>(
-  appId: string | null,
+  appId: string,
   task: () => T
 ): Promise<T> {
-  if (!appId && !env.isTest()) {
+  return _doInAppContext(appId, task)
+}
+
+async function _doInAppContext<T>(
+  appId: string,
+  task: () => T,
+  extraContextSettings?: ContextMap
+): Promise<T> {
+  if (!appId) {
     throw new Error("appId is required")
   }

-  let updates: ContextMap
-  if (!appId) {
-    updates = { appId: "" }
-  } else {
-    const tenantId = getTenantIDFromAppID(appId)
-    updates = { appId }
-    if (tenantId) {
-      updates.tenantId = tenantId
-    }
-  }
+  const tenantId = getTenantIDFromAppID(appId)
+  const updates: ContextMap = { appId, ...extraContextSettings }
+  if (tenantId) {
+    updates.tenantId = tenantId
+  }

   return newContext(updates, task)
 }
@@ -182,6 +188,24 @@ export async function doInIdentityContext<T>(
   return newContext(context, task)
 }

+function guardMigration() {
+  const context = Context.get()
+  if (context?.isMigrating) {
+    throw new Error(
+      "The context cannot be changed, a migration is currently running"
+    )
+  }
+}
+
+export async function doInAppMigrationContext<T>(
+  appId: string,
+  task: () => T
+): Promise<T> {
+  return _doInAppContext(appId, task, {
+    isMigrating: true,
+  })
+}
+
 export function getIdentity(): IdentityContext | undefined {
   try {
     const context = Context.get()
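`doInAppMigrationContext` reuses `_doInAppContext` but stamps `isMigrating: true` into the context, and `guardMigration` (called from `newContext`, above) refuses any further context switch while that flag is set. A sketch of the same guard pattern built directly on Node's `AsyncLocalStorage`; the real implementation lives in backend-core's `Context` class, so the names here are illustrative only:

```typescript
import { AsyncLocalStorage } from "async_hooks"

// Illustrative context shape, mirroring the ContextMap in this diff.
type Ctx = { appId?: string; isMigrating?: boolean }
const storage = new AsyncLocalStorage<Ctx>()

function guardMigration() {
  // Reject any attempt to open a new context while a migration is running.
  if (storage.getStore()?.isMigrating) {
    throw new Error(
      "The context cannot be changed, a migration is currently running"
    )
  }
}

function doInAppMigrationContext<T>(appId: string, task: () => T): T {
  guardMigration() // nested migrations are rejected too
  return storage.run({ appId, isMigrating: true }, task)
}

// Usage: any context switch inside the task throws, exactly as the new
// "a nested context.%s function cannot run" tests below verify.
doInAppMigrationContext("app_123", () => {
  // doInAppContext("app_456", ...) would throw here
})
```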
@@ -1,6 +1,11 @@
 import { testEnv } from "../../../tests/extra"
 import * as context from "../"
 import { DEFAULT_TENANT_ID } from "../../constants"
+import { structures } from "../../../tests"
+import { db } from "../.."
+import Context from "../Context"
+import { ContextMap } from "../types"
+import { IdentityType } from "@budibase/types"

 describe("context", () => {
   describe("doInTenant", () => {
@@ -144,4 +149,107 @@ describe("context", () => {
       expect(isScim).toBe(false)
     })
   })
+
+  describe("doInAppMigrationContext", () => {
+    it("the context is set correctly", async () => {
+      const appId = db.generateAppID()
+
+      await context.doInAppMigrationContext(appId, () => {
+        const context = Context.get()
+
+        const expected: ContextMap = {
+          appId,
+          isMigrating: true,
+        }
+        expect(context).toEqual(expected)
+      })
+    })
+
+    it("the context is set correctly when running in a tenant id", async () => {
+      const tenantId = structures.tenant.id()
+      const appId = db.generateAppID(tenantId)
+
+      await context.doInAppMigrationContext(appId, () => {
+        const context = Context.get()
+
+        const expected: ContextMap = {
+          appId,
+          isMigrating: true,
+          tenantId,
+        }
+        expect(context).toEqual(expected)
+      })
+    })
+
+    it("the context is not modified outside the delegate", async () => {
+      const appId = db.generateAppID()
+
+      expect(Context.get()).toBeUndefined()
+
+      await context.doInAppMigrationContext(appId, () => {
+        const context = Context.get()
+
+        const expected: ContextMap = {
+          appId,
+          isMigrating: true,
+        }
+        expect(context).toEqual(expected)
+      })
+
+      expect(Context.get()).toBeUndefined()
+    })
+
+    it.each([
+      [
+        "doInAppMigrationContext",
+        () => context.doInAppMigrationContext(db.generateAppID(), () => {}),
+      ],
+      [
+        "doInAppContext",
+        () => context.doInAppContext(db.generateAppID(), () => {}),
+      ],
+      [
+        "doInAutomationContext",
+        () =>
+          context.doInAutomationContext({
+            appId: db.generateAppID(),
+            automationId: structures.generator.guid(),
+            task: () => {},
+          }),
+      ],
+      ["doInContext", () => context.doInContext(db.generateAppID(), () => {})],
+      [
+        "doInEnvironmentContext",
+        () => context.doInEnvironmentContext({}, () => {}),
+      ],
+      [
+        "doInIdentityContext",
+        () =>
+          context.doInIdentityContext(
+            {
+              account: undefined,
+              type: IdentityType.USER,
+              _id: structures.users.user()._id!,
+            },
+            () => {}
+          ),
+      ],
+      ["doInScimContext", () => context.doInScimContext(() => {})],
+      [
+        "doInTenant",
+        () => context.doInTenant(structures.tenant.id(), () => {}),
+      ],
+    ])(
+      "a nested context.%s function cannot run",
+      async (_, otherContextCall: () => Promise<void>) => {
+        await expect(
+          context.doInAppMigrationContext(db.generateAppID(), async () => {
+            await otherContextCall()
+          })
+        ).rejects.toThrowError(
+          "The context cannot be changed, a migration is currently running"
+        )
+      }
+    )
+  })
 })
@@ -8,4 +8,5 @@ export type ContextMap = {
   environmentVariables?: Record<string, string>
   isScim?: boolean
   automationId?: string
+  isMigrating?: boolean
 }
@@ -13,7 +13,7 @@ import { getGlobalDB, doInTenant } from "../context"
 import { decrypt } from "../security/encryption"
 import * as identity from "../context/identity"
 import env from "../environment"
-import { Ctx, EndpointMatcher } from "@budibase/types"
+import { Ctx, EndpointMatcher, SessionCookie } from "@budibase/types"
 import { InvalidAPIKeyError, ErrorCode } from "../errors"

 const ONE_MINUTE = env.SESSION_UPDATE_PERIOD

@@ -98,7 +98,9 @@ export default function (
   // check the actual user is authenticated first, try header or cookie
   let headerToken = ctx.request.headers[Header.TOKEN]

-  const authCookie = getCookie(ctx, Cookie.Auth) || openJwt(headerToken)
+  const authCookie =
+    getCookie<SessionCookie>(ctx, Cookie.Auth) ||
+    openJwt<SessionCookie>(headerToken)
   let apiKey = ctx.request.headers[Header.API_KEY]

   if (!apiKey && ctx.request.headers[Header.AUTHORIZATION]) {
@@ -3,7 +3,7 @@ import { Cookie } from "../../../constants"
 import * as configs from "../../../configs"
 import * as cache from "../../../cache"
 import * as utils from "../../../utils"
-import { UserCtx, SSOProfile } from "@budibase/types"
+import { UserCtx, SSOProfile, DatasourceAuthCookie } from "@budibase/types"
 import { ssoSaveUserNoOp } from "../sso/sso"

 const GoogleStrategy = require("passport-google-oauth").OAuth2Strategy

@@ -58,7 +58,14 @@ export async function postAuth(
   const platformUrl = await configs.getPlatformUrl({ tenantAware: false })

   let callbackUrl = `${platformUrl}/api/global/auth/datasource/google/callback`
-  const authStateCookie = utils.getCookie(ctx, Cookie.DatasourceAuth)
+  const authStateCookie = utils.getCookie<{ appId: string }>(
+    ctx,
+    Cookie.DatasourceAuth
+  )
+
+  if (!authStateCookie) {
+    throw new Error("Unable to fetch datasource auth cookie")
+  }

   return passport.authenticate(
     new GoogleStrategy(
@@ -305,20 +305,33 @@ export async function retrieveDirectory(bucketName: string, path: string) {
   let writePath = join(budibaseTempDir(), v4())
   fs.mkdirSync(writePath)
   const objects = await listAllObjects(bucketName, path)
-  let fullObjects = await Promise.all(
-    objects.map(obj => retrieve(bucketName, obj.Key!))
+  let streams = await Promise.all(
+    objects.map(obj => getReadStream(bucketName, obj.Key!))
   )
   let count = 0
+  const writePromises: Promise<Error>[] = []
   for (let obj of objects) {
     const filename = obj.Key!
-    const data = fullObjects[count++]
+    const stream = streams[count++]
     const possiblePath = filename.split("/")
-    if (possiblePath.length > 1) {
-      const dirs = possiblePath.slice(0, possiblePath.length - 1)
-      fs.mkdirSync(join(writePath, ...dirs), { recursive: true })
+    const dirs = possiblePath.slice(0, possiblePath.length - 1)
+    const possibleDir = join(writePath, ...dirs)
+    if (possiblePath.length > 1 && !fs.existsSync(possibleDir)) {
+      fs.mkdirSync(possibleDir, { recursive: true })
     }
-    fs.writeFileSync(join(writePath, ...possiblePath), data)
+    const writeStream = fs.createWriteStream(join(writePath, ...possiblePath), {
+      mode: 0o644,
+    })
+    stream.pipe(writeStream)
+    writePromises.push(
+      new Promise((resolve, reject) => {
+        stream.on("finish", resolve)
+        stream.on("error", reject)
+        writeStream.on("error", reject)
+      })
+    )
   }
+  await Promise.all(writePromises)
   return writePath
 }
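The rewrite above swaps `retrieve`, which buffered each object fully into memory, for `getReadStream`, piping every object straight to disk and collecting one promise per write so the function still resolves only once all files are flushed. For reference, a hedged sketch of the same idea using Node's `stream/promises.pipeline`, which propagates errors from either side and awaits completion in a single call (it assumes a `getReadStream` with the signature used in the hunk):

```typescript
import fs from "fs"
import { Readable } from "stream"
import { pipeline } from "stream/promises"

// Assumed helper from the diff: returns a Readable for one object's contents.
declare function getReadStream(bucket: string, key: string): Promise<Readable>

async function downloadObject(bucket: string, key: string, dest: string) {
  const read = await getReadStream(bucket, key)
  // pipeline resolves when the write side finishes and rejects if either
  // stream errors, replacing the manual "finish"/"error" listener wiring.
  await pipeline(read, fs.createWriteStream(dest, { mode: 0o644 }))
}
```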
@@ -2,8 +2,9 @@ import Redlock from "redlock"
 import { getLockClient } from "./init"
 import { LockOptions, LockType } from "@budibase/types"
 import * as context from "../context"
-import env from "../environment"
 import { logWarn } from "../logging"
+import { utils } from "@budibase/shared-core"
+import { Duration } from "../utils"

 async function getClient(
   type: LockType,

@@ -12,9 +13,7 @@ async function getClient(
   if (type === LockType.CUSTOM) {
     return newRedlock(opts)
   }
-  if (env.isTest() && type !== LockType.TRY_ONCE) {
-    return newRedlock(OPTIONS.TEST)
-  }
   switch (type) {
     case LockType.TRY_ONCE: {
       return newRedlock(OPTIONS.TRY_ONCE)

@@ -28,13 +27,16 @@ async function getClient(
     case LockType.DELAY_500: {
       return newRedlock(OPTIONS.DELAY_500)
     }
+    case LockType.AUTO_EXTEND: {
+      return newRedlock(OPTIONS.AUTO_EXTEND)
+    }
     default: {
-      throw new Error(`Could not get redlock client: ${type}`)
+      throw utils.unreachable(type)
     }
   }
 }

-const OPTIONS = {
+const OPTIONS: Record<keyof typeof LockType, Redlock.Options> = {
   TRY_ONCE: {
     // immediately throws an error if the lock is already held
     retryCount: 0,

@@ -42,11 +44,6 @@ const OPTIONS = {
   TRY_TWICE: {
     retryCount: 1,
   },
-  TEST: {
-    // higher retry count in unit tests
-    // due to high contention.
-    retryCount: 100,
-  },
   DEFAULT: {
     // the expected clock drift; for more details
     // see http://redis.io/topics/distlock

@@ -67,10 +64,14 @@ const OPTIONS = {
   DELAY_500: {
     retryDelay: 500,
   },
+  CUSTOM: {},
+  AUTO_EXTEND: {
+    retryCount: -1,
+  },
 }

 export async function newRedlock(opts: Redlock.Options = {}) {
-  let options = { ...OPTIONS.DEFAULT, ...opts }
+  const options = { ...OPTIONS.DEFAULT, ...opts }
   const redisWrapper = await getLockClient()
   const client = redisWrapper.getClient()
   return new Redlock([client], options)

@@ -100,17 +101,36 @@ function getLockName(opts: LockOptions) {
   return name
 }

+export const AUTO_EXTEND_POLLING_MS = Duration.fromSeconds(10).toMs()
+
 export async function doWithLock<T>(
   opts: LockOptions,
   task: () => Promise<T>
 ): Promise<RedlockExecution<T>> {
   const redlock = await getClient(opts.type, opts.customOptions)
-  let lock
+  let lock: Redlock.Lock | undefined
+  let timeout
   try {
     const name = getLockName(opts)

+    const ttl =
+      opts.type === LockType.AUTO_EXTEND ? AUTO_EXTEND_POLLING_MS : opts.ttl
+
     // create the lock
-    lock = await redlock.lock(name, opts.ttl)
+    lock = await redlock.lock(name, ttl)
+
+    if (opts.type === LockType.AUTO_EXTEND) {
+      // We keep extending the lock while the task is running
+      const extendInIntervals = (): void => {
+        timeout = setTimeout(async () => {
+          lock = await lock!.extend(ttl, () => opts.onExtend && opts.onExtend())
+
+          extendInIntervals()
+        }, ttl / 2)
+      }
+
+      extendInIntervals()
+    }
+
     // perform locked task
     // need to await to ensure completion before unlocking

@@ -131,8 +151,7 @@ export async function doWithLock<T>(
       throw e
     }
   } finally {
-    if (lock) {
-      await lock.unlock()
-    }
+    clearTimeout(timeout)
+    await lock?.unlock()
   }
 }
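The new `AUTO_EXTEND` lock type takes the lock with a 10-second TTL (`AUTO_EXTEND_POLLING_MS`) and then re-extends it every `ttl / 2` until the task settles, invoking the optional `onExtend` callback on each extension. A usage sketch based on the option shapes the new tests below exercise; the lock name is illustrative:

```typescript
import { LockName, LockType, LockOptions } from "@budibase/types"
import { doWithLock } from "./redlockImpl"

// Sketch only: holds a distributed lock for the full duration of a
// long-running task without having to guess a safe TTL up front.
async function runExclusively() {
  const opts: LockOptions = {
    name: LockName.PERSIST_WRITETHROUGH, // illustrative lock name
    type: LockType.AUTO_EXTEND, // ttl is derived from AUTO_EXTEND_POLLING_MS
    onExtend: () => console.log("lock extended"), // optional callback
  }
  const result = await doWithLock(opts, async () => {
    // long-running work; the lock keeps being extended underneath
    return "done"
  })
  // doWithLock reports whether the task actually ran under the lock
  return result.executed ? result.result : undefined
}
```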
@@ -0,0 +1,105 @@
+import { LockName, LockType, LockOptions } from "@budibase/types"
+import { AUTO_EXTEND_POLLING_MS, doWithLock } from "../redlockImpl"
+import { DBTestConfiguration, generator } from "../../../tests"
+
+describe("redlockImpl", () => {
+  beforeEach(() => {
+    jest.useFakeTimers()
+  })
+
+  describe("doWithLock", () => {
+    const config = new DBTestConfiguration()
+    const lockTtl = AUTO_EXTEND_POLLING_MS
+
+    function runLockWithExecutionTime({
+      opts,
+      task,
+      executionTimeMs,
+    }: {
+      opts: LockOptions
+      task: () => Promise<string>
+      executionTimeMs: number
+    }) {
+      return config.doInTenant(() =>
+        doWithLock(opts, async () => {
+          // Run in multiple intervals until hitting the expected time
+          const interval = lockTtl / 10
+          for (let i = executionTimeMs; i > 0; i -= interval) {
+            await jest.advanceTimersByTimeAsync(interval)
+          }
+          return task()
+        })
+      )
+    }
+
+    it.each(Object.values(LockType))(
+      "should return the task value and release the lock",
+      async (lockType: LockType) => {
+        const expectedResult = generator.guid()
+        const mockTask = jest.fn().mockResolvedValue(expectedResult)
+
+        const opts: LockOptions = {
+          name: LockName.PERSIST_WRITETHROUGH,
+          type: lockType,
+          ttl: lockTtl,
+        }
+
+        const result = await runLockWithExecutionTime({
+          opts,
+          task: mockTask,
+          executionTimeMs: 0,
+        })
+
+        expect(result.executed).toBe(true)
+        expect(result.executed && result.result).toBe(expectedResult)
+        expect(mockTask).toHaveBeenCalledTimes(1)
+      }
+    )
+
+    it("should extend when type is autoextend", async () => {
+      const expectedResult = generator.guid()
+      const mockTask = jest.fn().mockResolvedValue(expectedResult)
+      const mockOnExtend = jest.fn()
+
+      const opts: LockOptions = {
+        name: LockName.PERSIST_WRITETHROUGH,
+        type: LockType.AUTO_EXTEND,
+        onExtend: mockOnExtend,
+      }
+
+      const result = await runLockWithExecutionTime({
+        opts,
+        task: mockTask,
+        executionTimeMs: lockTtl * 2.5,
+      })
+
+      expect(result.executed).toBe(true)
+      expect(result.executed && result.result).toBe(expectedResult)
+      expect(mockTask).toHaveBeenCalledTimes(1)
+      expect(mockOnExtend).toHaveBeenCalledTimes(5)
+    })
+
+    it.each(Object.values(LockType).filter(t => t !== LockType.AUTO_EXTEND))(
+      "should timeout when type is %s",
+      async (lockType: LockType) => {
+        const mockTask = jest.fn().mockResolvedValue("mockResult")
+
+        const opts: LockOptions = {
+          name: LockName.PERSIST_WRITETHROUGH,
+          type: lockType,
+          ttl: lockTtl,
+        }
+
+        await expect(
+          runLockWithExecutionTime({
+            opts,
+            task: mockTask,
+            executionTimeMs: lockTtl * 2,
+          })
+        ).rejects.toThrowError(
+          `Unable to fully release the lock on resource \"lock:${config.tenantId}_persist_writethrough\".`
+        )
+      }
+    )
+  })
+})
@@ -73,6 +73,9 @@ export async function encryptFile(
   const outputFileName = `${filename}.enc`

   const filePath = join(dir, filename)
+  if (fs.lstatSync(filePath).isDirectory()) {
+    throw new Error("Unable to encrypt directory")
+  }
   const inputFile = fs.createReadStream(filePath)
   const outputFile = fs.createWriteStream(join(dir, outputFileName))


@@ -110,6 +113,9 @@ export async function decryptFile(
   outputPath: string,
   secret: string
 ) {
+  if (fs.lstatSync(inputPath).isDirectory()) {
+    throw new Error("Unable to encrypt directory")
+  }
   const { salt, iv } = await getSaltAndIV(inputPath)
   const inputFile = fs.createReadStream(inputPath, {
     start: SALT_LENGTH + IV_LENGTH,
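These `lstatSync` guards fail fast when a directory is passed in. The reason an up-front check is worth it: `fs.createReadStream` on a directory only errors asynchronously, when the stream is first read, so without the guard the failure surfaces as a late, harder-to-trace stream error. A small sketch of the guard in isolation, with a hypothetical helper name:

```typescript
import fs from "fs"

// Minimal sketch: checking with lstatSync before opening a stream converts
// an asynchronous read-time failure (typically EISDIR) into an immediate,
// descriptive exception at the call site.
function assertIsFile(path: string): void {
  if (fs.lstatSync(path).isDirectory()) {
    throw new Error(`Expected a file but got a directory: ${path}`)
  }
}
```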
@@ -93,11 +93,19 @@ export const getTenantIDFromCtx = (
   // subdomain
   if (isAllowed(TenantResolutionStrategy.SUBDOMAIN)) {
     // e.g. budibase.app or local.com:10000
-    const platformHost = new URL(getPlatformURL()).host.split(":")[0]
+    let platformHost
+    try {
+      platformHost = new URL(getPlatformURL()).host.split(":")[0]
+    } catch (err: any) {
+      // if invalid URL, just don't try to process subdomain
+      if (err.code !== "ERR_INVALID_URL") {
+        throw err
+      }
+    }
     // e.g. tenant.budibase.app or tenant.local.com
     const requestHost = ctx.host
     // parse the tenant id from the difference
-    if (requestHost.includes(platformHost)) {
+    if (platformHost && requestHost.includes(platformHost)) {
       const tenantId = requestHost.substring(
         0,
         requestHost.indexOf(`.${platformHost}`)
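This hunk stops an unparseable platform URL from crashing tenant resolution: `ERR_INVALID_URL` is swallowed and subdomain matching is simply skipped, while any other error still propagates. The same logic, extracted into a standalone sketch (names are illustrative, not the backend-core API):

```typescript
// Derives a tenant ID from the request host by stripping the platform host
// suffix, e.g. "acme.budibase.app" against "budibase.app" yields "acme".
function tenantFromHost(
  requestHost: string,
  platformUrl: string
): string | undefined {
  let platformHost: string | undefined
  try {
    platformHost = new URL(platformUrl).host.split(":")[0]
  } catch (err: any) {
    // Node's URL constructor tags invalid input with code ERR_INVALID_URL;
    // anything else is unexpected and should still be thrown.
    if (err.code !== "ERR_INVALID_URL") throw err
  }
  if (platformHost && requestHost.includes(platformHost)) {
    return requestHost.substring(0, requestHost.indexOf(`.${platformHost}`))
  }
  return undefined
}

// tenantFromHost("acme.budibase.app", "https://budibase.app") === "acme"
// tenantFromHost("acme.budibase.app", "not a url") === undefined
```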
@ -11,8 +11,7 @@ import {
|
||||||
TenantResolutionStrategy,
|
TenantResolutionStrategy,
|
||||||
} from "@budibase/types"
|
} from "@budibase/types"
|
||||||
import type { SetOption } from "cookies"
|
import type { SetOption } from "cookies"
|
||||||
|
import jwt, { Secret } from "jsonwebtoken"
|
||||||
const jwt = require("jsonwebtoken")
|
|
||||||
|
|
||||||
const APP_PREFIX = DocumentType.APP + SEPARATOR
|
const APP_PREFIX = DocumentType.APP + SEPARATOR
|
||||||
const PROD_APP_PREFIX = "/app/"
|
const PROD_APP_PREFIX = "/app/"
|
||||||
|
@ -60,10 +59,7 @@ export function isServingApp(ctx: Ctx) {
|
||||||
return true
|
return true
|
||||||
}
|
}
|
||||||
// prod app
|
// prod app
|
||||||
if (ctx.path.startsWith(PROD_APP_PREFIX)) {
|
return ctx.path.startsWith(PROD_APP_PREFIX)
|
||||||
return true
|
|
||||||
}
|
|
||||||
return false
|
|
||||||
}
|
}
|
||||||
|
|
||||||
export function isServingBuilder(ctx: Ctx): boolean {
|
export function isServingBuilder(ctx: Ctx): boolean {
|
||||||
|
@ -138,16 +134,16 @@ function parseAppIdFromUrl(url?: string) {
|
||||||
* opens the contents of the specified encrypted JWT.
|
* opens the contents of the specified encrypted JWT.
|
||||||
* @return the contents of the token.
|
* @return the contents of the token.
|
||||||
*/
|
*/
|
||||||
export function openJwt(token: string) {
|
export function openJwt<T>(token?: string): T | undefined {
|
||||||
if (!token) {
|
if (!token) {
|
||||||
return token
|
return undefined
|
||||||
}
|
}
|
||||||
try {
|
try {
|
||||||
return jwt.verify(token, env.JWT_SECRET)
|
return jwt.verify(token, env.JWT_SECRET as Secret) as T
|
||||||
} catch (e) {
|
} catch (e) {
|
||||||
if (env.JWT_SECRET_FALLBACK) {
|
if (env.JWT_SECRET_FALLBACK) {
|
||||||
// fallback to enable rotation
|
// fallback to enable rotation
|
||||||
return jwt.verify(token, env.JWT_SECRET_FALLBACK)
|
return jwt.verify(token, env.JWT_SECRET_FALLBACK) as T
|
||||||
} else {
|
} else {
|
||||||
throw e
|
throw e
|
||||||
}
|
}
|
||||||

@@ -159,13 +155,9 @@ export function isValidInternalAPIKey(apiKey: string) {
     return true
   }
   // fallback to enable rotation
-  if (
-    env.INTERNAL_API_KEY_FALLBACK &&
-    env.INTERNAL_API_KEY_FALLBACK === apiKey
-  ) {
-    return true
-  }
-  return false
+  return !!(
+    env.INTERNAL_API_KEY_FALLBACK && env.INTERNAL_API_KEY_FALLBACK === apiKey
+  )
 }
 
 /**
@@ -173,14 +165,14 @@ export function isValidInternalAPIKey(apiKey: string) {
  * @param ctx The request which is to be manipulated.
  * @param name The name of the cookie to get.
  */
-export function getCookie(ctx: Ctx, name: string) {
+export function getCookie<T>(ctx: Ctx, name: string) {
   const cookie = ctx.cookies.get(name)
 
   if (!cookie) {
-    return cookie
+    return undefined
   }
 
-  return openJwt(cookie)
+  return openJwt<T>(cookie)
 }
 
 /**
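
The queries controller hunk later in this diff shows the intended call pattern for the generic; a small sketch of that usage (SessionCookie is the real type from @budibase/types, trimmed here to the one field this diff actually reads):

// Sketch: typed cookie access, mirroring getAuthConfig below.
const authCookie = utils.getCookie<SessionCookie>(ctx, constants.Cookie.Auth)
const sessionId = authCookie ? authCookie.sessionId : null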

@@ -197,7 +189,7 @@ export function setCookie(
   opts = { sign: true }
 ) {
   if (value && opts && opts.sign) {
-    value = jwt.sign(value, env.JWT_SECRET)
+    value = jwt.sign(value, env.JWT_SECRET as Secret)
   }
 
   const config: SetOption = {
@@ -53,7 +53,7 @@
   $: {
     if (selectedImage?.url) {
       selectedUrl = selectedImage?.url
-    } else if (selectedImage) {
+    } else if (selectedImage && isImage) {
       try {
         let reader = new FileReader()
         reader.readAsDataURL(selectedImage)

@@ -30,6 +30,12 @@ const CAPTURE_VAR_INSIDE_TEMPLATE = /{{([^}]+)}}/g
 const CAPTURE_VAR_INSIDE_JS = /\$\("([^")]+)"\)/g
 const CAPTURE_HBS_TEMPLATE = /{{[\S\s]*?}}/g
 
+const UpdateReferenceAction = {
+  ADD: "add",
+  DELETE: "delete",
+  MOVE: "move",
+}
+
 /**
  * Gets all bindable data context fields and instance fields.
  */
@@ -1275,3 +1281,81 @@ export const runtimeToReadableBinding = (
     "readableBinding"
   )
 }
+
+/**
+ * Used to update binding references for automation or action steps
+ *
+ * @param obj - The object to be updated
+ * @param originalIndex - The original index of the step being moved. Not applicable to add/delete.
+ * @param modifiedIndex - The new index of the step being modified
+ * @param action - Used to determine if a step is being added, deleted or moved
+ * @param label - The binding text that describes the steps
+ */
+export const updateReferencesInObject = ({
+  obj,
+  modifiedIndex,
+  action,
+  label,
+  originalIndex,
+}) => {
+  const stepIndexRegex = new RegExp(`{{\\s*${label}\\.(\\d+)\\.`, "g")
+  const updateActionStep = (str, index, replaceWith) =>
+    str.replace(`{{ ${label}.${index}.`, `{{ ${label}.${replaceWith}.`)
+  for (const key in obj) {
+    if (typeof obj[key] === "string") {
+      let matches
+      while ((matches = stepIndexRegex.exec(obj[key])) !== null) {
+        const referencedStep = parseInt(matches[1])
+        if (
+          action === UpdateReferenceAction.ADD &&
+          referencedStep >= modifiedIndex
+        ) {
+          obj[key] = updateActionStep(
+            obj[key],
+            referencedStep,
+            referencedStep + 1
+          )
+        } else if (
+          action === UpdateReferenceAction.DELETE &&
+          referencedStep > modifiedIndex
+        ) {
+          obj[key] = updateActionStep(
+            obj[key],
+            referencedStep,
+            referencedStep - 1
+          )
+        } else if (action === UpdateReferenceAction.MOVE) {
+          if (referencedStep === originalIndex) {
+            obj[key] = updateActionStep(obj[key], referencedStep, modifiedIndex)
+          } else if (
+            modifiedIndex <= referencedStep &&
+            modifiedIndex < originalIndex
+          ) {
+            obj[key] = updateActionStep(
+              obj[key],
+              referencedStep,
+              referencedStep + 1
+            )
+          } else if (
+            modifiedIndex >= referencedStep &&
+            modifiedIndex > originalIndex
+          ) {
+            obj[key] = updateActionStep(
+              obj[key],
+              referencedStep,
+              referencedStep - 1
+            )
+          }
+        }
+      }
+    } else if (typeof obj[key] === "object" && obj[key] !== null) {
+      updateReferencesInObject({
+        obj: obj[key],
+        modifiedIndex,
+        action,
+        label,
+        originalIndex,
+      })
+    }
+  }
+}
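
In short: every `{{ <label>.<n>. }}` binding in the object tree is renumbered so it keeps pointing at the same logical step. A quick illustration, with assumed values:

// Sketch: deleting step 1 shifts the later reference down by one.
const inputs = { text: "Row is {{ steps.2.row }}" }
updateReferencesInObject({
  obj: inputs,
  modifiedIndex: 1,
  action: "delete",
  label: "steps",
})
// inputs.text === "Row is {{ steps.1.row }}"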
@@ -4,6 +4,7 @@ import { cloneDeep } from "lodash/fp"
 import { generate } from "shortid"
 import { selectedAutomation } from "builderStore"
 import { notifications } from "@budibase/bbui"
+import { updateReferencesInObject } from "builderStore/dataBinding"
 
 const initialAutomationState = {
   automations: [],

@@ -22,34 +23,14 @@ export const getAutomationStore = () => {
   return store
 }
 
-const updateReferencesInObject = (obj, modifiedIndex, action) => {
-  const regex = /{{\s*steps\.(\d+)\./g
-  for (const key in obj) {
-    if (typeof obj[key] === "string") {
-      let matches
-      while ((matches = regex.exec(obj[key])) !== null) {
-        const referencedStep = parseInt(matches[1])
-        if (action === "add" && referencedStep >= modifiedIndex) {
-          obj[key] = obj[key].replace(
-            `{{ steps.${referencedStep}.`,
-            `{{ steps.${referencedStep + 1}.`
-          )
-        } else if (action === "delete" && referencedStep > modifiedIndex) {
-          obj[key] = obj[key].replace(
-            `{{ steps.${referencedStep}.`,
-            `{{ steps.${referencedStep - 1}.`
-          )
-        }
-      }
-    } else if (typeof obj[key] === "object" && obj[key] !== null) {
-      updateReferencesInObject(obj[key], modifiedIndex, action)
-    }
-  }
-}
-
 const updateStepReferences = (steps, modifiedIndex, action) => {
   steps.forEach(step => {
-    updateReferencesInObject(step.inputs, modifiedIndex, action)
+    updateReferencesInObject({
+      obj: step.inputs,
+      modifiedIndex,
+      action,
+      label: "steps",
+    })
   })
 }

@@ -2,6 +2,7 @@ import { expect, describe, it, vi } from "vitest"
 import {
   runtimeToReadableBinding,
   readableToRuntimeBinding,
+  updateReferencesInObject,
 } from "../dataBinding"
 
 vi.mock("@budibase/frontend-core")
@@ -84,3 +85,461 @@ describe("readableToRuntimeBinding", () => {
     ).toEqual(`Hello {{ [user].[firstName] }}! The count is {{ count }}.`)
   })
 })
+
+describe("updateReferencesInObject", () => {
+  it("should increment steps in sequence on 'add'", () => {
+    let obj = [
+      { id: "a0", parameters: { text: "Alpha" } },
+      { id: "a1", parameters: { text: "Apple" } },
+      { id: "b2", parameters: { text: "Banana {{ actions.1.row }}" } },
+      { id: "c3", parameters: { text: "Carrot {{ actions.1.row }}" } },
+      { id: "d4", parameters: { text: "Dog {{ actions.3.row }}" } },
+      { id: "e5", parameters: { text: "Eagle {{ actions.4.row }}" } },
+    ]
+    updateReferencesInObject({
+      obj,
+      modifiedIndex: 0,
+      action: "add",
+      label: "actions",
+    })
+
+    expect(obj).toEqual([
+      { id: "a0", parameters: { text: "Alpha" } },
+      { id: "a1", parameters: { text: "Apple" } },
+      { id: "b2", parameters: { text: "Banana {{ actions.2.row }}" } },
+      { id: "c3", parameters: { text: "Carrot {{ actions.2.row }}" } },
+      { id: "d4", parameters: { text: "Dog {{ actions.4.row }}" } },
+      { id: "e5", parameters: { text: "Eagle {{ actions.5.row }}" } },
+    ])
+  })
+
+  it("should decrement steps in sequence on 'delete'", () => {
+    let obj = [
+      { id: "a1", parameters: { text: "Apple" } },
+      { id: "b2", parameters: { text: "Banana {{ actions.1.row }}" } },
+      { id: "d4", parameters: { text: "Dog {{ actions.3.row }}" } },
+      { id: "e5", parameters: { text: "Eagle {{ actions.4.row }}" } },
+    ]
+    updateReferencesInObject({
+      obj,
+      modifiedIndex: 2,
+      action: "delete",
+      label: "actions",
+    })
+
+    expect(obj).toEqual([
+      { id: "a1", parameters: { text: "Apple" } },
+      { id: "b2", parameters: { text: "Banana {{ actions.1.row }}" } },
+      { id: "d4", parameters: { text: "Dog {{ actions.2.row }}" } },
+      { id: "e5", parameters: { text: "Eagle {{ actions.3.row }}" } },
+    ])
+  })
+
+  it("should handle on 'move' to a lower index", () => {
+    let obj = [
+      { id: "a1", parameters: { text: "Apple" } },
+      { id: "b2", parameters: { text: "Banana {{ actions.0.row }}" } },
+      { id: "e5", parameters: { text: "Eagle {{ actions.3.row }}" } },
+      { id: "c3", parameters: { text: "Carrot {{ actions.0.row }}" } },
+      { id: "d4", parameters: { text: "Dog {{ actions.2.row }}" } },
+    ]
+    updateReferencesInObject({
+      obj,
+      modifiedIndex: 2,
+      action: "move",
+      label: "actions",
+      originalIndex: 4,
+    })
+
+    expect(obj).toEqual([
+      { id: "a1", parameters: { text: "Apple" } },
+      { id: "b2", parameters: { text: "Banana {{ actions.0.row }}" } },
+      { id: "e5", parameters: { text: "Eagle {{ actions.4.row }}" } },
+      { id: "c3", parameters: { text: "Carrot {{ actions.0.row }}" } },
+      { id: "d4", parameters: { text: "Dog {{ actions.3.row }}" } },
+    ])
+  })
+
+  it("should handle on 'move' to a higher index", () => {
+    let obj = [
+      { id: "b2", parameters: { text: "Banana {{ actions.0.row }}" } },
+      { id: "c3", parameters: { text: "Carrot {{ actions.0.row }}" } },
+      { id: "a1", parameters: { text: "Apple" } },
+      { id: "d4", parameters: { text: "Dog {{ actions.2.row }}" } },
+      { id: "e5", parameters: { text: "Eagle {{ actions.3.row }}" } },
+    ]
+    updateReferencesInObject({
+      obj,
+      modifiedIndex: 2,
+      action: "move",
+      label: "actions",
+      originalIndex: 0,
+    })
+
+    expect(obj).toEqual([
+      { id: "b2", parameters: { text: "Banana {{ actions.2.row }}" } },
+      { id: "c3", parameters: { text: "Carrot {{ actions.2.row }}" } },
+      { id: "a1", parameters: { text: "Apple" } },
+      { id: "d4", parameters: { text: "Dog {{ actions.1.row }}" } },
+      { id: "e5", parameters: { text: "Eagle {{ actions.3.row }}" } },
+    ])
+  })
+
+  it("should handle on 'move' of action being referenced, dragged to a higher index", () => {
+    let obj = [
+      { "##eventHandlerType": "Validate Form", id: "cCD0Dwcnq" },
+      { "##eventHandlerType": "Close Screen Modal", id: "3fbbIOfN0H" },
+      {
+        "##eventHandlerType": "Save Row",
+        parameters: { tableId: "ta_bb_employee" },
+        id: "aehg5cTmhR",
+      },
+      { "##eventHandlerType": "Close Side Panel", id: "mzkpf86cxo" },
+      { "##eventHandlerType": "Navigate To", id: "h0uDFeJa8A" },
+      {
+        parameters: {
+          autoDismiss: true,
+          type: "success",
+          message: "{{ actions.1.row }}",
+        },
+        "##eventHandlerType": "Show Notification",
+        id: "JEI5lAyJZ",
+      },
+    ]
+    updateReferencesInObject({
+      obj,
+      modifiedIndex: 2,
+      action: "move",
+      label: "actions",
+      originalIndex: 1,
+    })
+
+    expect(obj).toEqual([
+      { "##eventHandlerType": "Validate Form", id: "cCD0Dwcnq" },
+      { "##eventHandlerType": "Close Screen Modal", id: "3fbbIOfN0H" },
+      {
+        "##eventHandlerType": "Save Row",
+        parameters: { tableId: "ta_bb_employee" },
+        id: "aehg5cTmhR",
+      },
+      { "##eventHandlerType": "Close Side Panel", id: "mzkpf86cxo" },
+      { "##eventHandlerType": "Navigate To", id: "h0uDFeJa8A" },
+      {
+        parameters: {
+          autoDismiss: true,
+          type: "success",
+          message: "{{ actions.2.row }}",
+        },
+        "##eventHandlerType": "Show Notification",
+        id: "JEI5lAyJZ",
+      },
+    ])
+  })
+
+  it("should handle on 'move' of action being referenced, dragged to a lower index", () => {
+    let obj = [
+      {
+        "##eventHandlerType": "Save Row",
+        parameters: { tableId: "ta_bb_employee" },
+        id: "aehg5cTmhR",
+      },
+      { "##eventHandlerType": "Validate Form", id: "cCD0Dwcnq" },
+      { "##eventHandlerType": "Close Screen Modal", id: "3fbbIOfN0H" },
+      { "##eventHandlerType": "Close Side Panel", id: "mzkpf86cxo" },
+      { "##eventHandlerType": "Navigate To", id: "h0uDFeJa8A" },
+      {
+        parameters: {
+          autoDismiss: true,
+          type: "success",
+          message: "{{ actions.4.row }}",
+        },
+        "##eventHandlerType": "Show Notification",
+        id: "JEI5lAyJZ",
+      },
+    ]
+    updateReferencesInObject({
+      obj,
+      modifiedIndex: 0,
+      action: "move",
+      label: "actions",
+      originalIndex: 4,
+    })
+
+    expect(obj).toEqual([
+      {
+        "##eventHandlerType": "Save Row",
+        parameters: { tableId: "ta_bb_employee" },
+        id: "aehg5cTmhR",
+      },
+      { "##eventHandlerType": "Validate Form", id: "cCD0Dwcnq" },
+      { "##eventHandlerType": "Close Screen Modal", id: "3fbbIOfN0H" },
+      { "##eventHandlerType": "Close Side Panel", id: "mzkpf86cxo" },
+      { "##eventHandlerType": "Navigate To", id: "h0uDFeJa8A" },
+      {
+        parameters: {
+          autoDismiss: true,
+          type: "success",
+          message: "{{ actions.0.row }}",
+        },
+        "##eventHandlerType": "Show Notification",
+        id: "JEI5lAyJZ",
+      },
+    ])
+  })
+})

@@ -57,16 +57,11 @@
   }}
   class="buttons"
 >
-  <Icon hoverable size="M" name="Play" />
+  <Icon size="M" name="Play" />
   <div>Run test</div>
 </div>
 <div class="buttons">
-  <Icon
-    disabled={!$automationStore.testResults}
-    hoverable
-    size="M"
-    name="Multiple"
-  />
+  <Icon disabled={!$automationStore.testResults} size="M" name="Multiple" />
   <div
     class:disabled={!$automationStore.testResults}
     on:click={() => {

@@ -97,6 +97,7 @@
   class:typing={typing && !automationNameError}
   class:typing-error={automationNameError}
   class="blockSection"
+  on:click={() => dispatch("toggle")}
 >
   <div class="splitHeader">
     <div class="center-items">
@@ -138,7 +139,20 @@
   on:input={e => {
     automationName = e.target.value.trim()
   }}
-  on:click={startTyping}
+  on:click={e => {
+    e.stopPropagation()
+    startTyping()
+  }}
+  on:keydown={async e => {
+    if (e.key === "Enter") {
+      typing = false
+      if (automationNameError) {
+        automationName = stepNames[block.id] || block?.name
+      } else {
+        await saveName()
+      }
+    }
+  }}
   on:blur={async () => {
     typing = false
     if (automationNameError) {
@@ -168,7 +182,11 @@
   </StatusLight>
 </div>
 <Icon
-  on:click={() => dispatch("toggle")}
+  on:click={e => {
+    e.stopPropagation()
+    dispatch("toggle")
+  }}
   hoverable
   name={open ? "ChevronUp" : "ChevronDown"}
 />

@@ -195,7 +213,10 @@
 {/if}
 {#if !showTestStatus}
   <Icon
-    on:click={() => dispatch("toggle")}
+    on:click={e => {
+      e.stopPropagation()
+      dispatch("toggle")
+    }}
     hoverable
     name={open ? "ChevronUp" : "ChevronDown"}
   />

@@ -1,11 +1,9 @@
 <script>
   import {
     ModalContent,
-    Tabs,
-    Tab,
     TextArea,
-    Label,
     notifications,
+    ActionButton,
   } from "@budibase/bbui"
   import { automationStore, selectedAutomation } from "builderStore"
   import AutomationBlockSetup from "../../SetupPanel/AutomationBlockSetup.svelte"
@@ -55,50 +53,69 @@
       notifications.error(error)
     }
   }
 
+  const toggle = () => {
+    selectedValues = !selectedValues
+    selectedJSON = !selectedJSON
+  }
+  let selectedValues = true
+  let selectedJSON = false
 </script>
 
 <ModalContent
   title="Add test data"
-  confirmText="Test"
-  size="M"
+  confirmText="Run test"
+  size="L"
   showConfirmButton={true}
   disabled={isError}
   onConfirm={testAutomation}
   cancelText="Cancel"
 >
-  <Tabs selected="Form" quiet>
-    <Tab icon="Form" title="Form">
-      <div class="tab-content-padding">
-        <AutomationBlockSetup
-          {testData}
-          {schemaProperties}
-          isTestModal
-          block={trigger}
-        />
-      </div></Tab
-    >
-    <Tab icon="FileJson" title="JSON">
-      <div class="tab-content-padding">
-        <Label>JSON</Label>
-        <div class="text-area-container">
-          <TextArea
-            value={JSON.stringify($selectedAutomation.testData, null, 2)}
-            error={failedParse}
-            on:change={e => parseTestJSON(e)}
-          />
-        </div>
-      </div>
-    </Tab>
-  </Tabs>
+  <div class="size">
+    <div class="options">
+      <ActionButton quiet selected={selectedValues} on:click={toggle}
+        >Use values</ActionButton
+      >
+      <ActionButton quiet selected={selectedJSON} on:click={toggle}
+        >Use JSON</ActionButton
+      >
+    </div>
+  </div>
+
+  {#if selectedValues}
+    <div class="tab-content-padding">
+      <AutomationBlockSetup
+        {testData}
+        {schemaProperties}
+        isTestModal
+        block={trigger}
+      />
+    </div>
+  {/if}
+  {#if selectedJSON}
+    <div class="text-area-container">
+      <TextArea
+        value={JSON.stringify($selectedAutomation.testData, null, 2)}
+        error={failedParse}
+        on:change={e => parseTestJSON(e)}
+      />
+    </div>
+  {/if}
 </ModalContent>
 
 <style>
   .text-area-container :global(textarea) {
-    min-height: 200px;
-    height: 200px;
+    min-height: 300px;
+    height: 300px;
   }
 
   .tab-content-padding {
-    padding: 0 var(--spacing-xl);
+    padding: 0 var(--spacing-s);
+  }
+
+  .options {
+    display: flex;
+    align-items: center;
+    gap: 8px;
   }
 </style>

@@ -9,7 +9,7 @@
 <div class="title">
   <div class="title-text">
     <Icon name="MultipleCheck" />
-    <div style="padding-left: var(--spacing-l)">Test Details</div>
+    <div style="padding-left: var(--spacing-l);">Test Details</div>
   </div>
   <div style="padding-right: var(--spacing-xl)">
     <Icon

@@ -40,6 +40,7 @@
     display: flex;
     flex-direction: row;
     align-items: center;
+    padding-top: var(--spacing-s);
   }
 
   .title :global(h1) {

@@ -1,20 +1,44 @@
 <script>
   import AutomationList from "./AutomationList.svelte"
   import CreateAutomationModal from "./CreateAutomationModal.svelte"
-  import { Modal, Button, Layout } from "@budibase/bbui"
+  import { Modal, Icon } from "@budibase/bbui"
   import Panel from "components/design/Panel.svelte"
 
   export let modal
   export let webhookModal
 </script>
 
-<Panel title="Automations" borderRight>
-  <Layout paddingX="L" paddingY="XL" gap="S">
-    <Button cta on:click={modal.show}>Add automation</Button>
-  </Layout>
+<Panel title="Automations" borderRight noHeaderBorder titleCSS={false}>
+  <span class="panel-title-content" slot="panel-title-content">
+    <div class="header">
+      <div>Automations</div>
+      <div on:click={modal.show} class="add-automation-button">
+        <Icon name="Add" />
+      </div>
+    </div>
+  </span>
   <AutomationList />
 </Panel>
 
 <Modal bind:this={modal}>
   <CreateAutomationModal {webhookModal} />
 </Modal>
+
+<style>
+  .header {
+    display: flex;
+    align-items: center;
+    justify-content: space-between;
+    gap: var(--spacing-m);
+  }
+
+  .add-automation-button {
+    margin-left: 130px;
+    color: var(--grey-7);
+    cursor: pointer;
+  }
+
+  .add-automation-button:hover {
+    color: var(--ink);
+  }
+</style>

@@ -22,7 +22,7 @@
 <Select
   on:change={onChange}
   bind:value
-  options={filteredTables.filter(table => table._id !== TableNames.USERS)}
+  options={filteredTables}
   getOptionLabel={table => table.name}
   getOptionValue={table => table._id}
 />

@@ -149,7 +149,6 @@
   }
   const initialiseField = (field, savingColumn) => {
     isCreating = !field
-
     if (field && !savingColumn) {
       editableColumn = cloneDeep(field)
       originalName = editableColumn.name ? editableColumn.name + "" : null

@@ -171,7 +170,8 @@
         relationshipPart2 = part2
       }
     }
-  } else if (!savingColumn) {
+  }
+  if (!savingColumn) {
     let highestNumber = 0
     Object.keys(table.schema).forEach(columnName => {
       const columnNumber = extractColumnNumber(columnName)
@@ -307,12 +307,6 @@
       dispatch("updatecolumns")
       gridDispatch("close-edit-column")
 
-      if (saveColumn.type === LINK_TYPE) {
-        // Fetching the new tables
-        tables.fetch()
-        // Fetching the new relationships
-        datasources.fetch()
-      }
       if (originalName) {
         notifications.success("Column updated successfully")
       } else {

@@ -339,11 +333,6 @@
       confirmDeleteDialog.hide()
       dispatch("updatecolumns")
       gridDispatch("close-edit-column")
-
-      if (editableColumn.type === LINK_TYPE) {
-        // Updating the relationships
-        datasources.fetch()
-      }
     }
   } catch (error) {
     notifications.error(`Error deleting column: ${error.message}`)

@@ -540,8 +529,16 @@
 <Layout noPadding gap="S">
   {#if mounted}
     <Input
+      value={editableColumn.name}
       autofocus
-      bind:value={editableColumn.name}
+      on:input={e => {
+        if (
+          !uneditable &&
+          !(linkEditDisabled && editableColumn.type === LINK_TYPE)
+        ) {
+          editableColumn.name = e.target.value
+        }
+      }}
       disabled={uneditable ||
         (linkEditDisabled && editableColumn.type === LINK_TYPE)}
       error={errors?.name}

@@ -16,7 +16,8 @@
   export let wide = false
   export let extraWide = false
   export let closeButtonIcon = "Close"
+  export let noHeaderBorder = false
+  export let titleCSS = true
 
   $: customHeaderContent = $$slots["panel-header-content"]
   $: customTitleContent = $$slots["panel-title-content"]
 </script>

@@ -32,6 +33,7 @@
     class="header"
     class:custom={customHeaderContent}
     class:borderBottom={borderBottomHeader}
+    class:noHeaderBorder
   >
     {#if showBackButton}
      <Icon name="ArrowLeft" hoverable on:click={onClickBackButton} />

@@ -41,7 +43,7 @@
       <Icon name={icon} />
     </AbsTooltip>
   {/if}
-  <div class="title">
+  <div class:title={titleCSS}>
     {#if customTitleContent}
       <slot name="panel-title-content" />
     {:else}

@@ -106,6 +108,10 @@
     padding: 0 var(--spacing-l);
     gap: var(--spacing-m);
   }
 
+  .noHeaderBorder {
+    border-bottom: none !important;
+  }
   .header.borderBottom {
     border-bottom: var(--border-light);
   }

@@ -15,6 +15,7 @@
     getEventContextBindings,
     getActionBindings,
     makeStateBinding,
+    updateReferencesInObject,
   } from "builderStore/dataBinding"
   import { cloneDeep } from "lodash/fp"
 

@@ -30,6 +31,7 @@
 
   let actionQuery
   let selectedAction = actions?.length ? actions[0] : null
+  let originalActionIndex
 
   const setUpdateActions = actions => {
     return actions

@@ -115,6 +117,14 @@
     if (isSelected) {
       selectedAction = actions?.length ? actions[0] : null
     }
+
+    // Update action binding references
+    updateReferencesInObject({
+      obj: actions,
+      modifiedIndex: index,
+      action: "delete",
+      label: "actions",
+    })
   }
 
   const toggleActionList = () => {

@@ -137,6 +147,7 @@
 
   const selectAction = action => () => {
     selectedAction = action
+    originalActionIndex = actions.findIndex(item => item.id === action.id)
   }
 
   const onAddAction = actionType => {

@@ -146,9 +157,29 @@
 
   function handleDndConsider(e) {
     actions = e.detail.items
+
+    // set the initial index of the action being dragged
+    if (e.detail.info.trigger === "draggedEntered") {
+      originalActionIndex = actions.findIndex(
+        action => action.id === e.detail.info.id
+      )
+    }
   }
   function handleDndFinalize(e) {
     actions = e.detail.items
+
+    // Update action binding references
+    updateReferencesInObject({
+      obj: actions,
+      modifiedIndex: actions.findIndex(
+        action => action.id === e.detail.info.id
+      ),
+      action: "move",
+      label: "actions",
+      originalIndex: originalActionIndex,
+    })
+
+    originalActionIndex = -1
   }
 
   const getAllBindings = (actionBindings, eventContextBindings, actions) => {

@@ -289,7 +320,7 @@
   </Layout>
   <Layout noPadding>
     {#if selectedActionComponent && !showAvailableActions}
-      {#key selectedAction.id}
+      {#key (selectedAction.id, originalActionIndex)}
         <div class="selected-action-container">
           <svelte:component
             this={selectedActionComponent}

@@ -13,7 +13,7 @@
   export let app
   export let published
   let includeInternalTablesRows = true
-  let encypt = true
+  let encrypt = true
 
   let password = null
   const validation = createValidationStore()

@@ -27,9 +27,9 @@
   $: stepConfig = {
     [Step.CONFIG]: {
       title: published ? "Export published app" : "Export latest app",
-      confirmText: encypt ? "Continue" : exportButtonText,
+      confirmText: encrypt ? "Continue" : exportButtonText,
       onConfirm: () => {
-        if (!encypt) {
+        if (!encrypt) {
           exportApp()
         } else {
           currentStep = Step.SET_PASSWORD

@@ -46,7 +46,7 @@
         if (!$validation.valid) {
           return keepOpen
         }
-        exportApp(password)
+        await exportApp(password)
       },
       isValid: $validation.valid,
     },

@@ -109,13 +109,13 @@
     text="Export rows from internal tables"
     bind:value={includeInternalTablesRows}
   />
-  <Toggle text="Encrypt my export" bind:value={encypt} />
+  <Toggle text="Encrypt my export" bind:value={encrypt} />
 </Body>
-{#if !encypt}
-  <InlineAlert
-    header="Do not share your budibase application exports publicly as they may contain sensitive information such as database credentials or secret keys."
-  />
-{/if}
+<InlineAlert
+  header={encrypt
+    ? "Please note Budibase does not encrypt attachments during the export process to ensure efficient export of large attachments."
+    : "Do not share your Budibase application exports publicly as they may contain sensitive information such as database credentials or secret keys."}
+/>
 {/if}
 {#if currentStep === Step.SET_PASSWORD}
   <Input

@@ -110,7 +110,7 @@
   }
 
   .setup {
-    padding-top: var(--spectrum-global-dimension-size-200);
+    padding-top: 9px;
     border-left: var(--border-light);
     display: flex;
     flex-direction: column;

@@ -8,7 +8,7 @@
     x => x.value === users.getUserRole(row)
   )
   $: value = role?.label || "Not available"
-  $: tooltip = role.subtitle || ""
+  $: tooltip = role?.subtitle || ""
 </script>
 
 <div on:click|stopPropagation title={tooltip}>

@@ -81,13 +81,21 @@ export function createTablesStore() {
       replaceTable(savedTable._id, savedTable)
       select(savedTable._id)
       // make sure tables up to date (related)
-      let tableIdsToFetch = []
+      let newTableIds = []
       for (let column of Object.values(updatedTable?.schema || {})) {
         if (column.type === FIELDS.LINK.type) {
-          tableIdsToFetch.push(column.tableId)
+          newTableIds.push(column.tableId)
         }
       }
-      tableIdsToFetch = [...new Set(tableIdsToFetch)]
+
+      let oldTableIds = []
+      for (let column of Object.values(oldTable?.schema || {})) {
+        if (column.type === FIELDS.LINK.type) {
+          oldTableIds.push(column.tableId)
+        }
+      }
+
+      const tableIdsToFetch = [...new Set([...newTableIds, ...oldTableIds])]
       // too many tables to fetch, just get all
       if (tableIdsToFetch.length > 3) {
         await fetch()
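
The Set union here collects link targets from both the old and the new schema and dedupes them; a tiny illustration with assumed ids:

// Sketch: union + dedupe of related table ids.
const newTableIds = ["ta_a", "ta_b"]
const oldTableIds = ["ta_b", "ta_c"]
const tableIdsToFetch = [...new Set([...newTableIds, ...oldTableIds])]
// -> ["ta_a", "ta_b", "ta_c"]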

@@ -2152,7 +2152,7 @@
     "/applications/{appId}/publish": {
       "post": {
         "operationId": "appPublish",
-        "summary": "Unpublish an application",
+        "summary": "Publish an application",
         "tags": [
           "applications"
         ],

@@ -1761,7 +1761,7 @@ paths:
   "/applications/{appId}/publish":
     post:
       operationId: appPublish
-      summary: Unpublish an application
+      summary: Publish an application
      tags:
        - applications
      parameters:

@@ -9,7 +9,7 @@ import { quotas } from "@budibase/pro"
 import { events, context, utils, constants } from "@budibase/backend-core"
 import sdk from "../../../sdk"
 import { QueryEvent } from "../../../threads/definitions"
-import { ConfigType, Query, UserCtx } from "@budibase/types"
+import { ConfigType, Query, UserCtx, SessionCookie } from "@budibase/types"
 import { ValidQueryNameRegex } from "@budibase/shared-core"
 
 const Runner = new Thread(ThreadType.QUERY, {

@@ -113,7 +113,7 @@ function getOAuthConfigCookieId(ctx: UserCtx) {
 }
 
 function getAuthConfig(ctx: UserCtx) {
-  const authCookie = utils.getCookie(ctx, constants.Cookie.Auth)
+  const authCookie = utils.getCookie<SessionCookie>(ctx, constants.Cookie.Auth)
   let authConfigCtx: any = {}
   authConfigCtx["configId"] = getOAuthConfigCookieId(ctx)
   authConfigCtx["sessionId"] = authCookie ? authCookie.sessionId : null

@@ -24,7 +24,7 @@ import AWS from "aws-sdk"
 import fs from "fs"
 import sdk from "../../../sdk"
 import * as pro from "@budibase/pro"
-import { App, Ctx, ProcessAttachmentResponse, Upload } from "@budibase/types"
+import { App, Ctx, ProcessAttachmentResponse } from "@budibase/types"
 
 const send = require("koa-send")
 

@@ -212,7 +212,9 @@ export const serveBuilderPreview = async function (ctx: Ctx) {
 
   if (!env.isJest()) {
     let appId = context.getAppId()
-    const previewHbs = loadHandlebarsFile(`${__dirname}/preview.hbs`)
+    const templateLoc = join(__dirname, "templates")
+    const previewLoc = fs.existsSync(templateLoc) ? templateLoc : __dirname
+    const previewHbs = loadHandlebarsFile(join(previewLoc, "preview.hbs"))
     ctx.body = await processString(previewHbs, {
       clientLibPath: objectStore.clientLibraryUrl(appId!, appInfo.version),
     })

@@ -517,9 +517,24 @@ describe.each([
   })
 
   describe("patch", () => {
+    let otherTable: Table
+
     beforeAll(async () => {
       const tableConfig = generateTableConfig()
       table = await createTable(tableConfig)
+      const otherTableConfig = generateTableConfig()
+      // need a short name of table here - for relationship tests
+      otherTableConfig.name = "a"
+      otherTableConfig.schema.relationship = {
+        name: "relationship",
+        relationshipType: RelationshipType.ONE_TO_MANY,
+        type: FieldType.LINK,
+        tableId: table._id!,
+        fieldName: "relationship",
+      }
+      otherTable = await createTable(otherTableConfig)
+      // need to set the config back to the original table
+      config.table = table
     })
 
     it("should update only the fields that are supplied", async () => {

@@ -615,6 +630,28 @@
       expect(getResp.body.user1[0]._id).toEqual(user2._id)
       expect(getResp.body.user2[0]._id).toEqual(user2._id)
     })
+
+    it("should be able to update relationships when both columns are same name", async () => {
+      let row = await config.api.row.save(table._id!, {
+        name: "test",
+        description: "test",
+      })
+      let row2 = await config.api.row.save(otherTable._id!, {
+        name: "test",
+        description: "test",
+        relationship: [row._id],
+      })
+      row = (await config.api.row.get(table._id!, row._id!)).body
+      expect(row.relationship.length).toBe(1)
+      const resp = await config.api.row.patch(table._id!, {
+        _id: row._id!,
+        _rev: row._rev!,
+        tableId: row.tableId!,
+        name: "test2",
+        relationship: [row2._id],
+      })
+      expect(resp.relationship.length).toBe(1)
+    })
   })
 
   describe("destroy", () => {

@@ -251,9 +251,19 @@ class LinkController {
       // find the docs that need to be deleted
       let toDeleteDocs = thisFieldLinkDocs
         .filter(doc => {
-          let correctDoc =
-            doc.doc1.fieldName === fieldName ? doc.doc2 : doc.doc1
-          return rowField.indexOf(correctDoc.rowId) === -1
+          let correctDoc
+          if (
+            doc.doc1.tableId === table._id! &&
+            doc.doc1.fieldName === fieldName
+          ) {
+            correctDoc = doc.doc2
+          } else if (
+            doc.doc2.tableId === table._id! &&
+            doc.doc2.fieldName === fieldName
+          ) {
+            correctDoc = doc.doc1
+          }
+          return correctDoc && rowField.indexOf(correctDoc.rowId) === -1
         })
         .map(doc => {
           return { ...doc, _deleted: true }
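
The point of the extra tableId check: when two tables both name a link column "relationship", matching on fieldName alone can pick the wrong side of the link document. A simplified sketch of the selection (LinkSide is a trimmed assumption of the real link doc shape):

// Sketch: choose the opposite side of a link doc by table AND field.
type LinkSide = { tableId: string; fieldName: string; rowId: string }
type LinkDoc = { doc1: LinkSide; doc2: LinkSide }

function otherSide(
  doc: LinkDoc,
  tableId: string,
  fieldName: string
): LinkSide | undefined {
  if (doc.doc1.tableId === tableId && doc.doc1.fieldName === fieldName) {
    return doc.doc2
  }
  if (doc.doc2.tableId === tableId && doc.doc2.fieldName === fieldName) {
    return doc.doc1
  }
  return undefined
}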

@@ -59,6 +59,7 @@ const environment = {
   BB_ADMIN_USER_PASSWORD: process.env.BB_ADMIN_USER_PASSWORD,
   PLUGINS_DIR: process.env.PLUGINS_DIR || "/plugins",
   OPENAI_API_KEY: process.env.OPENAI_API_KEY,
+  MAX_IMPORT_SIZE_MB: process.env.MAX_IMPORT_SIZE_MB,
   // flags
   ALLOW_DEV_AUTOMATIONS: process.env.ALLOW_DEV_AUTOMATIONS,
   DISABLE_THREADING: process.env.DISABLE_THREADING,

@@ -934,25 +934,43 @@ describe("postgres integrations", () => {
         },
       ],
     })
+    const m2oRel = {
+      [m2oFieldName]: [
+        {
+          _id: row._id,
+        },
+      ],
+    }
     expect(res.body[m2oFieldName]).toEqual([
       {
+        ...m2oRel,
         ...foreignRowsByType[RelationshipType.MANY_TO_ONE][0].row,
         [`fk_${manyToOneRelationshipInfo.table.name}_${manyToOneRelationshipInfo.fieldName}`]:
           row.id,
       },
       {
+        ...m2oRel,
         ...foreignRowsByType[RelationshipType.MANY_TO_ONE][1].row,
         [`fk_${manyToOneRelationshipInfo.table.name}_${manyToOneRelationshipInfo.fieldName}`]:
           row.id,
       },
       {
+        ...m2oRel,
         ...foreignRowsByType[RelationshipType.MANY_TO_ONE][2].row,
         [`fk_${manyToOneRelationshipInfo.table.name}_${manyToOneRelationshipInfo.fieldName}`]:
           row.id,
       },
     ])
+    const o2mRel = {
+      [o2mFieldName]: [
+        {
+          _id: row._id,
+        },
+      ],
+    }
     expect(res.body[o2mFieldName]).toEqual([
       {
+        ...o2mRel,
         ...foreignRowsByType[RelationshipType.ONE_TO_MANY][0].row,
         _id: expect.any(String),
         _rev: expect.any(String),

@@ -17,7 +17,7 @@ import {
 import {
   getSqlQuery,
   buildExternalTableId,
-  convertSqlType,
+  generateColumnDefinition,
   finaliseExternalTables,
   SqlClient,
   checkExternalTables,

@@ -429,15 +429,12 @@ class SqlServerIntegration extends Sql implements DatasourcePlus {
         const hasDefault = def.COLUMN_DEFAULT
         const isAuto = !!autoColumns.find(col => col === name)
         const required = !!requiredColumns.find(col => col === name)
-        schema[name] = {
+        schema[name] = generateColumnDefinition({
           autocolumn: isAuto,
-          name: name,
-          constraints: {
-            presence: required && !isAuto && !hasDefault,
-          },
-          ...convertSqlType(def.DATA_TYPE),
+          name,
+          presence: required && !isAuto && !hasDefault,
           externalType: def.DATA_TYPE,
-        }
+        })
       }
       tables[tableName] = {
        _id: buildExternalTableId(datasourceId, tableName),
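
generateColumnDefinition itself is not part of this diff; below is a hedged sketch of what its call sites across the MSSQL, MySQL, Oracle, and Postgres hunks imply. The body is an assumption, not the real helper (which lives in the integrations utils module):

// Hypothetical sketch of generateColumnDefinition, inferred from usage.
interface ColumnDefinitionOpts {
  autocolumn: boolean
  name: string
  presence: boolean
  externalType: string
  options?: string[] // enum labels, when the source column is an enum
}

function generateColumnDefinition(opts: ColumnDefinitionOpts) {
  const { autocolumn, name, presence, externalType, options } = opts
  const constraints: Record<string, any> = { presence }
  if (options) {
    // assumed: enum labels become an inclusion constraint / options list
    constraints.inclusion = options
  }
  return {
    name,
    autocolumn,
    externalType,
    constraints,
    // assumed: the old convertSqlType mapping now happens inside the helper
    ...convertSqlType(externalType),
  }
}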

@@ -12,12 +12,13 @@ import {
   SourceName,
   Schema,
   TableSourceType,
+  FieldType,
 } from "@budibase/types"
 import {
   getSqlQuery,
   SqlClient,
   buildExternalTableId,
-  convertSqlType,
+  generateColumnDefinition,
   finaliseExternalTables,
   checkExternalTables,
 } from "./utils"

@@ -305,16 +306,17 @@ class MySQLIntegration extends Sql implements DatasourcePlus {
           (column.Extra === "auto_increment" ||
             column.Extra.toLowerCase().includes("generated"))
         const required = column.Null !== "YES"
-        const constraints = {
-          presence: required && !isAuto && !hasDefault,
-        }
-        schema[columnName] = {
+        schema[columnName] = generateColumnDefinition({
           name: columnName,
           autocolumn: isAuto,
-          constraints,
-          ...convertSqlType(column.Type),
+          presence: required && !isAuto && !hasDefault,
           externalType: column.Type,
-        }
+          options: column.Type.startsWith("enum")
+            ? column.Type.substring(5, column.Type.length - 1)
+                .split(",")
+                .map(str => str.replace(/^'(.*)'$/, "$1"))
+            : undefined,
+        })
       }
       if (!tables[tableName]) {
         tables[tableName] = {
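
What the new options branch does to a raw MySQL column type, as a standalone sketch with an assumed sample value:

// Sketch: turn "enum('red','green','blue')" into ["red", "green", "blue"].
const columnType = "enum('red','green','blue')" // assumed sample
const options = columnType.startsWith("enum")
  ? columnType
      .substring(5, columnType.length - 1) // strip "enum(" and trailing ")"
      .split(",")
      .map(str => str.replace(/^'(.*)'$/, "$1")) // strip the quotes
  : undefined
// options -> ["red", "green", "blue"]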

@@ -15,7 +15,7 @@ import {
 import {
   buildExternalTableId,
   checkExternalTables,
-  convertSqlType,
+  generateColumnDefinition,
   finaliseExternalTables,
   getSqlQuery,
   SqlClient,

@@ -250,14 +250,6 @@ class OracleIntegration extends Sql implements DatasourcePlus {
     )
   }
 
-  private internalConvertType(column: OracleColumn) {
-    if (this.isBooleanType(column)) {
-      return { type: FieldTypes.BOOLEAN }
-    }
-
-    return convertSqlType(column.type)
-  }
-
   /**
    * Fetches the tables from the oracle table and assigns them to the datasource.
    * @param datasourceId - datasourceId to fetch

@@ -302,13 +294,15 @@ class OracleIntegration extends Sql implements DatasourcePlus {
           const columnName = oracleColumn.name
           let fieldSchema = table.schema[columnName]
           if (!fieldSchema) {
-            fieldSchema = {
+            fieldSchema = generateColumnDefinition({
               autocolumn: OracleIntegration.isAutoColumn(oracleColumn),
               name: columnName,
-              constraints: {
-                presence: false,
-              },
-              ...this.internalConvertType(oracleColumn),
+              presence: false,
+              externalType: oracleColumn.type,
+            })
+
+            if (this.isBooleanType(oracleColumn)) {
+              fieldSchema.type = FieldTypes.BOOLEAN
             }
 
             table.schema[columnName] = fieldSchema

@@ -16,7 +16,7 @@ import {
 import {
   getSqlQuery,
   buildExternalTableId,
-  convertSqlType,
+  generateColumnDefinition,
   finaliseExternalTables,
   SqlClient,
   checkExternalTables,

@@ -162,6 +162,14 @@ class PostgresIntegration extends Sql implements DatasourcePlus {
     WHERE pg_namespace.nspname = '${this.config.schema}';
   `
 
+  ENUM_VALUES = () => `
+    SELECT t.typname,
+      e.enumlabel
+    FROM pg_type t
+    JOIN pg_enum e on t.oid = e.enumtypid
+    JOIN pg_catalog.pg_namespace n ON n.oid = t.typnamespace;
+  `
+
   constructor(config: PostgresConfig) {
     super(SqlClient.POSTGRES)
     this.config = config
@ -303,6 +311,18 @@ class PostgresIntegration extends Sql implements DatasourcePlus {
|
||||||
|
|
||||||
const tables: { [key: string]: Table } = {}
|
const tables: { [key: string]: Table } = {}
|
||||||
|
|
||||||
|
// Fetch enum values
|
||||||
|
const enumsResponse = await this.client.query(this.ENUM_VALUES())
|
||||||
|
const enumValues = enumsResponse.rows?.reduce((acc, row) => {
|
||||||
|
if (!acc[row.typname]) {
|
||||||
|
return {
|
||||||
|
[row.typname]: [row.enumlabel],
|
||||||
|
}
|
||||||
|
}
|
||||||
|
acc[row.typname].push(row.enumlabel)
|
||||||
|
return acc
|
||||||
|
}, {})
|
||||||
|
|
||||||
for (let column of columnsResponse.rows) {
|
for (let column of columnsResponse.rows) {
|
||||||
const tableName: string = column.table_name
|
const tableName: string = column.table_name
|
||||||
const columnName: string = column.column_name
|
const columnName: string = column.column_name
|
||||||
|
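
As a rough illustration of the reduce above, assuming two hypothetical pg_enum rows for a single enum type:

// Hypothetical result rows from the ENUM_VALUES query
const rows = [
  { typname: "mood", enumlabel: "happy" },
  { typname: "mood", enumlabel: "sad" },
]

const enumValues = rows.reduce((acc: Record<string, string[]>, row) => {
  if (!acc[row.typname]) {
    return { [row.typname]: [row.enumlabel] }
  }
  acc[row.typname].push(row.enumlabel)
  return acc
}, {})

// enumValues is { mood: ["happy", "sad"] }, later looked up per column via
// enumValues?.[column.udt_name]

Note that the branch for an unseen typname returns a fresh object rather than spreading the accumulator, so this sketch assumes the result set covers one enum type at a time.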
@@ -333,16 +353,13 @@ class PostgresIntegration extends Sql implements DatasourcePlus {
         column.is_generated && column.is_generated !== "NEVER"
       const isAuto: boolean = hasNextVal || identity || isGenerated
       const required = column.is_nullable === "NO"
-      const constraints = {
-        presence: required && !hasDefault && !isGenerated,
-      }
-      tables[tableName].schema[columnName] = {
+      tables[tableName].schema[columnName] = generateColumnDefinition({
         autocolumn: isAuto,
         name: columnName,
-        constraints,
-        ...convertSqlType(column.data_type),
+        presence: required && !hasDefault && !isGenerated,
         externalType: column.data_type,
-      }
+        options: enumValues?.[column.udt_name],
+      })
     }
 
     let finalizedTables = finaliseExternalTables(tables, entities)
@@ -67,6 +67,10 @@ const SQL_BOOLEAN_TYPE_MAP = {
   tinyint: FieldType.BOOLEAN,
 }
 
+const SQL_OPTIONS_TYPE_MAP = {
+  "user-defined": FieldType.OPTIONS,
+}
+
 const SQL_MISC_TYPE_MAP = {
   json: FieldType.JSON,
   bigint: FieldType.BIGINT,
@@ -78,6 +82,7 @@ const SQL_TYPE_MAP = {
   ...SQL_STRING_TYPE_MAP,
   ...SQL_BOOLEAN_TYPE_MAP,
   ...SQL_MISC_TYPE_MAP,
+  ...SQL_OPTIONS_TYPE_MAP,
 }
 
 export enum SqlClient {
@@ -178,25 +183,49 @@ export function breakRowIdField(_id: string | { _id: string }): any[] {
   }
 }
 
-export function convertSqlType(type: string) {
+export function generateColumnDefinition(config: {
+  externalType: string
+  autocolumn: boolean
+  name: string
+  presence: boolean
+  options?: string[]
+}) {
+  let { externalType, autocolumn, name, presence, options } = config
   let foundType = FieldType.STRING
-  const lcType = type.toLowerCase()
+  const lowerCaseType = externalType.toLowerCase()
   let matchingTypes = []
   for (let [external, internal] of Object.entries(SQL_TYPE_MAP)) {
-    if (lcType.includes(external)) {
+    if (lowerCaseType.includes(external)) {
       matchingTypes.push({ external, internal })
     }
   }
-  //Set the foundType based the longest match
+  // Set the foundType based the longest match
   if (matchingTypes.length > 0) {
     foundType = matchingTypes.reduce((acc, val) => {
       return acc.external.length >= val.external.length ? acc : val
     }).internal
  }
-  const schema: any = { type: foundType }
+
+  const constraints: {
+    presence: boolean
+    inclusion?: string[]
+  } = {
+    presence,
+  }
+  if (foundType === FieldType.OPTIONS) {
+    constraints.inclusion = options
+  }
+
+  const schema: any = {
+    type: foundType,
+    externalType,
+    autocolumn,
+    name,
+    constraints,
+  }
   if (foundType === FieldType.DATETIME) {
-    schema.dateOnly = SQL_DATE_ONLY_TYPES.includes(lcType)
-    schema.timeOnly = SQL_TIME_ONLY_TYPES.includes(lcType)
+    schema.dateOnly = SQL_DATE_ONLY_TYPES.includes(lowerCaseType)
+    schema.timeOnly = SQL_TIME_ONLY_TYPES.includes(lowerCaseType)
   }
   return schema
 }
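
To see the consolidation in one place, a hedged sketch of calling the new helper for a Postgres enum column (the values are illustrative; "user-defined" is the external type the maps above resolve to options):

const definition = generateColumnDefinition({
  externalType: "user-defined",
  autocolumn: false,
  name: "mood",
  presence: true,
  options: ["happy", "sad"],
})

// SQL_OPTIONS_TYPE_MAP resolves "user-defined" to FieldType.OPTIONS, so the
// options feed constraints.inclusion, roughly yielding:
// {
//   type: FieldType.OPTIONS,
//   externalType: "user-defined",
//   autocolumn: false,
//   name: "mood",
//   constraints: { presence: true, inclusion: ["happy", "sad"] },
// }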
@@ -1,5 +1,5 @@
 import env from "./environment"
-import Koa, { ExtendableContext } from "koa"
+import Koa from "koa"
 import koaBody from "koa-body"
 import http from "http"
 import * as api from "./api"
@@ -27,6 +27,9 @@ export default function createKoaApp() {
       // @ts-ignore
       enableTypes: ["json", "form", "text"],
       parsedMethods: ["POST", "PUT", "PATCH", "DELETE"],
+      formidable: {
+        maxFileSize: parseInt(env.MAX_IMPORT_SIZE_MB || "100") * 1024 * 1024,
+      },
     })
   )
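
The upload cap above is expressed in megabytes via an environment variable; a quick sketch of the arithmetic with the default value:

// With MAX_IMPORT_SIZE_MB unset, the fallback "100" applies:
const maxFileSize = parseInt("100") * 1024 * 1024 // 104857600 bytes (100 MB)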
@@ -5,6 +5,7 @@ import {
   tenancy,
   context,
   users,
+  auth,
 } from "@budibase/backend-core"
 import { generateUserMetadataID, isDevAppID } from "../db/utils"
 import { getCachedSelf } from "../utilities/global"
@@ -69,28 +70,34 @@ export default async (ctx: UserCtx, next: any) => {
     return next()
   }
 
-  return context.doInAppContext(appId, async () => {
-    // if the user not in the right tenant then make sure they have no permissions
-    // need to judge this only based on the request app ID,
-    if (
-      env.MULTI_TENANCY &&
-      ctx.user?._id &&
-      requestAppId &&
-      !tenancy.isUserInAppTenant(requestAppId, ctx.user)
-    ) {
-      // don't error, simply remove the users rights (they are a public user)
-      ctx.user = users.cleanseUserObject(ctx.user) as ContextUser
-      ctx.isAuthenticated = false
-      roleId = roles.BUILTIN_ROLE_IDS.PUBLIC
-    }
+  const userId = ctx.user ? generateUserMetadataID(ctx.user._id!) : undefined
 
+  // if the user is not in the right tenant then make sure to wipe their cookie
+  // also cleanse any information about them that has been allocated
+  // this avoids apps making calls to say the worker which are cross tenant,
+  // we simply remove the authentication
+  if (
+    env.MULTI_TENANCY &&
+    userId &&
+    requestAppId &&
+    !tenancy.isUserInAppTenant(requestAppId, ctx.user)
+  ) {
+    // clear out the user
+    ctx.user = users.cleanseUserObject(ctx.user) as ContextUser
+    ctx.isAuthenticated = false
+    roleId = roles.BUILTIN_ROLE_IDS.PUBLIC
+    // remove the cookie, so future calls are public
+    await auth.platformLogout({
+      ctx,
+      userId,
+    })
+  }
+
+  return context.doInAppContext(appId, async () => {
     ctx.appId = appId
     if (roleId) {
       ctx.roleId = roleId
       const globalId = ctx.user ? ctx.user._id : undefined
-      const userId = ctx.user
-        ? generateUserMetadataID(ctx.user._id!)
-        : undefined
       ctx.user = {
         ...ctx.user!,
         // override userID with metadata one
@@ -1,3 +1,4 @@
 export const DB_EXPORT_FILE = "db.txt"
 export const GLOBAL_DB_EXPORT_FILE = "global.txt"
 export const STATIC_APP_FILES = ["manifest.json", "budibase-client.js"]
+export const ATTACHMENT_DIRECTORY = "attachments"
@@ -8,13 +8,15 @@ import {
   TABLE_ROW_PREFIX,
   USER_METDATA_PREFIX,
 } from "../../../db/utils"
-import { DB_EXPORT_FILE, STATIC_APP_FILES } from "./constants"
+import {
+  DB_EXPORT_FILE,
+  STATIC_APP_FILES,
+  ATTACHMENT_DIRECTORY,
+} from "./constants"
 import fs from "fs"
 import { join } from "path"
 import env from "../../../environment"
-const uuid = require("uuid/v4")
+import { v4 as uuid } from "uuid"
 
 import tar from "tar"
 
 const MemoryStream = require("memorystream")
@@ -30,12 +32,11 @@ export interface ExportOpts extends DBDumpOpts {
   encryptPassword?: string
 }
 
-function tarFilesToTmp(tmpDir: string, files: string[]) {
+async function tarFilesToTmp(tmpDir: string, files: string[]) {
   const fileName = `${uuid()}.tar.gz`
   const exportFile = join(budibaseTempDir(), fileName)
-  tar.create(
+  await tar.create(
     {
-      sync: true,
       gzip: true,
       file: exportFile,
       noDirRecurse: false,
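
A quick sketch of the behavioural difference: node-tar returns a promise from tar.create when sync is not set, so the call must now be awaited (the paths and file list here are illustrative):

import tar from "tar"

// Without { sync: true }, tar.create resolves asynchronously once the
// archive has been written; forgetting the await would race the cleanup
// code that follows it.
await tar.create(
  { gzip: true, file: "/tmp/export.tar.gz", cwd: "/tmp/app-export" },
  ["db.txt", "attachments"]
)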
@@ -150,19 +151,21 @@ export async function exportApp(appId: string, config?: ExportOpts) {
     for (let file of fs.readdirSync(tmpPath)) {
       const path = join(tmpPath, file)
 
-      await encryption.encryptFile(
-        { dir: tmpPath, filename: file },
-        config.encryptPassword
-      )
-
-      fs.rmSync(path)
+      // skip the attachments - too big to encrypt
+      if (file !== ATTACHMENT_DIRECTORY) {
+        await encryption.encryptFile(
+          { dir: tmpPath, filename: file },
+          config.encryptPassword
+        )
+        fs.rmSync(path)
+      }
     }
   }
 
   // if tar requested, return where the tarball is
   if (config?.tar) {
     // now the tmpPath contains both the DB export and attachments, tar this
-    const tarPath = tarFilesToTmp(tmpPath, fs.readdirSync(tmpPath))
+    const tarPath = await tarFilesToTmp(tmpPath, fs.readdirSync(tmpPath))
     // cleanup the tmp export files as tarball returned
     fs.rmSync(tmpPath, { recursive: true, force: true })
@@ -6,17 +6,20 @@ import {
   AutomationTriggerStepId,
   RowAttachment,
 } from "@budibase/types"
-import { getAutomationParams, TABLE_ROW_PREFIX } from "../../../db/utils"
+import { getAutomationParams } from "../../../db/utils"
 import { budibaseTempDir } from "../../../utilities/budibaseDir"
-import { DB_EXPORT_FILE, GLOBAL_DB_EXPORT_FILE } from "./constants"
+import {
+  DB_EXPORT_FILE,
+  GLOBAL_DB_EXPORT_FILE,
+  ATTACHMENT_DIRECTORY,
+} from "./constants"
 import { downloadTemplate } from "../../../utilities/fileSystem"
 import { ObjectStoreBuckets } from "../../../constants"
 import { join } from "path"
 import fs from "fs"
 import sdk from "../../"
-const uuid = require("uuid/v4")
-const tar = require("tar")
+import { v4 as uuid } from "uuid"
+import tar from "tar"
 
 type TemplateType = {
   file?: {
@@ -114,12 +117,11 @@ async function getTemplateStream(template: TemplateType) {
   }
 }
 
-export function untarFile(file: { path: string }) {
+export async function untarFile(file: { path: string }) {
   const tmpPath = join(budibaseTempDir(), uuid())
   fs.mkdirSync(tmpPath)
   // extract the tarball
-  tar.extract({
-    sync: true,
+  await tar.extract({
     cwd: tmpPath,
     file: file.path,
   })
@@ -130,9 +132,11 @@ async function decryptFiles(path: string, password: string) {
   try {
     for (let file of fs.readdirSync(path)) {
       const inputPath = join(path, file)
-      const outputPath = inputPath.replace(/\.enc$/, "")
-      await encryption.decryptFile(inputPath, outputPath, password)
-      fs.rmSync(inputPath)
+      if (!inputPath.endsWith(ATTACHMENT_DIRECTORY)) {
+        const outputPath = inputPath.replace(/\.enc$/, "")
+        await encryption.decryptFile(inputPath, outputPath, password)
+        fs.rmSync(inputPath)
+      }
     }
   } catch (err: any) {
     if (err.message === "incorrect header check") {
@@ -162,7 +166,7 @@ export async function importApp(
   const isDirectory =
     template.file && fs.lstatSync(template.file.path).isDirectory()
   if (template.file && (isTar || isDirectory)) {
-    const tmpPath = isTar ? untarFile(template.file) : template.file.path
+    const tmpPath = isTar ? await untarFile(template.file) : template.file.path
     if (isTar && template.file.password) {
       await decryptFiles(tmpPath, template.file.password)
     }
@@ -133,9 +133,14 @@ export async function exportRows(
 
   let result = await search({ tableId, query: requestQuery, sort, sortOrder })
   let rows: Row[] = []
+  let headers
+
+  if (!tableName) {
+    throw new HTTPError("Could not find table name.", 400)
+  }
+  const schema = datasource.entities[tableName].schema
 
   // Filter data to only specified columns if required
 
   if (columns && columns.length) {
     for (let i = 0; i < result.rows.length; i++) {
       rows[i] = {}
@@ -143,22 +148,17 @@ export async function exportRows(
         rows[i][column] = result.rows[i][column]
       }
     }
+    headers = columns
   } else {
     rows = result.rows
   }
 
-  if (!tableName) {
-    throw new HTTPError("Could not find table name.", 400)
-  }
-  const schema = datasource.entities[tableName].schema
   let exportRows = cleanExportRows(rows, schema, format, columns)
 
-  let headers = Object.keys(schema)
-
   let content: string
   switch (format) {
     case exporters.Format.CSV:
-      content = exporters.csv(headers, exportRows)
+      content = exporters.csv(headers ?? Object.keys(schema), exportRows)
       break
     case exporters.Format.JSON:
      content = exporters.json(exportRows)
@@ -110,7 +110,7 @@ export async function exportRows(
 
   let rows: Row[] = []
   let schema = table.schema
+  let headers
   // Filter data to only specified columns if required
   if (columns && columns.length) {
     for (let i = 0; i < result.length; i++) {
@@ -119,6 +119,7 @@ export async function exportRows(
         rows[i][column] = result[i][column]
       }
     }
+    headers = columns
   } else {
     rows = result
   }
@@ -127,7 +128,7 @@ export async function exportRows(
   if (format === Format.CSV) {
     return {
       fileName: "export.csv",
-      content: csv(Object.keys(rows[0]), exportRows),
+      content: csv(headers ?? Object.keys(rows[0]), exportRows),
     }
   } else if (format === Format.JSON) {
     return {
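
The header fallback in both exporters follows the same pattern; a small sketch with hypothetical data:

// When the caller asked for specific columns, those become the CSV headers;
// otherwise the keys of the schema (or first row) are used.
const headers: string[] | undefined = ["name", "dob"]
const firstRow = { name: "A", age: 1, dob: "2000-01-01" }

const csvHeaders = headers ?? Object.keys(firstRow)
// csvHeaders is ["name", "dob"]; with headers undefined it would be
// ["name", "age", "dob"]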
@@ -136,6 +136,8 @@ export async function save(
     schema.main = true
   }
 
+  // add in the new table for relationship purposes
+  tables[tableToSave.name] = tableToSave
   cleanupRelationships(tableToSave, tables, oldTable)
 
   const operation = tableId ? Operation.UPDATE_TABLE : Operation.CREATE_TABLE
@@ -1,5 +1,6 @@
 import {
   Datasource,
+  FieldType,
   ManyToManyRelationshipFieldMetadata,
   ManyToOneRelationshipFieldMetadata,
   OneToManyRelationshipFieldMetadata,
@@ -42,10 +43,13 @@ export function cleanupRelationships(
     for (let [relatedKey, relatedSchema] of Object.entries(
       relatedTable.schema
     )) {
-      if (
-        relatedSchema.type === FieldTypes.LINK &&
-        relatedSchema.fieldName === foreignKey
-      ) {
+      if (relatedSchema.type !== FieldType.LINK) {
+        continue
+      }
+      // if they both have the same field name it will appear as if it needs to be removed,
+      // don't cleanup in this scenario
+      const sameFieldNameForBoth = relatedSchema.name === schema.name
+      if (relatedSchema.fieldName === foreignKey && !sameFieldNameForBoth) {
         delete relatedTable.schema[relatedKey]
       }
     }
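
To make the guard concrete, a hedged sketch with illustrative field names:

// Suppose both sides of a relationship use the same column name (hypothetical):
const schema = { name: "owner_id", fieldName: "owner_id" } // link on the table being saved
const relatedSchema = { name: "owner_id", fieldName: "owner_id" } // link on the related table
const foreignKey = "owner_id"

// relatedSchema.fieldName === foreignKey is true here, which previously looked
// like a stale link to delete; the sameFieldNameForBoth check recognises the
// self-mirroring case and leaves the related column alone.
const sameFieldNameForBoth = relatedSchema.name === schema.name // true, so no delete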
@@ -18,7 +18,6 @@ jest.mock("../../../utilities/rowProcessor", () => ({
 
 jest.mock("../../../api/controllers/view/exporters", () => ({
   ...jest.requireActual("../../../api/controllers/view/exporters"),
-  csv: jest.fn(),
   Format: {
     CSV: "csv",
   },
@@ -102,5 +101,32 @@ describe("external row sdk", () => {
         new HTTPError("Could not find table name.", 400)
       )
     })
+
+    it("should only export specified columns", async () => {
+      mockDatasourcesGet.mockImplementation(async () => ({
+        entities: {
+          tablename: {
+            schema: {
+              name: {},
+              age: {},
+              dob: {},
+            },
+          },
+        },
+      }))
+      const headers = ["name", "dob"]
+
+      const result = await exportRows({
+        tableId: "datasource__tablename",
+        format: Format.CSV,
+        query: {},
+        columns: headers,
+      })
+
+      expect(result).toEqual({
+        fileName: "export.csv",
+        content: `"name","dob"`,
+      })
+    })
   })
 })
@@ -56,6 +56,7 @@ import {
 
 import API from "./api"
 import { cloneDeep } from "lodash"
+import jwt, { Secret } from "jsonwebtoken"
 
 mocks.licenses.init(pro)
 
@@ -137,6 +138,10 @@ class TestConfiguration {
   }
 
   getAppId() {
+    if (!this.appId) {
+      throw "appId has not been initialised properly"
+    }
+
     return this.appId
   }
 
@@ -387,7 +392,7 @@ class TestConfiguration {
       sessionId: "sessionid",
       tenantId: this.getTenantId(),
     }
-    const authToken = auth.jwt.sign(authObj, coreEnv.JWT_SECRET)
+    const authToken = jwt.sign(authObj, coreEnv.JWT_SECRET as Secret)
 
     // returning necessary request headers
     await cache.user.invalidateUser(userId)
@@ -408,7 +413,7 @@ class TestConfiguration {
       sessionId: "sessionid",
       tenantId,
     }
-    const authToken = auth.jwt.sign(authObj, coreEnv.JWT_SECRET)
+    const authToken = jwt.sign(authObj, coreEnv.JWT_SECRET as Secret)
 
     const headers: any = {
       Accept: "application/json",
@@ -510,7 +515,7 @@ class TestConfiguration {
     // create dev app
     // clear any old app
     this.appId = null
-    this.app = await context.doInAppContext(null, async () => {
+    this.app = await context.doInTenant(this.tenantId!, async () => {
       const app = await this._req(
         { name: appName },
         null,
@@ -519,7 +524,7 @@ class TestConfiguration {
       this.appId = app.appId!
       return app
     })
-    return await context.doInAppContext(this.appId, async () => {
+    return await context.doInAppContext(this.getAppId(), async () => {
       // create production app
       this.prodApp = await this.publish()
 
@@ -817,7 +822,7 @@ class TestConfiguration {
   }
 
   async getAutomationLogs() {
-    return context.doInAppContext(this.appId, async () => {
+    return context.doInAppContext(this.getAppId(), async () => {
       const now = new Date()
       return await pro.sdk.automations.logs.logSearch({
         startDate: new Date(now.getTime() - 100000).toISOString(),
@@ -249,7 +249,9 @@ export async function outputProcessing<T extends Row[] | Row>(
         continue
       }
       row[property].forEach((attachment: RowAttachment) => {
-        attachment.url ??= objectStore.getAppFileUrl(attachment.key)
+        if (!attachment.url) {
+          attachment.url = objectStore.getAppFileUrl(attachment.key)
+        }
       })
     }
   } else if (
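
The change from "??=" to an explicit guard is subtle but deliberate: "??=" only assigns when url is null or undefined, whereas the falsy check also regenerates a signed URL when url is an empty string (the new test below exercises exactly this). A minimal sketch, assuming the objectStore helper imported by this file:

const attachment = { key: "test.jpg", url: "" }

// "attachment.url ??= ..." would leave the empty string in place;
// the explicit guard repopulates it.
if (!attachment.url) {
  attachment.url = objectStore.getAppFileUrl(attachment.key)
}
// A non-empty url such as "aaaa" is left untouched by both forms.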
@@ -3,6 +3,7 @@ import {
   FieldType,
   FieldTypeSubtypes,
   INTERNAL_TABLE_SOURCE_ID,
+  RowAttachment,
   Table,
   TableSourceType,
 } from "@budibase/types"
@@ -70,6 +71,49 @@ describe("rowProcessor - outputProcessing", () => {
     )
   })
 
+  it("should handle attachments correctly", async () => {
+    const table: Table = {
+      _id: generator.guid(),
+      name: "TestTable",
+      type: "table",
+      sourceId: INTERNAL_TABLE_SOURCE_ID,
+      sourceType: TableSourceType.INTERNAL,
+      schema: {
+        attach: {
+          type: FieldType.ATTACHMENT,
+          name: "attach",
+          constraints: {},
+        },
+      },
+    }
+
+    const row: { attach: RowAttachment[] } = {
+      attach: [
+        {
+          size: 10,
+          name: "test",
+          extension: "jpg",
+          key: "test.jpg",
+        },
+      ],
+    }
+
+    const output = await outputProcessing(table, row, { squash: false })
+    expect(output.attach[0].url).toBe(
+      "/files/signed/prod-budi-app-assets/test.jpg"
+    )
+
+    row.attach[0].url = ""
+    const output2 = await outputProcessing(table, row, { squash: false })
+    expect(output2.attach[0].url).toBe(
+      "/files/signed/prod-budi-app-assets/test.jpg"
+    )
+
+    row.attach[0].url = "aaaa"
+    const output3 = await outputProcessing(table, row, { squash: false })
+    expect(output3.attach[0].url).toBe("aaaa")
+  })
+
   it("process output even when the field is not empty", async () => {
     const table: Table = {
       _id: generator.guid(),
@@ -315,7 +315,7 @@ export const runLuceneQuery = (docs: any[], query?: SearchQuery) => {
           new Date(docValue).getTime() > new Date(testValue.high).getTime()
         )
       }
-      throw "Cannot perform range filter - invalid type."
+      return false
     }
   )
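
In practice this means an unparseable value (for example an unresolved binding) no longer aborts the whole query; as the updated test below shows, the documents are returned instead of an error being thrown. A sketch using the helpers from that test:

const docs = [{ order_id: 4, order_date: "{{ Binding.INVALID }}" }]
const query = buildQuery("range", {
  order_date: {
    low: "2016-01-04T00:00:00.000Z",
    high: "2016-01-11T00:00:00.000Z",
  },
})

// Previously: threw "Cannot perform range filter - invalid type."
// Now: resolves without throwing and the docs come back unchanged.
runLuceneQuery(docs, query) // equals docs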
@@ -130,32 +130,28 @@ describe("runLuceneQuery", () => {
     expect(runLuceneQuery(docs, query).map(row => row.order_id)).toEqual([2])
   })
 
-  it("should throw an error is an invalid doc value is passed into a range filter", async () => {
+  it("should return return all docs if an invalid doc value is passed into a range filter", async () => {
+    const docs = [
+      {
+        order_id: 4,
+        customer_id: 1758,
+        order_status: 5,
+        order_date: "{{ Binding.INVALID }}",
+        required_date: "2017-03-05T00:00:00.000Z",
+        shipped_date: "2017-03-03T00:00:00.000Z",
+        store_id: 2,
+        staff_id: 7,
+        description: undefined,
+        label: "",
+      },
+    ]
     const query = buildQuery("range", {
       order_date: {
         low: "2016-01-04T00:00:00.000Z",
         high: "2016-01-11T00:00:00.000Z",
       },
     })
-    expect(() =>
-      runLuceneQuery(
-        [
-          {
-            order_id: 4,
-            customer_id: 1758,
-            order_status: 5,
-            order_date: "INVALID",
-            required_date: "2017-03-05T00:00:00.000Z",
-            shipped_date: "2017-03-03T00:00:00.000Z",
-            store_id: 2,
-            staff_id: 7,
-            description: undefined,
-            label: "",
-          },
-        ],
-        query
-      )
-    ).toThrowError("Cannot perform range filter - invalid type.")
+    expect(runLuceneQuery(docs, query)).toEqual(docs)
   })
 
   it("should return rows with matches on empty filter", () => {
@@ -0,0 +1,9 @@
+export interface DatasourceAuthCookie {
+  appId: string
+  provider: string
+}
+
+export interface SessionCookie {
+  sessionId: string
+  userId: string
+}
@@ -9,3 +9,4 @@ export * from "./app"
 export * from "./global"
 export * from "./pagination"
 export * from "./searchFilter"
+export * from "./cookies"
@@ -10,6 +10,7 @@ export enum LockType {
   DEFAULT = "default",
   DELAY_500 = "delay_500",
   CUSTOM = "custom",
+  AUTO_EXTEND = "auto_extend",
 }
 
 export enum LockName {
@@ -21,7 +22,7 @@ export enum LockName {
   QUOTA_USAGE_EVENT = "quota_usage_event",
 }
 
-export interface LockOptions {
+export type LockOptions = {
   /**
    * The lock type determines which client to use
    */
@@ -35,10 +36,6 @@ export interface LockOptions {
    * The name for the lock
    */
   name: LockName
-  /**
-   * The ttl to auto-expire the lock if not unlocked manually
-   */
-  ttl: number
   /**
    * The individual resource to lock. This is useful for locking around very specific identifiers, e.g. a document that is prone to conflicts
    */
@@ -47,4 +44,16 @@ export interface LockOptions {
    * This is a system-wide lock - don't use tenancy in lock key
    */
   systemLock?: boolean
-}
+} & (
+  | {
+      /**
+       * The ttl to auto-expire the lock if not unlocked manually
+       */
+      ttl: number
+      type: Exclude<LockType, LockType.AUTO_EXTEND>
+    }
+  | {
+      type: LockType.AUTO_EXTEND
+      onExtend?: () => void
+    }
+)
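
A sketch of how the discriminated union reads at a call site, using only enum members shown above (the ttl value is illustrative):

// Every non-auto-extending lock must carry a ttl...
const timedLock: LockOptions = {
  type: LockType.DEFAULT,
  name: LockName.QUOTA_USAGE_EVENT,
  ttl: 15000,
}

// ...while an AUTO_EXTEND lock omits ttl and may register an onExtend hook,
// so the compiler rejects a ttl-less DEFAULT lock at the type level.
const autoExtendLock: LockOptions = {
  type: LockType.AUTO_EXTEND,
  name: LockName.QUOTA_USAGE_EVENT,
  onExtend: () => console.log("lock lease extended"),
}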
@@ -15,6 +15,7 @@ import {
   PasswordResetRequest,
   PasswordResetUpdateRequest,
   GoogleInnerConfig,
+  DatasourceAuthCookie,
 } from "@budibase/types"
 import env from "../../../environment"
 
@@ -148,7 +149,13 @@ export const datasourcePreAuth = async (ctx: any, next: any) => {
 }
 
 export const datasourceAuth = async (ctx: any, next: any) => {
-  const authStateCookie = getCookie(ctx, Cookie.DatasourceAuth)
+  const authStateCookie = getCookie<DatasourceAuthCookie>(
+    ctx,
+    Cookie.DatasourceAuth
+  )
+  if (!authStateCookie) {
+    throw new Error("Unable to retrieve datasource authentication cookie")
+  }
   const provider = authStateCookie.provider
   const { middleware } = require(`@budibase/backend-core`)
   const handler = middleware.datasource[provider]
@@ -35,6 +35,7 @@ import {
   ConfigType,
 } from "@budibase/types"
 import API from "./api"
+import jwt, { Secret } from "jsonwebtoken"
 
 class TestConfiguration {
   server: any
@@ -209,7 +210,7 @@ class TestConfiguration {
       sessionId: "sessionid",
       tenantId: user.tenantId,
     }
-    const authCookie = auth.jwt.sign(authToken, coreEnv.JWT_SECRET)
+    const authCookie = jwt.sign(authToken, coreEnv.JWT_SECRET as Secret)
     return {
       Accept: "application/json",
       ...this.cookieHeader([`${constants.Cookie.Auth}=${authCookie}`]),
@@ -327,7 +328,7 @@ class TestConfiguration {
   // CONFIGS - OIDC
 
   getOIDConfigCookie(configId: string) {
-    const token = auth.jwt.sign(configId, coreEnv.JWT_SECRET)
+    const token = jwt.sign(configId, coreEnv.JWT_SECRET as Secret)
     return this.cookieHeader([[`${constants.Cookie.OIDC_CONFIG}=${token}`]])
   }
yarn.lock
@@ -12667,16 +12667,16 @@ invert-kv@^2.0.0:
   resolved "https://registry.yarnpkg.com/invert-kv/-/invert-kv-2.0.0.tgz#7393f5afa59ec9ff5f67a27620d11c226e3eec02"
   integrity sha512-wPVv/y/QQ/Uiirj/vh3oP+1Ww+AWehmi1g5fFWGPF6IpCBCDVrhgHRMvrLfdYcwDh3QJbGXDW4JAuzxElLSqKA==
 
-ioredis-mock@8.7.0:
-  version "8.7.0"
-  resolved "https://registry.yarnpkg.com/ioredis-mock/-/ioredis-mock-8.7.0.tgz#9877a85e0d233e1b49123d1c6e320df01e9a1d36"
-  integrity sha512-BJcSjkR3sIMKbH93fpFzwlWi/jl1kd5I3vLvGQxnJ/W/6bD2ksrxnyQN186ljAp3Foz4p1ivViDE3rZeKEAluA==
+ioredis-mock@8.9.0:
+  version "8.9.0"
+  resolved "https://registry.yarnpkg.com/ioredis-mock/-/ioredis-mock-8.9.0.tgz#5d694c4b81d3835e4291e0b527f947e260981779"
+  integrity sha512-yIglcCkI1lvhwJVoMsR51fotZVsPsSk07ecTCgRTRlicG0Vq3lke6aAaHklyjmRNRsdYAgswqC2A0bPtQK4LSw==
   dependencies:
     "@ioredis/as-callback" "^3.0.0"
     "@ioredis/commands" "^1.2.0"
     fengari "^0.1.4"
     fengari-interop "^0.1.3"
-    semver "^7.3.8"
+    semver "^7.5.4"
 
 ioredis@5.3.2:
   version "5.3.2"