Merge branch 'master' into budi-8166/search-filter-operators-dont-have-options-for-some-types

commit ffa1e64e9e
Adria Navarro, 2024-04-17 10:25:19 +02:00, committed by GitHub
GPG Key ID: B5690EEEBB952194 (no known key found for this signature in the database)
52 changed files with 885 additions and 233 deletions

View File

@ -152,6 +152,8 @@ $ helm install --create-namespace --namespace budibase budibase . -f values.yaml
| services.apps.autoscaling.targetCPUUtilizationPercentage | int | `80` | Target CPU utilization percentage for the apps service. Note that for autoscaling to work, you will need to have metrics-server configured, and resources set for the apps pods. |
| services.apps.extraContainers | list | `[]` | Additional containers to be added to the apps pod. |
| services.apps.extraEnv | list | `[]` | Extra environment variables to set for apps pods. Takes a list of name=value pairs. |
| services.apps.extraVolumeMounts | list | `[]` | Additional volumeMounts to the main apps container. |
| services.apps.extraVolumes | list | `[]` | Additional volumes to the apps pod. |
| services.apps.httpLogging | int | `1` | Whether or not to log HTTP requests to the apps service. |
| services.apps.livenessProbe | object | HTTP health checks. | Liveness probe configuration for apps pods. You shouldn't need to change this, but if you want to you can find more information here: <https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/> |
| services.apps.logLevel | string | `"info"` | The log level for the apps service. |
@ -166,6 +168,8 @@ $ helm install --create-namespace --namespace budibase budibase . -f values.yaml
| services.automationWorkers.enabled | bool | `true` | Whether or not to enable the automation worker service. If you disable this, automations will be processed by the apps service. |
| services.automationWorkers.extraContainers | list | `[]` | Additional containers to be added to the automationWorkers pod. |
| services.automationWorkers.extraEnv | list | `[]` | Extra environment variables to set for automation worker pods. Takes a list of name=value pairs. |
| services.automationWorkers.extraVolumeMounts | list | `[]` | Additional volumeMounts to the main automationWorkers container. |
| services.automationWorkers.extraVolumes | list | `[]` | Additional volumes to the automationWorkers pod. |
| services.automationWorkers.livenessProbe | object | HTTP health checks. | Liveness probe configuration for automation worker pods. You shouldn't need to change this, but if you want to you can find more information here: <https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/> |
| services.automationWorkers.logLevel | string | `"info"` | The log level for the automation worker service. |
| services.automationWorkers.readinessProbe | object | HTTP health checks. | Readiness probe configuration for automation worker pods. You shouldn't need to change this, but if you want to you can find more information here: <https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/> |
@ -185,6 +189,8 @@ $ helm install --create-namespace --namespace budibase budibase . -f values.yaml
| services.objectStore.cloudfront.privateKey64 | string | `""` | Base64 encoded private key for the above public key. |
| services.objectStore.cloudfront.publicKeyId | string | `""` | ID of public key stored in cloudfront. |
| services.objectStore.extraContainers | list | `[]` | Additional containers to be added to the objectStore pod. |
| services.objectStore.extraVolumeMounts | list | `[]` | Additional volumeMounts to the main objectStore container. |
| services.objectStore.extraVolumes | list | `[]` | Additional volumes to the objectStore pod. |
| services.objectStore.minio | bool | `true` | Set to false if using another object store, such as S3. You will need to set `services.objectStore.url` to point to your bucket if you do this. |
| services.objectStore.region | string | `""` | AWS_REGION if using S3 |
| services.objectStore.resources | object | `{}` | The resources to use for Minio pods. See <https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/> for more information on how to set these. |
@ -197,6 +203,8 @@ $ helm install --create-namespace --namespace budibase budibase . -f values.yaml
| services.proxy.autoscaling.minReplicas | int | `1` | |
| services.proxy.autoscaling.targetCPUUtilizationPercentage | int | `80` | Target CPU utilization percentage for the proxy service. Note that for autoscaling to work, you will need to have metrics-server configured, and resources set for the proxy pods. |
| services.proxy.extraContainers | list | `[]` | |
| services.proxy.extraVolumeMounts | list | `[]` | Additional volumeMounts to the main proxy container. |
| services.proxy.extraVolumes | list | `[]` | Additional volumes to the proxy pod. |
| services.proxy.livenessProbe | object | HTTP health checks. | Liveness probe configuration for proxy pods. You shouldn't need to change this, but if you want to you can find more information here: <https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/> |
| services.proxy.readinessProbe | object | HTTP health checks. | Readiness probe configuration for proxy pods. You shouldn't need to change this, but if you want to you can find more information here: <https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/> |
| services.proxy.replicaCount | int | `1` | The number of proxy replicas to run. |
@ -204,6 +212,9 @@ $ helm install --create-namespace --namespace budibase budibase . -f values.yaml
| services.proxy.startupProbe | object | HTTP health checks. | Startup probe configuration for proxy pods. You shouldn't need to change this, but if you want to you can find more information here: <https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/> |
| services.redis.enabled | bool | `true` | Whether or not to deploy a Redis pod into your cluster. |
| services.redis.extraContainers | list | `[]` | Additional containers to be added to the redis pod. |
| services.redis.extraVolumeMounts | list | `[]` | Additional volumeMounts to the main redis container. |
| services.redis.extraVolumes | list | `[]` | Additional volumes to the redis pod. |
| services.redis.image | string | `"redis"` | The Redis image to use. |
| services.redis.password | string | `"budibase"` | The password to use when connecting to Redis. It's recommended that you change this from the default if you're running Redis in-cluster. |
| services.redis.port | int | `6379` | Port to expose Redis on. |
| services.redis.resources | object | `{}` | The resources to use for Redis pods. See <https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/> for more information on how to set these. |
@ -216,6 +227,8 @@ $ helm install --create-namespace --namespace budibase budibase . -f values.yaml
| services.worker.autoscaling.targetCPUUtilizationPercentage | int | `80` | Target CPU utilization percentage for the worker service. Note that for autoscaling to work, you will need to have metrics-server configured, and resources set for the worker pods. |
| services.worker.extraContainers | list | `[]` | Additional containers to be added to the worker pod. |
| services.worker.extraEnv | list | `[]` | Extra environment variables to set for worker pods. Takes a list of name=value pairs. |
| services.worker.extraVolumeMounts | list | `[]` | Additional volumeMounts to the main worker container. |
| services.worker.extraVolumes | list | `[]` | Additional volumes to the worker pod. |
| services.worker.httpLogging | int | `1` | Whether or not to log HTTP requests to the worker service. |
| services.worker.livenessProbe | object | HTTP health checks. | Liveness probe configuration for worker pods. You shouldn't need to change this, but if you want to you can find more information here: <https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/> |
| services.worker.logLevel | string | `"info"` | The log level for the worker service. |
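For example, using the new options to mount a secret into the apps service might look like this in values.yaml (a minimal sketch; the volume and secret names are illustrative):

```yaml
services:
  apps:
    # Mount the extra volume into the main apps container.
    extraVolumeMounts:
      - name: custom-certs           # illustrative volume name
        mountPath: /etc/certs
        readOnly: true
    # Declare the matching volume on the apps pod.
    extraVolumes:
      - name: custom-certs
        secret:
          secretName: my-tls-certs   # assumed pre-existing Secret
```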

View File

@ -235,6 +235,10 @@ spec:
args:
{{- toYaml .Values.services.apps.args | nindent 10 }}
{{ end }}
{{ if .Values.services.apps.extraVolumeMounts }}
volumeMounts:
{{- toYaml .Values.services.apps.extraVolumeMounts | nindent 10 }}
{{- end }}
{{- if .Values.services.apps.extraContainers }}
{{- toYaml .Values.services.apps.extraContainers | nindent 6 }}
{{- end }}
@ -261,4 +265,8 @@ spec:
- name: ndots
value: {{ .Values.services.apps.ndots | quote }}
{{ end }}
{{ if .Values.services.apps.extraVolumes }}
volumes:
{{- toYaml .Values.services.apps.extraVolumes | nindent 6 }}
{{- end }}
status: {}

View File

@ -235,6 +235,10 @@ spec:
args:
{{- toYaml .Values.services.automationWorkers.args | nindent 10 }}
{{ end }}
{{ if .Values.services.automationWorkers.extraVolumeMounts }}
volumeMounts:
{{- toYaml .Values.services.automationWorkers.extraVolumeMounts | nindent 10 }}
{{ end }}
{{- if .Values.services.automationWorkers.extraContainers }}
{{- toYaml .Values.services.automationWorkers.extraContainers | nindent 6 }}
{{- end }}
@ -261,5 +265,9 @@ spec:
- name: ndots
value: {{ .Values.services.automationWorkers.ndots | quote }}
{{ end }}
{{ if .Values.services.automationWorkers.extraVolumes }}
volumes:
{{- toYaml .Values.services.automationWorkers.extraVolumes | nindent 8 }}
{{ end }}
status: {}
{{- end }}

View File

@ -54,6 +54,9 @@ spec:
volumeMounts:
- mountPath: /data
name: minio-data
{{ if .Values.services.objectStore.extraVolumeMounts }}
{{- toYaml .Values.services.objectStore.extraVolumeMounts | nindent 8 }}
{{- end }}
{{- if .Values.services.objectStore.extraContainers }}
{{- toYaml .Values.services.objectStore.extraContainers | nindent 6 }}
{{- end }}
@ -78,5 +81,8 @@ spec:
- name: minio-data
persistentVolumeClaim:
claimName: minio-data
{{ if .Values.services.objectStore.extraVolumes }}
{{- toYaml .Values.services.objectStore.extraVolumes | nindent 6 }}
{{- end }}
status: {}
{{- end }}

View File

@ -82,6 +82,10 @@ spec:
resources:
{{- toYaml . | nindent 10 }}
{{ end }}
{{ if .Values.services.proxy.extraVolumeMounts }}
volumeMounts:
{{- toYaml .Values.services.proxy.extraVolumeMounts | nindent 8 }}
{{- end }}
{{- if .Values.services.proxy.extraContainers }}
{{- toYaml .Values.services.proxy.extraContainers | nindent 6 }}
{{- end }}
@ -110,7 +114,10 @@ spec:
args:
{{- toYaml .Values.services.proxy.args | nindent 8 }}
{{ end }}
{{ if .Values.services.proxy.extraVolumes }}
volumes:
{{- toYaml .Values.services.proxy.extraVolumes | nindent 6 }}
{{ end }}
{{ if .Values.services.proxy.ndots }}
dnsConfig:
options:

View File

@ -22,7 +22,7 @@ spec:
- redis-server
- --requirepass
- {{ .Values.services.redis.password }}
image: redis
image: {{ .Values.services.redis.image }}
imagePullPolicy: ""
name: redis-service
ports:
@ -34,6 +34,9 @@ spec:
volumeMounts:
- mountPath: /data
name: redis-data
{{ if .Values.services.redis.extraVolumeMounts }}
{{- toYaml .Values.services.redis.extraVolumeMounts | nindent 8 }}
{{- end }}
{{- if .Values.services.redis.extraContainers }}
{{- toYaml .Values.services.redis.extraContainers | nindent 6 }}
{{- end }}
@ -58,6 +61,9 @@ spec:
- name: redis-data
persistentVolumeClaim:
claimName: redis-data
{{ if .Values.services.redis.extraVolumes }}
{{- toYaml .Values.services.redis.extraVolumes | nindent 6 }}
{{- end }}
status: {}
{{- end }}
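With the image now read from values, a non-default Redis image can be supplied without editing the template (a minimal sketch; the tag is illustrative):

```yaml
services:
  redis:
    image: redis:7-alpine   # any Redis-compatible image; the chart default remains "redis"
```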

View File

@ -221,6 +221,10 @@ spec:
args:
{{- toYaml .Values.services.worker.args | nindent 10 }}
{{ end }}
{{ if .Values.services.worker.extraVolumeMounts }}
volumeMounts:
{{- toYaml .Values.services.worker.extraVolumeMounts | nindent 10 }}
{{- end }}
{{- if .Values.services.worker.extraContainers }}
{{- toYaml .Values.services.worker.extraContainers | nindent 6 }}
{{- end }}
@ -247,4 +251,8 @@ spec:
- name: ndots
value: {{ .Values.services.worker.ndots | quote }}
{{ end }}
{{ if .Values.services.worker.extraVolumes }}
volumes:
{{- toYaml .Values.services.worker.extraVolumes | nindent 6 }}
{{- end }}
status: {}

View File

@ -211,6 +211,16 @@ services:
# - name: my-sidecar
# image: myimage:latest
# -- Additional volumeMounts to the main proxy container.
extraVolumeMounts: []
# - name: my-volume
# mountPath: /path/to/mount
# -- Additional volumes to the proxy pod.
extraVolumes: []
# - name: my-volume
# emptyDir: {}
apps:
# @ignore (you shouldn't need to change this)
port: 4002
@ -283,6 +293,16 @@ services:
# - name: my-sidecar
# image: myimage:latest
# -- Additional volumeMounts to the main apps container.
extraVolumeMounts: []
# - name: my-volume
# mountPath: /path/to/mount
# -- Additional volumes to the apps pod.
extraVolumes: []
# - name: my-volume
# emptyDir: {}
automationWorkers:
# -- Whether or not to enable the automation worker service. If you disable this,
# automations will be processed by the apps service.
@ -359,6 +379,16 @@ services:
# - name: my-sidecar
# image: myimage:latest
# -- Additional volumeMounts to the main automationWorkers container.
extraVolumeMounts: []
# - name: my-volume
# mountPath: /path/to/mount
# -- Additional volumes to the automationWorkers pod.
extraVolumes: []
# - name: my-volume
# emptyDir: {}
worker:
# @ignore (you shouldn't need to change this)
port: 4003
@ -431,6 +461,16 @@ services:
# - name: my-sidecar
# image: myimage:latest
# -- Additional volumeMounts to the main worker container.
extraVolumeMounts: []
# - name: my-volume
# mountPath: /path/to/mount
# -- Additional volumes to the worker pod.
extraVolumes: []
# - name: my-volume
# emptyDir: {}
couchdb:
# -- Whether or not to spin up a CouchDB instance in your cluster. True by
# default, and the configuration for the CouchDB instance is under the
@ -456,6 +496,8 @@ services:
resources: {}
redis:
# -- The Redis image to use.
image: redis
# -- Whether or not to deploy a Redis pod into your cluster.
enabled: true
# -- Port to expose Redis on.
@ -484,6 +526,16 @@ services:
# - name: my-sidecar
# image: myimage:latest
# -- Additional volumeMounts to the main redis container.
extraVolumeMounts: []
# - name: my-volume
# mountPath: /path/to/mount
# -- Additional volumes to the redis pod.
extraVolumes: []
# - name: my-volume
# emptyDir: {}
objectStore:
# -- Set to false if using another object store, such as S3. You will need
# to set `services.objectStore.url` to point to your bucket if you do this.
@ -530,6 +582,16 @@ services:
# - name: my-sidecar
# image: myimage:latest
# -- Additional volumeMounts to the main objectStore container.
extraVolumeMounts: []
# - name: my-volume
# mountPath: /path/to/mount
# -- Additional volumes to the objectStore pod.
extraVolumes: []
# - name: my-volume
# emptyDir: {}
# Override values in couchDB subchart. We're only specifying the values we're changing.
# If you want to see all of the available values, see:
# https://github.com/apache/couchdb-helm/tree/couchdb-4.3.0/couchdb

View File

@ -17,6 +17,7 @@ APP_PORT=4002
WORKER_PORT=4003
MINIO_PORT=4004
COUCH_DB_PORT=4005
COUCH_DB_SQS_PORT=4006
REDIS_PORT=6379
WATCHTOWER_PORT=6161
BUDIBASE_ENVIRONMENT=PRODUCTION
@ -28,4 +29,4 @@ BB_ADMIN_USER_PASSWORD=
# A path that is watched for plugin bundles. Any bundles found are imported automatically/
PLUGINS_DIR=
ROLLING_LOG_MAX_SIZE=

View File

@ -1,5 +1,5 @@
{
"version": "2.23.4",
"version": "2.23.6",
"npmClient": "yarn",
"packages": [
"packages/*",

View File

@ -56,6 +56,7 @@
"dev:noserver": "yarn run kill-builder && lerna run --stream dev:stack:up --ignore @budibase/account-portal-server && lerna run --stream dev --ignore @budibase/backend-core --ignore @budibase/server --ignore @budibase/worker --ignore=@budibase/account-portal-ui --ignore @budibase/account-portal-server",
"dev:server": "yarn run kill-server && lerna run --stream dev --scope @budibase/worker --scope @budibase/server",
"dev:accountportal": "yarn kill-accountportal && lerna run dev --stream --scope @budibase/account-portal-ui --scope @budibase/account-portal-server",
"dev:camunda": "./scripts/deploy-camunda.sh",
"dev:all": "yarn run kill-all && lerna run --stream dev",
"dev:built": "yarn run kill-all && cd packages/server && yarn dev:stack:up && cd ../../ && lerna run --stream dev:built",
"dev:docker": "yarn build --scope @budibase/server --scope @budibase/worker && docker-compose -f hosting/docker-compose.build.yaml -f hosting/docker-compose.dev.yaml --env-file hosting/.env up --build --scale proxy-service=0",

@ -1 +1 @@
Subproject commit a0ee9cad8cefb8f9f40228705711be174f018fa9
Subproject commit 328c84234d11d97d840f0eb2c72665b04ba9e4f8

View File

@ -107,7 +107,7 @@ const environment = {
ENCRYPTION_KEY: process.env.ENCRYPTION_KEY,
API_ENCRYPTION_KEY: getAPIEncryptionKey(),
COUCH_DB_URL: process.env.COUCH_DB_URL || "http://localhost:4005",
COUCH_DB_SQL_URL: process.env.COUCH_DB_SQL_URL || "http://localhost:4984",
COUCH_DB_SQL_URL: process.env.COUCH_DB_SQL_URL || "http://localhost:4006",
COUCH_DB_USERNAME: process.env.COUCH_DB_USER,
COUCH_DB_PASSWORD: process.env.COUCH_DB_PASSWORD,
GOOGLE_CLIENT_ID: process.env.GOOGLE_CLIENT_ID,

View File

@ -27,14 +27,6 @@
return []
}
}
async function deleteAttachments(fileList) {
try {
return await API.deleteBuilderAttachments(fileList)
} catch (error) {
return []
}
}
</script>
<Dropzone
@ -42,6 +34,5 @@
{label}
{...$$restProps}
{processFiles}
{deleteAttachments}
{handleFileTooLarge}
/>

View File

@ -157,6 +157,11 @@
width: 100%;
}
/* Use normal theme colors for links when using a top nav */
.dropdown:not(.left) .sublinks a {
color: var(--spectrum-alias-text-color);
}
/* Left dropdowns */
.dropdown.left .sublinks-wrapper {
display: none;

View File

@ -58,17 +58,6 @@
}
}
const deleteAttachments = async fileList => {
try {
return await API.deleteAttachments({
keys: fileList,
tableId: formContext?.dataSource?.tableId,
})
} catch (error) {
return []
}
}
const handleChange = e => {
const value = fieldApiMapper.set(e.detail)
const changed = fieldApi.setValue(value)
@ -98,7 +87,6 @@
error={fieldState.error}
on:change={handleChange}
{processFiles}
{deleteAttachments}
{handleFileTooLarge}
{handleTooManyFiles}
{maximum}

View File

@ -61,34 +61,6 @@ export const buildAttachmentEndpoints = API => {
})
return { publicUrl }
},
/**
* Deletes attachments from the bucket.
* @param keys the attachments to delete
* @param tableId the associated table ID
*/
deleteAttachments: async ({ keys, tableId }) => {
return await API.post({
url: `/api/attachments/${tableId}/delete`,
body: {
keys,
},
})
},
/**
* Deletes attachments from the builder bucket.
* @param keys the attachments to delete
*/
deleteBuilderAttachments: async keys => {
return await API.post({
url: `/api/attachments/delete`,
body: {
keys,
},
})
},
/**
* Download an attachment from a row given its column name.
* @param datasourceId the ID of the datasource to download from

View File

@ -61,14 +61,6 @@
}
}
const deleteAttachments = async fileList => {
try {
return await API.deleteBuilderAttachments(fileList)
} catch (error) {
return []
}
}
onMount(() => {
api = {
focus: () => open(),
@ -101,7 +93,6 @@
on:change={e => onChange(e.detail)}
maximum={maximum || schema.constraints?.length?.maximum}
{processFiles}
{deleteAttachments}
{handleFileTooLarge}
/>
</div>

@ -1 +1 @@
Subproject commit ef186d00241f96037f9fd34d7a3826041977ab3a
Subproject commit c68183402b8fb17248572006531d5293ffc8a9ac

View File

@ -125,7 +125,7 @@
"@babel/preset-env": "7.16.11",
"@swc/core": "1.3.71",
"@swc/jest": "0.2.27",
"@types/archiver": "^6.0.2",
"@types/archiver": "6.0.2",
"@types/global-agent": "2.1.1",
"@types/google-spreadsheet": "3.1.5",
"@types/jest": "29.5.5",

View File

@ -320,6 +320,7 @@ async function performAppCreate(ctx: UserCtx<CreateAppRequest, App>) {
"theme",
"customTheme",
"icon",
"snippets",
]
keys.forEach(key => {
if (existing[key]) {

View File

@ -36,7 +36,6 @@ import { getDatasourceAndQuery } from "../../../sdk/app/rows/utils"
import { processObjectSync } from "@budibase/string-templates"
import { cloneDeep } from "lodash/fp"
import { db as dbCore } from "@budibase/backend-core"
import AliasTables from "./alias"
import sdk from "../../../sdk"
import env from "../../../environment"
@ -120,6 +119,9 @@ async function removeManyToManyRelationships(
endpoint: getEndpoint(tableId, Operation.DELETE),
body: { [colName]: null },
filters,
meta: {
table,
},
})
} else {
return []
@ -134,6 +136,9 @@ async function removeOneToManyRelationships(rowId: string, table: Table) {
return getDatasourceAndQuery({
endpoint: getEndpoint(tableId, Operation.UPDATE),
filters,
meta: {
table,
},
})
} else {
return []
@ -249,6 +254,9 @@ export class ExternalRequest<T extends Operation> {
const response = await getDatasourceAndQuery({
endpoint: getEndpoint(table._id!, Operation.READ),
filters: buildFilters(rowId, {}, table),
meta: {
table,
},
})
if (Array.isArray(response) && response.length > 0) {
return response[0]
@ -396,6 +404,9 @@ export class ExternalRequest<T extends Operation> {
[fieldName]: row[lookupField],
},
},
meta: {
table,
},
})
// this is the response from knex if no rows found
const rows: Row[] =
@ -426,6 +437,7 @@ export class ExternalRequest<T extends Operation> {
// if we're creating (in a through table) need to wipe the existing ones first
const promises = []
const related = await this.lookupRelations(mainTableId, row)
const table = this.getTable(mainTableId)!
for (let relationship of relationships) {
const { key, tableId, isUpdate, id, ...rest } = relationship
const body: { [key: string]: any } = processObjectSync(rest, row, {})
@ -471,6 +483,9 @@ export class ExternalRequest<T extends Operation> {
// if we're doing many relationships then we're writing, only one response
body,
filters: buildFilters(id, {}, linkTable),
meta: {
table,
},
})
)
} else {
@ -618,7 +633,7 @@ export class ExternalRequest<T extends Operation> {
if (env.SQL_ALIASING_DISABLE) {
response = await getDatasourceAndQuery(json)
} else {
const aliasing = new AliasTables(Object.keys(this.tables))
const aliasing = new sdk.rows.AliasTables(Object.keys(this.tables))
response = await aliasing.queryWithAliasing(json)
}

View File

@ -62,12 +62,12 @@ export function basicProcessing({
row,
table,
isLinked,
internal,
sqs,
}: {
row: Row
table: Table
isLinked: boolean
internal?: boolean
sqs?: boolean
}): Row {
const thisRow: Row = {}
// filter the row down to what is actually the row (not joined)
@ -84,12 +84,13 @@ export function basicProcessing({
thisRow[fieldName] = value
}
}
if (!internal) {
if (!sqs) {
thisRow._id = generateIdForRow(row, table, isLinked)
thisRow.tableId = table._id
thisRow._rev = "rev"
} else {
for (let internalColumn of CONSTANT_INTERNAL_ROW_COLS) {
const columns = Object.keys(table.schema)
for (let internalColumn of [...CONSTANT_INTERNAL_ROW_COLS, ...columns]) {
thisRow[internalColumn] = extractFieldValue({
row,
tableName: table._id!,

View File

@ -51,11 +51,11 @@ export async function updateRelationshipColumns(
continue
}
let linked = await basicProcessing({
let linked = basicProcessing({
row,
table: linkedTable,
isLinked: true,
internal: opts?.sqs,
sqs: opts?.sqs,
})
if (!linked._id) {
continue

View File

@ -132,6 +132,7 @@ export async function sqlOutputProcessing(
let rowId = row._id
if (opts?.sqs) {
rowId = getInternalRowId(row, table)
row._id = rowId
} else if (!rowId) {
rowId = generateIdForRow(row, table)
row._id = rowId
@ -153,7 +154,7 @@ export async function sqlOutputProcessing(
row,
table,
isLinked: false,
internal: opts?.sqs,
sqs: opts?.sqs,
}),
table
)
@ -167,7 +168,8 @@ export async function sqlOutputProcessing(
tables,
row,
finalRows,
relationships
relationships,
opts
)
}

View File

@ -127,13 +127,6 @@ export const uploadFile = async function (
)
}
export const deleteObjects = async function (ctx: Ctx) {
ctx.body = await objectStore.deleteFiles(
ObjectStoreBuckets.APPS,
ctx.request.body.keys
)
}
const requiresMigration = async (ctx: Ctx) => {
const appId = context.getAppId()
if (!appId) {

View File

@ -22,6 +22,7 @@ export async function makeTableRequest(
operation,
},
meta: {
table,
tables,
},
table,

View File

@ -32,11 +32,6 @@ router
.get("/builder/:file*", controller.serveBuilder)
.get("/api/assets/client", controller.serveClientLibrary)
.post("/api/attachments/process", authorized(BUILDER), controller.uploadFile)
.post(
"/api/attachments/delete",
authorized(BUILDER),
controller.deleteObjects
)
.post("/api/beta/:feature", controller.toggleBetaUiFeature)
.post(
"/api/attachments/:tableId/upload",
@ -44,12 +39,6 @@ router
authorized(PermissionType.TABLE, PermissionLevel.WRITE),
controller.uploadFile
)
.post(
"/api/attachments/:tableId/delete",
paramResource("tableId"),
authorized(PermissionType.TABLE, PermissionLevel.WRITE),
controller.deleteObjects
)
.get("/app/preview", authorized(BUILDER), controller.serveBuilderPreview)
.get("/app/:appUrl/:path*", controller.serveApp)
.get("/:appId/:path*", controller.serveApp)

View File

@ -8,6 +8,8 @@ import {
FieldType,
RowSearchParams,
SearchFilters,
SortOrder,
SortType,
Table,
TableSchema,
} from "@budibase/types"
@ -62,7 +64,32 @@ describe.each([
class SearchAssertion {
constructor(private readonly query: RowSearchParams) {}
async toFind(expectedRows: any[]) {
// Asserts that the query returns rows matching exactly the set of rows
// passed in. The order of the rows matters. Rows returned in an order
// different to the one passed in will cause the assertion to fail. Extra
// rows returned by the query will also cause the assertion to fail.
async toMatchExactly(expectedRows: any[]) {
const { rows: foundRows } = await config.api.row.search(table._id!, {
...this.query,
tableId: table._id!,
})
// eslint-disable-next-line jest/no-standalone-expect
expect(foundRows).toHaveLength(expectedRows.length)
// eslint-disable-next-line jest/no-standalone-expect
expect(foundRows).toEqual(
expectedRows.map((expectedRow: any) =>
expect.objectContaining(
foundRows.find(foundRow => _.isMatch(foundRow, expectedRow))
)
)
)
}
// Asserts that the query returns rows matching exactly the set of rows
// passed in. The order of the rows is not important, but extra rows will
// cause the assertion to fail.
async toContainExactly(expectedRows: any[]) {
const { rows: foundRows } = await config.api.row.search(table._id!, {
...this.query,
tableId: table._id!,
@ -82,8 +109,39 @@ describe.each([
)
}
// Asserts that the query returns rows matching the set of rows passed in.
// The order of the rows is not important. Extra rows will not cause the
// assertion to fail.
async toContain(expectedRows: any[]) {
const { rows: foundRows } = await config.api.row.search(table._id!, {
...this.query,
tableId: table._id!,
})
// eslint-disable-next-line jest/no-standalone-expect
expect(foundRows).toEqual(
expect.arrayContaining(
expectedRows.map((expectedRow: any) =>
expect.objectContaining(
foundRows.find(foundRow => _.isMatch(foundRow, expectedRow))
)
)
)
)
}
async toFindNothing() {
await this.toFind([])
await this.toContainExactly([])
}
async toHaveLength(length: number) {
const { rows: foundRows } = await config.api.row.search(table._id!, {
...this.query,
tableId: table._id!,
})
// eslint-disable-next-line jest/no-standalone-expect
expect(foundRows).toHaveLength(length)
}
}
@ -105,28 +163,33 @@ describe.each([
describe("misc", () => {
it("should return all if no query is passed", () =>
expectSearch({} as RowSearchParams).toFind([
expectSearch({} as RowSearchParams).toContainExactly([
{ name: "foo" },
{ name: "bar" },
]))
it("should return all if empty query is passed", () =>
expectQuery({}).toFind([{ name: "foo" }, { name: "bar" }]))
expectQuery({}).toContainExactly([{ name: "foo" }, { name: "bar" }]))
it("should return all if onEmptyFilter is RETURN_ALL", () =>
expectQuery({
onEmptyFilter: EmptyFilterOption.RETURN_ALL,
}).toFind([{ name: "foo" }, { name: "bar" }]))
}).toContainExactly([{ name: "foo" }, { name: "bar" }]))
it("should return nothing if onEmptyFilter is RETURN_NONE", () =>
expectQuery({
onEmptyFilter: EmptyFilterOption.RETURN_NONE,
}).toFindNothing())
it("should respect limit", () =>
expectSearch({ limit: 1, paginate: true, query: {} }).toHaveLength(1))
})
describe("equal", () => {
it("successfully finds a row", () =>
expectQuery({ equal: { name: "foo" } }).toFind([{ name: "foo" }]))
expectQuery({ equal: { name: "foo" } }).toContainExactly([
{ name: "foo" },
]))
it("fails to find nonexistent row", () =>
expectQuery({ equal: { name: "none" } }).toFindNothing())
@ -134,15 +197,21 @@ describe.each([
describe("notEqual", () => {
it("successfully finds a row", () =>
expectQuery({ notEqual: { name: "foo" } }).toFind([{ name: "bar" }]))
expectQuery({ notEqual: { name: "foo" } }).toContainExactly([
{ name: "bar" },
]))
it("fails to find nonexistent row", () =>
expectQuery({ notEqual: { name: "bar" } }).toFind([{ name: "foo" }]))
expectQuery({ notEqual: { name: "bar" } }).toContainExactly([
{ name: "foo" },
]))
})
describe("oneOf", () => {
it("successfully finds a row", () =>
expectQuery({ oneOf: { name: ["foo"] } }).toFind([{ name: "foo" }]))
expectQuery({ oneOf: { name: ["foo"] } }).toContainExactly([
{ name: "foo" },
]))
it("fails to find nonexistent row", () =>
expectQuery({ oneOf: { name: ["none"] } }).toFindNothing())
@ -150,11 +219,69 @@ describe.each([
describe("fuzzy", () => {
it("successfully finds a row", () =>
expectQuery({ fuzzy: { name: "oo" } }).toFind([{ name: "foo" }]))
expectQuery({ fuzzy: { name: "oo" } }).toContainExactly([
{ name: "foo" },
]))
it("fails to find nonexistent row", () =>
expectQuery({ fuzzy: { name: "none" } }).toFindNothing())
})
describe("range", () => {
it("successfully finds multiple rows", () =>
expectQuery({
range: { name: { low: "a", high: "z" } },
}).toContainExactly([{ name: "bar" }, { name: "foo" }]))
it("successfully finds a row with a high bound", () =>
expectQuery({
range: { name: { low: "a", high: "c" } },
}).toContainExactly([{ name: "bar" }]))
it("successfully finds a row with a low bound", () =>
expectQuery({
range: { name: { low: "f", high: "z" } },
}).toContainExactly([{ name: "foo" }]))
it("successfully finds no rows", () =>
expectQuery({
range: { name: { low: "g", high: "h" } },
}).toFindNothing())
})
describe("sort", () => {
it("sorts ascending", () =>
expectSearch({
query: {},
sort: "name",
sortOrder: SortOrder.ASCENDING,
}).toMatchExactly([{ name: "bar" }, { name: "foo" }]))
it("sorts descending", () =>
expectSearch({
query: {},
sort: "name",
sortOrder: SortOrder.DESCENDING,
}).toMatchExactly([{ name: "foo" }, { name: "bar" }]))
describe("sortType STRING", () => {
it("sorts ascending", () =>
expectSearch({
query: {},
sort: "name",
sortType: SortType.STRING,
sortOrder: SortOrder.ASCENDING,
}).toMatchExactly([{ name: "bar" }, { name: "foo" }]))
it("sorts descending", () =>
expectSearch({
query: {},
sort: "name",
sortType: SortType.STRING,
sortOrder: SortOrder.DESCENDING,
}).toMatchExactly([{ name: "foo" }, { name: "bar" }]))
})
})
})
describe("numbers", () => {
@ -167,7 +294,7 @@ describe.each([
describe("equal", () => {
it("successfully finds a row", () =>
expectQuery({ equal: { age: 1 } }).toFind([{ age: 1 }]))
expectQuery({ equal: { age: 1 } }).toContainExactly([{ age: 1 }]))
it("fails to find nonexistent row", () =>
expectQuery({ equal: { age: 2 } }).toFindNothing())
@ -175,15 +302,15 @@ describe.each([
describe("notEqual", () => {
it("successfully finds a row", () =>
expectQuery({ notEqual: { age: 1 } }).toFind([{ age: 10 }]))
expectQuery({ notEqual: { age: 1 } }).toContainExactly([{ age: 10 }]))
it("fails to find nonexistent row", () =>
expectQuery({ notEqual: { age: 10 } }).toFind([{ age: 1 }]))
expectQuery({ notEqual: { age: 10 } }).toContainExactly([{ age: 1 }]))
})
describe("oneOf", () => {
it("successfully finds a row", () =>
expectQuery({ oneOf: { age: [1] } }).toFind([{ age: 1 }]))
expectQuery({ oneOf: { age: [1] } }).toContainExactly([{ age: 1 }]))
it("fails to find nonexistent row", () =>
expectQuery({ oneOf: { age: [2] } }).toFindNothing())
@ -193,17 +320,56 @@ describe.each([
it("successfully finds a row", () =>
expectQuery({
range: { age: { low: 1, high: 5 } },
}).toFind([{ age: 1 }]))
}).toContainExactly([{ age: 1 }]))
it("successfully finds multiple rows", () =>
expectQuery({
range: { age: { low: 1, high: 10 } },
}).toFind([{ age: 1 }, { age: 10 }]))
}).toContainExactly([{ age: 1 }, { age: 10 }]))
it("successfully finds a row with a high bound", () =>
expectQuery({
range: { age: { low: 5, high: 10 } },
}).toFind([{ age: 10 }]))
}).toContainExactly([{ age: 10 }]))
it("successfully finds no rows", () =>
expectQuery({
range: { age: { low: 5, high: 9 } },
}).toFindNothing())
})
describe("sort", () => {
it("sorts ascending", () =>
expectSearch({
query: {},
sort: "age",
sortOrder: SortOrder.ASCENDING,
}).toMatchExactly([{ age: 1 }, { age: 10 }]))
it("sorts descending", () =>
expectSearch({
query: {},
sort: "age",
sortOrder: SortOrder.DESCENDING,
}).toMatchExactly([{ age: 10 }, { age: 1 }]))
})
describe("sortType NUMBER", () => {
it("sorts ascending", () =>
expectSearch({
query: {},
sort: "age",
sortType: SortType.NUMBER,
sortOrder: SortOrder.ASCENDING,
}).toMatchExactly([{ age: 1 }, { age: 10 }]))
it("sorts descending", () =>
expectSearch({
query: {},
sort: "age",
sortType: SortType.NUMBER,
sortOrder: SortOrder.DESCENDING,
}).toMatchExactly([{ age: 10 }, { age: 1 }]))
})
})
@ -211,6 +377,7 @@ describe.each([
const JAN_1ST = "2020-01-01T00:00:00.000Z"
const JAN_2ND = "2020-01-02T00:00:00.000Z"
const JAN_5TH = "2020-01-05T00:00:00.000Z"
const JAN_9TH = "2020-01-09T00:00:00.000Z"
const JAN_10TH = "2020-01-10T00:00:00.000Z"
beforeAll(async () => {
@ -223,7 +390,9 @@ describe.each([
describe("equal", () => {
it("successfully finds a row", () =>
expectQuery({ equal: { dob: JAN_1ST } }).toFind([{ dob: JAN_1ST }]))
expectQuery({ equal: { dob: JAN_1ST } }).toContainExactly([
{ dob: JAN_1ST },
]))
it("fails to find nonexistent row", () =>
expectQuery({ equal: { dob: JAN_2ND } }).toFindNothing())
@ -231,15 +400,21 @@ describe.each([
describe("notEqual", () => {
it("successfully finds a row", () =>
expectQuery({ notEqual: { dob: JAN_1ST } }).toFind([{ dob: JAN_10TH }]))
expectQuery({ notEqual: { dob: JAN_1ST } }).toContainExactly([
{ dob: JAN_10TH },
]))
it("fails to find nonexistent row", () =>
expectQuery({ notEqual: { dob: JAN_10TH } }).toFind([{ dob: JAN_1ST }]))
expectQuery({ notEqual: { dob: JAN_10TH } }).toContainExactly([
{ dob: JAN_1ST },
]))
})
describe("oneOf", () => {
it("successfully finds a row", () =>
expectQuery({ oneOf: { dob: [JAN_1ST] } }).toFind([{ dob: JAN_1ST }]))
expectQuery({ oneOf: { dob: [JAN_1ST] } }).toContainExactly([
{ dob: JAN_1ST },
]))
it("fails to find nonexistent row", () =>
expectQuery({ oneOf: { dob: [JAN_2ND] } }).toFindNothing())
@ -249,17 +424,130 @@ describe.each([
it("successfully finds a row", () =>
expectQuery({
range: { dob: { low: JAN_1ST, high: JAN_5TH } },
}).toFind([{ dob: JAN_1ST }]))
}).toContainExactly([{ dob: JAN_1ST }]))
it("successfully finds multiple rows", () =>
expectQuery({
range: { dob: { low: JAN_1ST, high: JAN_10TH } },
}).toFind([{ dob: JAN_1ST }, { dob: JAN_10TH }]))
}).toContainExactly([{ dob: JAN_1ST }, { dob: JAN_10TH }]))
it("successfully finds a row with a high bound", () =>
expectQuery({
range: { dob: { low: JAN_5TH, high: JAN_10TH } },
}).toFind([{ dob: JAN_10TH }]))
}).toContainExactly([{ dob: JAN_10TH }]))
it("successfully finds no rows", () =>
expectQuery({
range: { dob: { low: JAN_5TH, high: JAN_9TH } },
}).toFindNothing())
})
describe("sort", () => {
it("sorts ascending", () =>
expectSearch({
query: {},
sort: "dob",
sortOrder: SortOrder.ASCENDING,
}).toMatchExactly([{ dob: JAN_1ST }, { dob: JAN_10TH }]))
it("sorts descending", () =>
expectSearch({
query: {},
sort: "dob",
sortOrder: SortOrder.DESCENDING,
}).toMatchExactly([{ dob: JAN_10TH }, { dob: JAN_1ST }]))
describe("sortType STRING", () => {
it("sorts ascending", () =>
expectSearch({
query: {},
sort: "dob",
sortType: SortType.STRING,
sortOrder: SortOrder.ASCENDING,
}).toMatchExactly([{ dob: JAN_1ST }, { dob: JAN_10TH }]))
it("sorts descending", () =>
expectSearch({
query: {},
sort: "dob",
sortType: SortType.STRING,
sortOrder: SortOrder.DESCENDING,
}).toMatchExactly([{ dob: JAN_10TH }, { dob: JAN_1ST }]))
})
})
})
describe("array of strings", () => {
beforeAll(async () => {
await createTable({
numbers: {
name: "numbers",
type: FieldType.ARRAY,
constraints: { inclusion: ["one", "two", "three"] },
},
})
await createRows([{ numbers: ["one", "two"] }, { numbers: ["three"] }])
})
describe("contains", () => {
it("successfully finds a row", () =>
expectQuery({ contains: { numbers: ["one"] } }).toContainExactly([
{ numbers: ["one", "two"] },
]))
it("fails to find nonexistent row", () =>
expectQuery({ contains: { numbers: ["none"] } }).toFindNothing())
it("fails to find row containing all", () =>
expectQuery({
contains: { numbers: ["one", "two", "three"] },
}).toFindNothing())
it("finds all with empty list", () =>
expectQuery({ contains: { numbers: [] } }).toContainExactly([
{ numbers: ["one", "two"] },
{ numbers: ["three"] },
]))
})
describe("notContains", () => {
it("successfully finds a row", () =>
expectQuery({ notContains: { numbers: ["one"] } }).toContainExactly([
{ numbers: ["three"] },
]))
it("fails to find nonexistent row", () =>
expectQuery({
notContains: { numbers: ["one", "two", "three"] },
}).toContainExactly([
{ numbers: ["one", "two"] },
{ numbers: ["three"] },
]))
it("finds all with empty list", () =>
expectQuery({ notContains: { numbers: [] } }).toContainExactly([
{ numbers: ["one", "two"] },
{ numbers: ["three"] },
]))
})
describe("containsAny", () => {
it("successfully finds rows", () =>
expectQuery({
containsAny: { numbers: ["one", "two", "three"] },
}).toContainExactly([
{ numbers: ["one", "two"] },
{ numbers: ["three"] },
]))
it("fails to find nonexistent row", () =>
expectQuery({ containsAny: { numbers: ["none"] } }).toFindNothing())
it("finds all with empty list", () =>
expectQuery({ containsAny: { numbers: [] } }).toContainExactly([
{ numbers: ["one", "two"] },
{ numbers: ["three"] },
]))
})
})
})
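For reference, the three assertion styles added above differ only in strictness; a minimal sketch of their contracts, using the same illustrative rows as the tests:

```ts
// toMatchExactly:   same rows, same order, no extra rows allowed
await expectQuery({ equal: { name: "foo" } }).toMatchExactly([{ name: "foo" }])

// toContainExactly: same rows, any order, no extra rows allowed
await expectQuery({}).toContainExactly([{ name: "foo" }, { name: "bar" }])

// toContain:        expected rows must be present, extra rows are allowed
await expectQuery({}).toContain([{ name: "foo" }])
```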

View File

@ -20,6 +20,7 @@ export enum FilterTypes {
NOT_EMPTY = "notEmpty",
CONTAINS = "contains",
NOT_CONTAINS = "notContains",
CONTAINS_ANY = "containsAny",
ONE_OF = "oneOf",
}
@ -30,6 +31,7 @@ export const NoEmptyFilterStrings = [
FilterTypes.NOT_EQUAL,
FilterTypes.CONTAINS,
FilterTypes.NOT_CONTAINS,
FilterTypes.CONTAINS_ANY,
]
export const CanSwitchTypes = [

View File

@ -40,6 +40,7 @@ export const USER_METDATA_PREFIX = `${DocumentType.ROW}${SEPARATOR}${dbCore.Inte
export const LINK_USER_METADATA_PREFIX = `${DocumentType.LINK}${SEPARATOR}${dbCore.InternalTable.USER_METADATA}${SEPARATOR}`
export const TABLE_ROW_PREFIX = `${DocumentType.ROW}${SEPARATOR}${DocumentType.TABLE}`
export const AUTOMATION_LOG_PREFIX = `${DocumentType.AUTOMATION_LOG}${SEPARATOR}`
export const SQS_DATASOURCE_INTERNAL = "internal"
export const ViewName = dbCore.ViewName
export const InternalTables = dbCore.InternalTable
export const UNICODE_MAX = dbCore.UNICODE_MAX

View File

@ -28,6 +28,7 @@ const DEFAULTS = {
PLUGINS_DIR: "/plugins",
FORKED_PROCESS_NAME: "main",
JS_RUNNER_MEMORY_LIMIT: 64,
COUCH_DB_SQL_URL: "http://localhost:4006",
}
const QUERY_THREAD_TIMEOUT =
@ -39,6 +40,7 @@ const environment = {
// important - prefer app port to generic port
PORT: process.env.APP_PORT || process.env.PORT,
COUCH_DB_URL: process.env.COUCH_DB_URL,
COUCH_DB_SQL_URL: process.env.COUCH_DB_SQL_URL || DEFAULTS.COUCH_DB_SQL_URL,
MINIO_URL: process.env.MINIO_URL,
WORKER_URL: process.env.WORKER_URL,
AWS_REGION: process.env.AWS_REGION,

View File

@ -22,6 +22,8 @@ import {
SortDirection,
SqlQueryBinding,
Table,
TableSourceType,
INTERNAL_TABLE_SOURCE_ID,
} from "@budibase/types"
import environment from "../../environment"
@ -135,6 +137,18 @@ function generateSelectStatement(
})
}
function getTableName(table?: Table): string | undefined {
// SQS uses the table ID rather than the table name
if (
table?.sourceType === TableSourceType.INTERNAL ||
table?.sourceId === INTERNAL_TABLE_SOURCE_ID
) {
return table?._id
} else {
return table?.name
}
}
class InternalBuilder {
private readonly client: string
@ -149,7 +163,7 @@ class InternalBuilder {
tableName: string,
opts: { aliases?: Record<string, string>; relationship?: boolean }
): Knex.QueryBuilder {
function getTableName(name: string) {
function getTableAlias(name: string) {
const alias = opts.aliases?.[name]
return alias || name
}
@ -161,11 +175,11 @@ class InternalBuilder {
const updatedKey = dbCore.removeKeyNumbering(key)
const isRelationshipField = updatedKey.includes(".")
if (!opts.relationship && !isRelationshipField) {
fn(`${getTableName(tableName)}.${updatedKey}`, value)
fn(`${getTableAlias(tableName)}.${updatedKey}`, value)
}
if (opts.relationship && isRelationshipField) {
const [filterTableName, property] = updatedKey.split(".")
fn(`${getTableName(filterTableName)}.${property}`, value)
fn(`${getTableAlias(filterTableName)}.${property}`, value)
}
}
}
@ -233,6 +247,11 @@ class InternalBuilder {
(statement ? andOr : "") +
`LOWER(${likeKey(this.client, key)}) LIKE ?`
}
if (statement === "") {
return
}
// @ts-ignore
query = query[rawFnc](`${not}(${statement})`, value)
})
@ -341,9 +360,10 @@ class InternalBuilder {
addSorting(query: Knex.QueryBuilder, json: QueryJson): Knex.QueryBuilder {
let { sort, paginate } = json
const table = json.meta?.table
const tableName = getTableName(table)
const aliases = json.tableAliases
const aliased =
table?.name && aliases?.[table.name] ? aliases[table.name] : table?.name
tableName && aliases?.[tableName] ? aliases[tableName] : table?.name
if (sort && Object.keys(sort || {}).length > 0) {
for (let [key, value] of Object.entries(sort)) {
const direction =
@ -724,12 +744,13 @@ class SqlQueryBuilder extends SqlTableQueryBuilder {
results: Record<string, any>[],
aliases?: Record<string, string>
): Record<string, any>[] {
const tableName = getTableName(table)
for (const [name, field] of Object.entries(table.schema)) {
if (!this._isJsonColumn(field)) {
continue
}
const tableName = aliases?.[table.name] || table.name
const fullName = `${tableName}.${name}`
const aliasedTableName = (tableName && aliases?.[tableName]) || tableName
const fullName = `${aliasedTableName}.${name}`
for (let row of results) {
if (typeof row[fullName] === "string") {
row[fullName] = JSON.parse(row[fullName])

View File

@ -1,19 +1,20 @@
import { Knex, knex } from "knex"
import {
RelationshipType,
FieldSubtype,
FieldType,
NumberFieldMetadata,
Operation,
QueryJson,
RelationshipType,
RenameColumn,
Table,
FieldType,
SqlQuery,
Table,
TableSourceType,
} from "@budibase/types"
import { breakExternalTableId, getNativeSql, SqlClient } from "../utils"
import { utils } from "@budibase/shared-core"
import SchemaBuilder = Knex.SchemaBuilder
import CreateTableBuilder = Knex.CreateTableBuilder
import { utils } from "@budibase/shared-core"
function isIgnoredType(type: FieldType) {
const ignored = [FieldType.LINK, FieldType.FORMULA]
@ -105,13 +106,13 @@ function generateSchema(
column.relationshipType !== RelationshipType.MANY_TO_MANY
) {
if (!column.foreignKey || !column.tableId) {
throw "Invalid relationship schema"
throw new Error("Invalid relationship schema")
}
const { tableName } = breakExternalTableId(column.tableId)
// @ts-ignore
const relatedTable = tables[tableName]
if (!relatedTable) {
throw "Referenced table doesn't exist"
throw new Error("Referenced table doesn't exist")
}
const relatedPrimary = relatedTable.primary[0]
const externalType = relatedTable.schema[relatedPrimary].externalType
@ -209,15 +210,19 @@ class SqlTableQueryBuilder {
let query: Knex.SchemaBuilder
if (!json.table || !json.meta || !json.meta.tables) {
throw "Cannot execute without table being specified"
throw new Error("Cannot execute without table being specified")
}
if (json.table.sourceType === TableSourceType.INTERNAL) {
throw new Error("Cannot perform table actions for SQS.")
}
switch (this._operation(json)) {
case Operation.CREATE_TABLE:
query = buildCreateTable(client, json.table, json.meta.tables)
break
case Operation.UPDATE_TABLE:
if (!json.meta || !json.meta.table) {
throw "Must specify old table for update"
throw new Error("Must specify old table for update")
}
// renameColumn does not work for MySQL, so return a raw query
if (this.sqlClient === SqlClient.MY_SQL && json.meta.renamed) {
@ -264,7 +269,7 @@ class SqlTableQueryBuilder {
query = buildDeleteTable(client, json.table)
break
default:
throw "Table operation is of unknown type"
throw new Error("Table operation is of unknown type")
}
return getNativeSql(query)
}

View File

@ -9,6 +9,14 @@ import {
} from "@budibase/types"
const TABLE_NAME = "test"
const TABLE: Table = {
type: "table",
sourceType: TableSourceType.EXTERNAL,
sourceId: "SOURCE_ID",
schema: {},
name: TABLE_NAME,
primary: ["id"],
}
function endpoint(table: any, operation: any) {
return {
@ -25,6 +33,10 @@ function generateReadJson({
sort,
paginate,
}: any = {}): QueryJson {
const tableObj = { ...TABLE }
if (table) {
tableObj.name = table
}
return {
endpoint: endpoint(table || TABLE_NAME, "READ"),
resource: {
@ -34,14 +46,7 @@ function generateReadJson({
sort: sort || {},
paginate: paginate || {},
meta: {
table: {
type: "table",
sourceType: TableSourceType.EXTERNAL,
sourceId: "SOURCE_ID",
schema: {},
name: table || TABLE_NAME,
primary: ["id"],
} as any,
table: tableObj,
},
}
}
@ -49,6 +54,9 @@ function generateReadJson({
function generateCreateJson(table = TABLE_NAME, body = {}): QueryJson {
return {
endpoint: endpoint(table, "CREATE"),
meta: {
table: TABLE,
},
body,
}
}
@ -58,7 +66,15 @@ function generateUpdateJson({
body = {},
filters = {},
meta = {},
}: {
table: string
body?: any
filters?: any
meta?: any
}): QueryJson {
if (!meta.table) {
meta.table = table
}
return {
endpoint: endpoint(table, "UPDATE"),
filters,
@ -70,6 +86,9 @@ function generateUpdateJson({
function generateDeleteJson(table = TABLE_NAME, filters = {}): QueryJson {
return {
endpoint: endpoint(table, "DELETE"),
meta: {
table: TABLE,
},
filters,
}
}
@ -102,6 +121,9 @@ function generateRelationshipJson(config: { schema?: string } = {}): QueryJson {
},
],
extra: { idFilter: {} },
meta: {
table: TABLE,
},
}
}

View File

@ -4,12 +4,26 @@ import {
QueryJson,
SourceName,
SqlQuery,
Table,
TableSourceType,
} from "@budibase/types"
import { join } from "path"
import Sql from "../base/sql"
import { SqlClient } from "../utils"
import AliasTables from "../../api/controllers/row/alias"
import { generator } from "@budibase/backend-core/tests"
import sdk from "../../sdk"
// this doesn't exist strictly
const TABLE: Table = {
type: "table",
sourceType: TableSourceType.EXTERNAL,
sourceId: "SOURCE_ID",
schema: {},
name: "tableName",
primary: ["id"],
}
const AliasTables = sdk.rows.AliasTables
function multiline(sql: string) {
return sql.replace(/\n/g, "").replace(/ +/g, " ")
@ -220,6 +234,9 @@ describe("Captures of real examples", () => {
resource: {
fields,
},
meta: {
table: TABLE,
},
}
}

View File

@ -3,6 +3,7 @@ import * as rows from "./rows"
import * as search from "./search"
import * as utils from "./utils"
import * as external from "./external"
import AliasTables from "./sqlAlias"
export default {
...attachments,
@ -10,4 +11,5 @@ export default {
...search,
utils,
external,
AliasTables,
}

View File

@ -13,6 +13,8 @@ import * as sqs from "./search/sqs"
import env from "../../../environment"
import { ExportRowsParams, ExportRowsResult } from "./search/types"
import { dataFilters } from "@budibase/shared-core"
import sdk from "../../index"
import { searchInputMapping } from "./search/utils"
export { isValidFilter } from "../../../integrations/utils"
@ -29,6 +31,10 @@ function pickApi(tableId: any) {
return internal
}
function isEmptyArray(value: any) {
return Array.isArray(value) && value.length === 0
}
// don't do a pure falsy check, as 0 is included
// https://github.com/Budibase/budibase/issues/10118
export function removeEmptyFilters(filters: SearchFilters) {
@ -47,7 +53,7 @@ export function removeEmptyFilters(filters: SearchFilters) {
for (let [key, value] of Object.entries(
filters[filterType] as object
)) {
if (value == null || value === "") {
if (value == null || value === "" || isEmptyArray(value)) {
// @ts-ignore
delete filters[filterField][key]
}
@ -72,12 +78,15 @@ export async function search(
}
}
const table = await sdk.tables.getTable(options.tableId)
options = searchInputMapping(table, options)
if (isExternalTable) {
return external.search(options)
return external.search(options, table)
} else if (env.SQS_SEARCH_ENABLE) {
return sqs.search(options)
return sqs.search(options, table)
} else {
return internal.search(options)
return internal.search(options, table)
}
}

View File

@ -8,6 +8,7 @@ import {
SearchFilters,
RowSearchParams,
SearchResponse,
Table,
} from "@budibase/types"
import * as exporters from "../../../../api/controllers/view/exporters"
import { handleRequest } from "../../../../api/controllers/row/external"
@ -18,13 +19,13 @@ import {
import { utils } from "@budibase/shared-core"
import { ExportRowsParams, ExportRowsResult } from "./types"
import { HTTPError, db } from "@budibase/backend-core"
import { searchInputMapping } from "./utils"
import pick from "lodash/pick"
import { outputProcessing } from "../../../../utilities/rowProcessor"
import sdk from "../../../"
export async function search(
options: RowSearchParams
options: RowSearchParams,
table: Table
): Promise<SearchResponse<Row>> {
const { tableId } = options
const { paginate, query, ...params } = options
@ -68,8 +69,6 @@ export async function search(
}
try {
const table = await sdk.tables.getTable(tableId)
options = searchInputMapping(table, options)
let rows = await handleRequest(Operation.READ, tableId, {
filters: query,
sort,
@ -150,11 +149,15 @@ export async function exportRows(
}
const datasource = await sdk.datasources.get(datasourceId!)
const table = await sdk.tables.getTable(tableId)
if (!datasource || !datasource.entities) {
throw new HTTPError("Datasource has not been configured for plus API.", 400)
}
let result = await search({ tableId, query: requestQuery, sort, sortOrder })
let result = await search(
{ tableId, query: requestQuery, sort, sortOrder },
table
)
let rows: Row[] = []
let headers

View File

@ -1,6 +1,6 @@
import { context, db, HTTPError } from "@budibase/backend-core"
import env from "../../../../environment"
import { fullSearch, paginatedSearch, searchInputMapping } from "./utils"
import { fullSearch, paginatedSearch } from "./utils"
import { getRowParams, InternalTables } from "../../../../db/utils"
import {
Database,
@ -33,7 +33,8 @@ import pick from "lodash/pick"
import { breakRowIdField } from "../../../../integrations/utils"
export async function search(
options: RowSearchParams
options: RowSearchParams,
table: Table
): Promise<SearchResponse<Row>> {
const { tableId } = options
@ -51,8 +52,6 @@ export async function search(
query: {},
}
let table = await sdk.tables.getTable(tableId)
options = searchInputMapping(table, options)
if (params.sort && !params.sortType) {
const schema = table.schema
const sortField = schema[params.sort]
@ -122,12 +121,15 @@ export async function exportRows(
result = await outputProcessing<Row[]>(table, response)
} else if (query) {
let searchResponse = await search({
tableId,
query,
sort,
sortOrder,
})
let searchResponse = await search(
{
tableId,
query,
sort,
sortOrder,
},
table
)
result = searchResponse.rows
}

View File

@ -20,7 +20,12 @@ import {
} from "../../../../api/controllers/row/utils"
import sdk from "../../../index"
import { context } from "@budibase/backend-core"
import { CONSTANT_INTERNAL_ROW_COLS } from "../../../../db/utils"
import {
CONSTANT_INTERNAL_ROW_COLS,
SQS_DATASOURCE_INTERNAL,
} from "../../../../db/utils"
import AliasTables from "../sqlAlias"
import { outputProcessing } from "../../../../utilities/rowProcessor"
function buildInternalFieldList(
table: Table,
@ -31,19 +36,19 @@ function buildInternalFieldList(
fieldList = fieldList.concat(
CONSTANT_INTERNAL_ROW_COLS.map(col => `${table._id}.${col}`)
)
if (opts.relationships) {
for (let col of Object.values(table.schema)) {
if (col.type === FieldType.LINK) {
const linkCol = col as RelationshipFieldMetadata
const relatedTable = tables.find(
table => table._id === linkCol.tableId
)!
fieldList = fieldList.concat(
buildInternalFieldList(relatedTable, tables, { relationships: false })
)
} else {
fieldList.push(`${table._id}.${col.name}`)
}
for (let col of Object.values(table.schema)) {
const isRelationship = col.type === FieldType.LINK
if (!opts.relationships && isRelationship) {
continue
}
if (isRelationship) {
const linkCol = col as RelationshipFieldMetadata
const relatedTable = tables.find(table => table._id === linkCol.tableId)!
fieldList = fieldList.concat(
buildInternalFieldList(relatedTable, tables, { relationships: false })
)
} else {
fieldList.push(`${table._id}.${col.name}`)
}
}
return fieldList
@ -94,14 +99,14 @@ function buildTableMap(tables: Table[]) {
}
export async function search(
options: RowSearchParams
options: RowSearchParams,
table: Table
): Promise<SearchResponse<Row>> {
const { tableId, paginate, query, ...params } = options
const { paginate, query, ...params } = options
const builder = new SqlQueryBuilder(SqlClient.SQL_LITE)
const allTables = await sdk.tables.getAllInternalTables()
const allTablesMap = buildTableMap(allTables)
const table = allTables.find(table => table._id === tableId)
if (!table) {
throw new Error("Unable to find table")
}
@ -111,7 +116,7 @@ export async function search(
const request: QueryJson = {
endpoint: {
// not important, we query ourselves
datasourceId: "internal",
datasourceId: SQS_DATASOURCE_INTERNAL,
entityId: table._id!,
operation: Operation.READ,
},
@ -132,7 +137,7 @@ export async function search(
type: "row",
}
if (params.sort && !params.sortType) {
if (params.sort) {
const sortField = table.schema[params.sort]
const sortType =
sortField.type === FieldType.NUMBER ? SortType.NUMBER : SortType.STRING
@ -154,34 +159,44 @@ export async function search(
}
}
try {
const query = builder._query(request, {
disableReturning: true,
const alias = new AliasTables(allTables.map(table => table.name))
const rows = await alias.queryWithAliasing(request, async json => {
const query = builder._query(json, {
disableReturning: true,
})
if (Array.isArray(query)) {
throw new Error("SQS cannot currently handle multiple queries")
}
let sql = query.sql,
bindings = query.bindings
// quick hack for docIds
sql = sql.replace(/`doc1`.`rowId`/g, "`doc1.rowId`")
sql = sql.replace(/`doc2`.`rowId`/g, "`doc2.rowId`")
const db = context.getAppDB()
return await db.sql<Row>(sql, bindings)
})
if (Array.isArray(query)) {
throw new Error("SQS cannot currently handle multiple queries")
}
let sql = query.sql,
bindings = query.bindings
// quick hack for docIds
sql = sql.replace(/`doc1`.`rowId`/g, "`doc1.rowId`")
sql = sql.replace(/`doc2`.`rowId`/g, "`doc2.rowId`")
const db = context.getAppDB()
const rows = await db.sql<Row>(sql, bindings)
// process from the format of tableId.column to expected format
const processed = await sqlOutputProcessing(
rows,
table!,
allTablesMap,
relationships,
{
sqs: true,
}
)
return {
rows: await sqlOutputProcessing(
rows,
table!,
allTablesMap,
relationships,
{
sqs: true,
}
),
// final row processing for response
rows: await outputProcessing<Row[]>(table, processed, {
preserveLinks: true,
squash: true,
}),
}
} catch (err: any) {
const msg = typeof err === "string" ? err : err.message

View File

@ -112,7 +112,7 @@ describe("external search", () => {
tableId,
query: {},
}
const result = await search(searchParams)
const result = await search(searchParams, config.table!)
expect(result.rows).toHaveLength(10)
expect(result.rows).toEqual(
@ -130,7 +130,7 @@ describe("external search", () => {
query: {},
fields: ["name", "age"],
}
const result = await search(searchParams)
const result = await search(searchParams, config.table!)
expect(result.rows).toHaveLength(10)
expect(result.rows).toEqual(
@ -157,7 +157,7 @@ describe("external search", () => {
},
},
}
const result = await search(searchParams)
const result = await search(searchParams, config.table!)
expect(result.rows).toHaveLength(3)
expect(result.rows.map(row => row.id)).toEqual([1, 4, 8])

View File

@ -81,7 +81,7 @@ describe("internal", () => {
tableId,
query: {},
}
const result = await search(searchParams)
const result = await search(searchParams, config.table!)
expect(result.rows).toHaveLength(10)
expect(result.rows).toEqual(
@ -99,7 +99,7 @@ describe("internal", () => {
query: {},
fields: ["name", "age"],
}
const result = await search(searchParams)
const result = await search(searchParams, config.table!)
expect(result.rows).toHaveLength(10)
expect(result.rows).toEqual(

View File

@@ -6,11 +6,12 @@ import {
Row,
SearchFilters,
} from "@budibase/types"
import { getSQLClient } from "../../../sdk/app/rows/utils"
import { getSQLClient } from "./utils"
import { cloneDeep } from "lodash"
import sdk from "../../../sdk"
import datasources from "../datasources"
import { makeExternalQuery } from "../../../integrations/base/query"
import { SqlClient } from "../../../integrations/utils"
import { SQS_DATASOURCE_INTERNAL } from "../../../db/utils"
const WRITE_OPERATIONS: Operation[] = [
Operation.CREATE,
@@ -156,12 +157,19 @@ export default class AliasTables {
}
async queryWithAliasing(
json: QueryJson
json: QueryJson,
queryFn?: (json: QueryJson) => Promise<DatasourcePlusQueryResponse>
): Promise<DatasourcePlusQueryResponse> {
const datasourceId = json.endpoint.datasourceId
const datasource = await sdk.datasources.get(datasourceId)
const isSqs = datasourceId === SQS_DATASOURCE_INTERNAL
let aliasingEnabled: boolean, datasource: Datasource | undefined
if (isSqs) {
aliasingEnabled = true
} else {
datasource = await datasources.get(datasourceId)
aliasingEnabled = this.isAliasingEnabled(json, datasource)
}
const aliasingEnabled = this.isAliasingEnabled(json, datasource)
if (aliasingEnabled) {
json = cloneDeep(json)
// run through the query json to update anywhere a table may be used
@@ -207,7 +215,15 @@ export default class AliasTables {
}
json.tableAliases = invertedTableAliases
}
const response = await makeExternalQuery(datasource, json)
let response: DatasourcePlusQueryResponse
if (datasource && !isSqs) {
response = await makeExternalQuery(datasource, json)
} else if (queryFn) {
response = await queryFn(json)
} else {
throw new Error("No supplied method to perform aliased query")
}
if (Array.isArray(response) && aliasingEnabled) {
return this.reverse(response)
} else {
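
In short, queryWithAliasing now has two execution paths: external datasources are still resolved and sent through makeExternalQuery, while the internal SQS datasource (which has no datasource document) requires the caller to supply the execution function, and the method throws if neither path applies. A usage sketch - the aliaser, query JSON values and executor below are illustrative, not real call sites:

// external datasource: resolved internally and run via makeExternalQuery
const externalResponse = await aliaser.queryWithAliasing(externalJson)

// SQS: the caller must pass a queryFn to execute the aliased JSON
const sqsResponse = await aliaser.queryWithAliasing(sqsJson, async json => {
  return await runAgainstAppDb(json) // hypothetical executor for the app's SQLite DB
})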

View File

@@ -1,8 +1,19 @@
import { context, SQLITE_DESIGN_DOC_ID } from "@budibase/backend-core"
import { FieldType, SQLiteDefinition, SQLiteType, Table } from "@budibase/types"
import {
FieldType,
RelationshipFieldMetadata,
SQLiteDefinition,
SQLiteTable,
SQLiteTables,
SQLiteType,
Table,
} from "@budibase/types"
import { cloneDeep } from "lodash"
import tablesSdk from "../"
import { CONSTANT_INTERNAL_ROW_COLS } from "../../../../db/utils"
import {
CONSTANT_INTERNAL_ROW_COLS,
generateJunctionTableID,
} from "../../../../db/utils"
const BASIC_SQLITE_DOC: SQLiteDefinition = {
_id: SQLITE_DESIGN_DOC_ID,
@@ -36,9 +47,42 @@ const FieldTypeMap: Record<FieldType, SQLiteType> = {
[FieldType.BB_REFERENCE]: SQLiteType.TEXT,
}
function mapTable(table: Table): { [key: string]: SQLiteType } {
function buildRelationshipDefinitions(
table: Table,
relationshipColumn: RelationshipFieldMetadata
): {
tableId: string
definition: SQLiteTable
} {
const tableId = table._id!,
relatedTableId = relationshipColumn.tableId
return {
tableId: generateJunctionTableID(tableId, relatedTableId),
definition: {
["doc1.rowId"]: SQLiteType.TEXT,
["doc1.tableId"]: SQLiteType.TEXT,
["doc1.fieldName"]: SQLiteType.TEXT,
["doc2.rowId"]: SQLiteType.TEXT,
["doc2.tableId"]: SQLiteType.TEXT,
["doc2.fieldName"]: SQLiteType.TEXT,
tableId: SQLiteType.TEXT,
},
}
}
// this can generate relationship tables as part of the mapping
function mapTable(table: Table): SQLiteTables {
const tables: SQLiteTables = {}
const fields: Record<string, SQLiteType> = {}
for (let [key, column] of Object.entries(table.schema)) {
// relationships should be handled differently
if (column.type === FieldType.LINK) {
const { tableId, definition } = buildRelationshipDefinitions(
table,
column
)
tables[tableId] = { fields: definition }
}
if (!FieldTypeMap[column.type]) {
throw new Error(`Unable to map type "${column.type}" to SQLite type`)
}
@@ -49,10 +93,12 @@ function mapTable(table: Table): { [key: string]: SQLiteType } {
CONSTANT_INTERNAL_ROW_COLS.forEach(col => {
constantMap[col] = SQLiteType.TEXT
})
return {
const thisTable: SQLiteTable = {
...constantMap,
...fields,
}
tables[table._id!] = { fields: thisTable }
return tables
}
// nothing exists, need to iterate though existing tables
@@ -60,8 +106,9 @@ async function buildBaseDefinition(): Promise<SQLiteDefinition> {
const tables = await tablesSdk.getAllInternalTables()
const definition = cloneDeep(BASIC_SQLITE_DOC)
for (let table of tables) {
definition.sql.tables[table._id!] = {
fields: mapTable(table),
definition.sql.tables = {
...definition.sql.tables,
...mapTable(table),
}
}
return definition
@@ -75,8 +122,9 @@ export async function addTableToSqlite(table: Table) {
} catch (err) {
definition = await buildBaseDefinition()
}
definition.sql.tables[table._id!] = {
fields: mapTable(table),
definition.sql.tables = {
...definition.sql.tables,
...mapTable(table),
}
await db.put(definition)
}
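
The upshot is that mapTable now returns a map of table definitions rather than a single field map: one entry for the table itself (constant internal columns plus the mapped schema) and one junction-table entry per LINK column, which is why the callers above spread its result into definition.sql.tables. An illustrative output shape - the table IDs and junction key below are invented:

const output: SQLiteTables = {
  // junction table for a LINK column, keyed by generateJunctionTableID(...)
  "ta_abc123_ta_def456": {
    fields: {
      "doc1.rowId": SQLiteType.TEXT,
      "doc1.tableId": SQLiteType.TEXT,
      "doc1.fieldName": SQLiteType.TEXT,
      "doc2.rowId": SQLiteType.TEXT,
      "doc2.tableId": SQLiteType.TEXT,
      "doc2.fieldName": SQLiteType.TEXT,
      tableId: SQLiteType.TEXT,
    },
  },
  // the table itself: constant internal columns plus mapped schema fields
  "ta_abc123": {
    fields: {
      _id: SQLiteType.TEXT, // one of CONSTANT_INTERNAL_ROW_COLS (illustrative)
      name: SQLiteType.TEXT,
    },
  },
}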

View File

@@ -60,7 +60,10 @@ export class DatasourceAPI extends TestAPI {
})
}
query = async (query: QueryJson, expectations?: Expectations) => {
query = async (
query: Omit<QueryJson, "meta">,
expectations?: Expectations
) => {
return await this._post<any>(`/api/datasources/query`, {
body: query,
expectations,

View File

@@ -102,6 +102,7 @@ export function isVerifiableSSOProvider(provider: AccountSSOProvider): boolean {
}
export interface AccountSSO {
ssoId?: string
provider: AccountSSOProvider
providerType: AccountSSOProviderType
oauth2?: OAuthTokens

View File

@@ -1,22 +1,111 @@
import { Document } from "../document"
export enum FieldType {
/**
* a primitive type, stores a string, called Text within Budibase. This is one of the default
* types of Budibase; if an external type is not fully understood, we will treat it as text.
*/
STRING = "string",
/**
* similar to the string type, called Long Form Text within Budibase. This is mainly a frontend-
* oriented type which enables a larger text input area. This can also be used
* in conjunction with the 'useRichText' option to support a markdown editor/viewer.
*/
LONGFORM = "longform",
/**
* similar to string type, called Options within Budibase. This works very similarly to
* the string type within the backend, but is validated to a list of options. This will
* display a <select> input within the builder/client.
*/
OPTIONS = "options",
/**
* a primitive type, stores a number, as a floating point, called Number within Budibase.
* this type will always represent numbers as reals/floating point - there is no integer-only
* type within Budibase.
*/
NUMBER = "number",
/**
* a primitive type, stores a boolean, called Boolean within Budibase. This is often represented
* as a toggle or checkbox within forms/grids.
*/
BOOLEAN = "boolean",
/**
* a JSON type, this type is always an array of strings, called Multi-select within Budibase.
* This type can be compared to the options type, as it functions similarly, but allows picking
* multiple options rather than a single option.
*/
ARRAY = "array",
/**
* a string type, this is always a string when input/returned from the API, called Date/Time within
* Budibase. We utilise ISO date strings for representing dates; this type has a range of subtypes
* to restrict it to date-only, time-only and ignore-timezone capabilities.
*/
DATETIME = "datetime",
/**
* a JSON type, an array of metadata about files held in object storage, called Attachment List within
* Budibase. To utilise this type there is an API for uploading files to Budibase, which returns metadata
* that can be stored against columns of this type. Currently this is not supported on external databases.
*/
ATTACHMENTS = "attachment",
/**
* a JSON type, similar to the attachments type, called Attachment within Budibase. This type functions
* much the same as the attachment list, but only holds a single attachment's metadata as an object.
* This simplifies the binding experience of using this column type.
*/
ATTACHMENT_SINGLE = "attachment_single",
/**
* a complex type, called Relationships within Budibase. This is the most complex type in Budibase;
* nothing should be stored against rows under link columns - this type simply represents the
* relationship between tables as part of the table schema. When rows are input to the Budibase API,
* relationships to be made are represented as a list of row IDs to link. When rows are returned
* from the Budibase API they will contain a list of row IDs and the display column values of the related rows.
*/
LINK = "link",
/**
* a complex type, called Formulas within Budibase. This type has two variants, static and dynamic, with
* static only being supported against internal tables. Dynamic formulas calculate a provided HBS/JS binding
* based on the row context and enrich it when rows are being returned from the API. Static bindings calculate
* this when rows are being stored, so that the formula output can be searched within the DB.
*/
FORMULA = "formula",
/**
* a complex type, called Auto Column within Budibase. This type has a few variants, with options such as a
* date for created at/updated at, an auto ID column which auto-increments as rows are saved, and a user
* relationship type which stores the created by/updated by user details. These subtypes depend on the
* date, number and link types respectively. There is one case where these will be executed in the browser:
* as part of the initial formula definition, the formula will be live evaluated in the browser.
*/
AUTO = "auto",
/**
* a JSON type, called JSON within Budibase. This type allows any arbitrary JSON to be input to this column
* type, which will be represented as a JSON object in the row. This type depends on a schema being
* provided to make the JSON searchable/bindable, the JSON cannot be fully dynamic.
*/
JSON = "json",
/**
* @deprecated an internal type; this is an old, deprecated type which is no longer used but is still represented, as it
* could appear in very old tables.
*/
INTERNAL = "internal",
/**
* a string type, called Barcode/QR within Budibase. This type is used to denote to forms that this column
* should be filled in using a camera to read a barcode; there is a form component which will be used when this
* type is found. The column will contain the contents of any barcode scanned.
*/
BARCODEQR = "barcodeqr",
/**
* a string type, this allows representing very large integers, but they are held/managed within Budibase as
* strings. When stored in external databases Budibase will attempt to use a real big integer type and depend
* on the database parsing the string to this type as part of saving.
*/
BIGINT = "bigint",
/**
* a JSON type, called User within Budibase. This type is used to represent a link to an internal Budibase
* resource, like a user or group; today only users are supported. This type will be represented as an
* array of internal resource IDs (e.g. user IDs) within the row - this ID list will be enriched with
* the full resources when rows are returned from the API. Either the full resources or an array of
* resource IDs can be input to the API; the API will squash these down and validate them before saving the row.
*/
BB_REFERENCE = "bb_reference",
}
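
Taken together, the documented representations can be illustrated in a single row payload. A hypothetical example - field names and IDs are invented:

const row = {
  name: "Example",                     // STRING
  score: 9.5,                          // NUMBER - always floating point
  tags: ["red", "blue"],               // ARRAY - multi-select, an array of strings
  dueDate: "2024-04-17T10:25:19.000Z", // DATETIME - ISO date string
  manager: ["ro_ta_employees_abc123"], // LINK - row IDs to relate on input
  owner: ["us_abc123"],                // BB_REFERENCE - internal user IDs
}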

View File

@@ -6,17 +6,23 @@ export enum SQLiteType {
NUMERIC = "NUMERIC",
}
export type SQLiteTable = Record<
string,
SQLiteType | { field: string; type: SQLiteType }
>
export type SQLiteTables = Record<
string,
{
fields: SQLiteTable
}
>
export interface SQLiteDefinition {
_id: string
language: string
sql: {
tables: {
[tableName: string]: {
fields: {
[key: string]: SQLiteType | { field: string; type: SQLiteType }
}
}
}
tables: SQLiteTables
options: {
table_name: string
}
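
Under the new types, a definition document nests each table's fields beneath a per-table entry, and a field can either be a bare SQLiteType or remap the underlying column. A minimal sketch - the language value, table name and options below are illustrative assumptions:

const definition: SQLiteDefinition = {
  _id: SQLITE_DESIGN_DOC_ID, // design doc ID constant from backend-core
  language: "sqlite", // illustrative
  sql: {
    tables: {
      ta_example: {
        fields: {
          name: SQLiteType.TEXT,
          // a field can also remap the underlying column name
          created: { field: "createdAt", type: SQLiteType.TEXT },
        },
      },
    },
    options: { table_name: "tableId" }, // illustrative
  },
}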

View File

@@ -90,8 +90,8 @@ export interface QueryJson {
paginate?: PaginationJson
body?: Row | Row[]
table?: Table
meta?: {
table?: Table
meta: {
table: Table
tables?: Record<string, Table>
renamed?: RenameColumn
}
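
Since meta (and meta.table) is now required, every QueryJson constructed against this interface must attach the table up front - which is also why the test API above accepts Omit<QueryJson, "meta">. A sketch mirroring the search() request earlier in the diff, with illustrative values:

const json: QueryJson = {
  endpoint: {
    datasourceId: SQS_DATASOURCE_INTERNAL,
    entityId: table._id!,
    operation: Operation.READ,
  },
  meta: {
    table, // previously optional, now required
  },
}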

scripts/deploy-camunda.sh Executable file
View File

@@ -0,0 +1,31 @@
#!/bin/bash
yarn global add zbctl
export ZEEBE_ADDRESS='localhost:26500'
cd ../budibase-bpm
# succeed (exit 0) once the Zeebe gateway reports healthy
is_camunda_ready() {
    zbctl --insecure status 2>/dev/null | grep -q 'Healthy'
}
docker-compose up -d
echo "waiting for Camunda to be ready..."
until is_camunda_ready; do sleep 1; done
cd src/main/resources/models
echo "deploy processes..."
zbctl deploy resource offboarding.bpmn --insecure
zbctl deploy resource onboarding.bpmn --insecure
cd ../../../../../budibase/packages/account-portal/packages/server
yarn worker:run & cd ../../../.. && yarn dev:accountportal

View File

@@ -5174,7 +5174,7 @@
dependencies:
"@types/node" "*"
"@types/archiver@^6.0.2":
"@types/archiver@6.0.2":
version "6.0.2"
resolved "https://registry.yarnpkg.com/@types/archiver/-/archiver-6.0.2.tgz#0daf8c83359cbde69de1e4b33dcade6a48a929e2"
integrity sha512-KmROQqbQzKGuaAbmK+ZcytkJ51+YqDa7NmbXjmtC5YBLSyQYo21YaUnQ3HbaPFKL1ooo6RQ6OPYPIDyxfpDDXw==