Reformatting types to allow the queue to be used for both import and export.

mike12345567 2022-10-17 19:42:36 +01:00
parent 8003f8b283
commit f795cb0e33
3 changed files with 55 additions and 36 deletions

View File

@@ -5,37 +5,41 @@ import { Job } from "bull"
 import fs from "fs"
 import env from "../../../environment"
 
-export async function init() {
-  await backups.addAppBackupProcessor(async (job: Job) => {
-    const appId = job.data.appId,
-      trigger = job.data.trigger,
-      name = job.data.name
-    const tenantId = tenancy.getTenantIDFromAppID(appId)
-    await tenancy.doInTenant(tenantId, async () => {
-      const createdAt = new Date().toISOString()
-      const tarPath = await exportApp(appId, { tar: true })
-      let filename = `${appId}/backup-${createdAt}.tar.gz`
-      // add the tenant to the bucket path if backing up within a multi-tenant environment
-      if (env.MULTI_TENANCY) {
-        filename = `${tenantId}/${filename}`
-      }
-      const bucket = objectStore.ObjectStoreBuckets.BACKUPS
-      const metadata = {
-        appId,
-        createdAt,
-        trigger,
-        name,
-      }
-      await objectStore.upload({
-        path: tarPath,
-        type: "application/gzip",
-        bucket,
-        filename,
-        metadata,
-      })
-      await backups.storeAppBackupMetadata(filename, metadata)
-      // clear up the tarball after uploading it
-      fs.rmSync(tarPath)
-    })
-  })
-}
+async function importProcessor(job: Job) {}
+
+async function exportProcessor(job: Job) {
+  const appId = job.data.appId,
+    trigger = job.data.trigger,
+    name = job.data.name
+  const tenantId = tenancy.getTenantIDFromAppID(appId)
+  await tenancy.doInTenant(tenantId, async () => {
+    const createdAt = new Date().toISOString()
+    const tarPath = await exportApp(appId, { tar: true })
+    let filename = `${appId}/backup-${createdAt}.tar.gz`
+    // add the tenant to the bucket path if backing up within a multi-tenant environment
+    if (env.MULTI_TENANCY) {
+      filename = `${tenantId}/${filename}`
+    }
+    const bucket = objectStore.ObjectStoreBuckets.BACKUPS
+    const metadata = {
+      appId,
+      createdAt,
+      trigger,
+      name,
+    }
+    await objectStore.upload({
+      path: tarPath,
+      type: "application/gzip",
+      bucket,
+      filename,
+      metadata,
+    })
+    await backups.storeAppBackupMetadata(filename, metadata)
+    // clear up the tarball after uploading it
+    fs.rmSync(tarPath)
+  })
+}
+
+export async function init() {
+  await backups.addAppBackupProcessors(importProcessor, exportProcessor)
+}
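
Note that this commit only shows the call site of addAppBackupProcessors; the function itself is defined elsewhere (in backend-core) and its implementation is not part of this diff. As a rough sketch of the idea, not the actual API, a registration helper like the one below could multiplex both processors onto a single bull queue by dispatching on the eventType field introduced in the types later in this commit; the Queue parameter and the dispatch logic are assumptions for illustration.

// Illustrative sketch only, not the backend-core implementation:
// a single bull processor that routes each job by its eventType.
import { Job, Queue } from "bull"

type BackupProcessor = (job: Job) => Promise<void>

export function addAppBackupProcessors(
  queue: Queue,
  importProcessor: BackupProcessor,
  exportProcessor: BackupProcessor
) {
  queue.process(async (job: Job) => {
    if (job.data.eventType === "import") {
      await importProcessor(job)
    } else {
      await exportProcessor(job)
    }
  })
}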

View File

@@ -1,5 +1,5 @@
 import { db as dbCore } from "@budibase/backend-core"
-import { APP_PREFIX, TABLE_ROW_PREFIX } from "../../../db/utils"
+import { TABLE_ROW_PREFIX } from "../../../db/utils"
 import { budibaseTempDir } from "../../../utilities/budibaseDir"
 import {
   DB_EXPORT_FILE,

View File

@@ -6,6 +6,11 @@ export enum AppBackupTrigger {
   SCHEDULED = "scheduled",
 }
 
+export enum AppBackupEventType {
+  EXPORT = "export",
+  IMPORT = "import",
+}
+
 export interface AppBackup extends Document {
   trigger: AppBackupTrigger
   name: string
@@ -31,12 +36,22 @@ export type AppBackupFetchOpts = {
 }
 
 export interface AppBackupQueueData {
-  trigger: AppBackupTrigger
-  createdBy?: string
-  name?: string
+  eventType: AppBackupEventType
   appId: string
+  export?: {
+    trigger: AppBackupTrigger
+    name?: string
+    createdBy?: string
+  }
+  import?: {
+    backupId: string
+  }
 }
 
-export interface AppBackupMetadata extends AppBackupQueueData {
+export interface AppBackupMetadata {
+  appId: string
+  trigger: AppBackupTrigger
+  name?: string
+  createdBy?: string
   createdAt: string
 }
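
With AppBackupQueueData reshaped, a job payload now states its event type up front and nests only the fields that event needs. Two example payloads, assuming the types from the diff above are in scope (the IDs and name are placeholders):

// Assumes AppBackupEventType, AppBackupTrigger and AppBackupQueueData
// from the diff above are in scope; IDs and names are placeholder values.
const exportJob: AppBackupQueueData = {
  eventType: AppBackupEventType.EXPORT,
  appId: "app_dev_example",
  export: {
    trigger: AppBackupTrigger.SCHEDULED,
    name: "nightly backup",
  },
}

const importJob: AppBackupQueueData = {
  eventType: AppBackupEventType.IMPORT,
  appId: "app_dev_example",
  import: {
    backupId: "backup_example_id",
  },
}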