Merge branch 'feature/app-backups' of github.com:Budibase/budibase into feature/backups-ui

This commit is contained in:
mike12345567 2022-10-21 16:09:18 +01:00
commit d543b52c19
4 changed files with 105 additions and 59 deletions

View File

@@ -22,7 +22,7 @@ export function createQueue<T>(
): BullQueue.Queue<T> {
const queueConfig: any = redisProtocolUrl || { redis: opts }
let queue: any
if (env.isTest()) {
if (!env.isTest()) {
queue = new BullQueue(jobQueue, queueConfig)
} else {
queue = new InMemoryQueue(jobQueue, queueConfig)

View File

@@ -20,6 +20,7 @@ import {
import { events } from "@budibase/backend-core"
import { backups } from "@budibase/pro"
import { AppBackupTrigger } from "@budibase/types"
import env from "../../../environment"
// the max time we can wait for an invalidation to complete before considering it failed
const MAX_PENDING_TIME_MS = 30 * 60000
@@ -107,10 +108,17 @@ async function deployApp(deployment: any, userId: string) {
const devAppId = getDevelopmentAppID(appId)
const productionAppId = getProdAppID(appId)
// trigger backup initially
await backups.triggerAppBackup(productionAppId, AppBackupTrigger.PUBLISH, {
createdBy: userId,
})
// can't do this in test
if (!env.isTest()) {
// trigger backup initially
await backups.triggerAppBackup(
productionAppId,
AppBackupTrigger.PUBLISH,
{
createdBy: userId,
}
)
}
const config: any = {
source: devAppId,

View File

@@ -1,6 +1,11 @@
import { backups } from "@budibase/pro"
import { db as dbCore, objectStore, tenancy } from "@budibase/backend-core"
import { AppBackupQueueData, AppBackupStatus } from "@budibase/types"
import {
AppBackupQueueData,
AppBackupStatus,
AppBackupTrigger,
AppBackupType,
} from "@budibase/types"
import { exportApp } from "./exports"
import { importApp } from "./imports"
import { calculateBackupStats } from "../statistics"
@@ -8,19 +13,96 @@ import { Job } from "bull"
import fs from "fs"
import env from "../../../environment"
/** Options accepted by runBackup. */
interface BackupOpts {
  /** Existing backup doc (id + CouchDB rev) to mark complete once the export finishes. */
  doc?: { id: string; rev: string }
  /** ID of the user that initiated the backup, when known. */
  createdBy?: string
}
/**
 * Destroys the development database for the given dev app ID.
 * skip_setup obtains the DB handle without attempting to create it first.
 */
async function removeExistingApp(devId: string) {
  await dbCore.dangerousGetDB(devId, { skip_setup: true }).destroy()
}
/**
 * Exports the app as a tarball, uploads it to the backups object-store bucket
 * and records the backup's status/metadata.
 *
 * @param name human-readable name for the backup
 * @param trigger what caused the backup (publish/manual/scheduled/restoring)
 * @param tenantId tenant the app belongs to - used to namespace the bucket path
 * @param appId app ID (dev or prod form; both derived forms are used below)
 * @param opts optional existing backup doc to update, and/or the creating user
 */
async function runBackup(
  name: string,
  trigger: AppBackupTrigger,
  tenantId: string,
  appId: string,
  opts?: BackupOpts
) {
  const devAppId = dbCore.getDevAppID(appId),
    prodAppId = dbCore.getProdAppID(appId)
  const timestamp = new Date().toISOString()
  // export the dev app to a tarball on disk, and gather its stats for metadata
  const tarPath = await exportApp(devAppId, { tar: true })
  const contents = await calculateBackupStats(devAppId)
  let filename = `${prodAppId}/backup-${timestamp}.tar.gz`
  // add the tenant to the bucket path if backing up within a multi-tenant environment
  if (env.MULTI_TENANCY) {
    // FIX: was `${tenantId}/$(unknown)` - `$(...)` is shell syntax and `unknown`
    // is not in scope; the intent is to prefix the tenant to the existing path
    filename = `${tenantId}/${filename}`
  }
  const bucket = objectStore.ObjectStoreBuckets.BACKUPS
  await objectStore.upload({
    path: tarPath,
    type: "application/gzip",
    bucket,
    filename,
    metadata: {
      name,
      trigger,
      timestamp,
      appId: prodAppId,
    },
  })
  if (opts?.doc) {
    // a queued backup doc already exists - mark it complete with the results
    await backups.updateBackupStatus(
      opts.doc.id,
      opts.doc.rev,
      AppBackupStatus.COMPLETE,
      contents,
      filename
    )
  } else {
    // no doc supplied (e.g. the pre-restore safety backup) - store fresh metadata
    await backups.storeAppBackupMetadata(
      {
        appId: prodAppId,
        timestamp,
        name,
        trigger,
        type: AppBackupType.BACKUP,
        status: AppBackupStatus.COMPLETE,
        contents,
        createdBy: opts?.createdBy,
      },
      { filename }
    )
  }
  // clear up the tarball after uploading it
  fs.rmSync(tarPath)
}
async function importProcessor(job: Job) {
const data: AppBackupQueueData = job.data
const appId = data.appId,
backupId = data.import!.backupId
const tenantId = tenancy.getTenantIDFromAppID(appId)
backupId = data.import!.backupId,
nameForBackup = data.import!.nameForBackup,
createdBy = data.import!.createdBy
const tenantId = tenancy.getTenantIDFromAppID(appId) as string
tenancy.doInTenant(tenantId, async () => {
const devAppId = dbCore.getDevAppID(appId)
const performImport = async (path: string) => {
// initially export the current state to disk - incase something goes wrong
await runBackup(
nameForBackup,
AppBackupTrigger.RESTORING,
tenantId,
appId,
{ createdBy }
)
// get the backup ready on disk
const { path } = await backups.downloadAppBackup(backupId)
// start by removing app database and contents of bucket - which will be updated
await removeExistingApp(devAppId)
let status = AppBackupStatus.COMPLETE
try {
await importApp(devAppId, dbCore.dangerousGetDB(devAppId), {
file: {
type: "application/gzip",
@@ -28,26 +110,10 @@ async function importProcessor(job: Job) {
},
key: path,
})
}
// initially export the current state to disk - incase something goes wrong
const backupTarPath = await exportApp(devAppId, { tar: true })
// get the backup ready on disk
const { path } = await backups.downloadAppBackup(backupId)
// start by removing app database and contents of bucket - which will be updated
await removeExistingApp(devAppId)
try {
await performImport(path)
} catch (err) {
// rollback - clear up failed import and re-import the pre-backup
await removeExistingApp(devAppId)
await performImport(backupTarPath)
status = AppBackupStatus.FAILED
}
await backups.updateRestoreStatus(
data.docId,
data.docRev,
AppBackupStatus.COMPLETE
)
fs.rmSync(backupTarPath)
await backups.updateRestoreStatus(data.docId, data.docRev, status)
})
}
@@ -56,40 +122,11 @@ async function exportProcessor(job: Job) {
const appId = data.appId,
trigger = data.export!.trigger,
name = data.export!.name || `${trigger} - backup`
const tenantId = tenancy.getTenantIDFromAppID(appId)
const tenantId = tenancy.getTenantIDFromAppID(appId) as string
await tenancy.doInTenant(tenantId, async () => {
const devAppId = dbCore.getDevAppID(appId),
prodAppId = dbCore.getProdAppID(appId)
const timestamp = new Date().toISOString()
const tarPath = await exportApp(devAppId, { tar: true })
const contents = await calculateBackupStats(devAppId)
let filename = `${prodAppId}/backup-${timestamp}.tar.gz`
// add the tenant to the bucket path if backing up within a multi-tenant environment
if (env.MULTI_TENANCY) {
filename = `${tenantId}/${filename}`
}
const bucket = objectStore.ObjectStoreBuckets.BACKUPS
await objectStore.upload({
path: tarPath,
type: "application/gzip",
bucket,
filename,
metadata: {
name,
trigger,
timestamp,
appId: prodAppId,
},
return runBackup(name, trigger, tenantId, appId, {
doc: { id: data.docId, rev: data.docRev },
})
await backups.updateBackupStatus(
data.docId,
data.docRev,
AppBackupStatus.COMPLETE,
contents,
filename
)
// clear up the tarball after uploading it
fs.rmSync(tarPath)
})
}

View File

@@ -16,6 +16,7 @@ export enum AppBackupTrigger {
PUBLISH = "publish",
MANUAL = "manual",
SCHEDULED = "scheduled",
RESTORING = "restoring",
}
export interface AppBackupContents {