Cleaning up tables when they are deleted; refactored a bit to make the delete path more similar to table save.

mike12345567 2024-04-23 13:34:50 +01:00
parent de1039fa92
commit 006addb9ca
3 changed files with 52 additions and 29 deletions
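
In outline, the change moves the post-delete housekeeping (search index removal, static formula checks, attachment cleanup, SQS/SQLite mirror removal) into a single helper that destroy() calls at the end, much as saves funnel through TableSaveFunctions. A minimal sketch of that ordering follows; the names mirror the diff below, but every body here is a stand-in, not Budibase code:

// Sketch of the cleanup ordering introduced in this commit - stand-in
// implementations only, so the example compiles on its own.
type Table = { _id?: string }
type Row = { _id?: string }

async function internalTableCleanupSketch(table: Table, rows?: Row[]) {
  await removeSearchIndex(`search:${table._id!}`) // 1. drop the table's search index
  await runFormulaChecksOnDeletion(table) // 2. static formula checks; needs _id
  if (rows) {
    await cleanupAttachments(table, rows) // 3. only when the full rows were fetched
  }
  await removeFromSqliteMirror(table) // 4. SQS/SQLite copy, behind a feature flag
}

// Stand-ins so the sketch type-checks; not the real functions.
async function removeSearchIndex(name: string) {}
async function runFormulaChecksOnDeletion(table: Table) {}
async function cleanupAttachments(table: Table, rows: Row[]) {}
async function removeFromSqliteMirror(table: Table) {}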

View File

@@ -33,6 +33,7 @@ import {
 } from "@budibase/types"
 import sdk from "../../../sdk"
 import env from "../../../environment"
+import { runStaticFormulaChecks } from "./bulkFormula"

 export async function clearColumns(table: Table, columnNames: string[]) {
   const db = context.getAppDB()
@@ -495,5 +496,31 @@ export function setStaticSchemas(datasource: Datasource, table: Table) {
   return table
 }

+export async function internalTableCleanup(table: Table, rows?: Row[]) {
+  const db = context.getAppDB()
+  const tableId = table._id!
+  // remove table search index
+  if (!env.isTest() || env.COUCH_DB_URL) {
+    const currentIndexes = await db.getIndexes()
+    const existingIndex = currentIndexes.indexes.find(
+      (existing: any) => existing.name === `search:${tableId}`
+    )
+    if (existingIndex) {
+      await db.deleteIndex(existingIndex)
+    }
+  }
+  // has to run after the table is deleted, but the table must still have its _id
+  await runStaticFormulaChecks(table, {
+    deletion: true,
+  })
+  if (rows) {
+    await AttachmentCleanup.tableDelete(table, rows)
+  }
+  if (env.SQS_SEARCH_ENABLE) {
+    await sdk.tables.sqs.removeTableFromSqlite(table)
+  }
+}
+
 const _TableSaveFunctions = TableSaveFunctions
 export { _TableSaveFunctions as TableSaveFunctions }
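
For callers, the rows parameter is optional: pass the freshly deleted rows when attachment cleanup should run, and omit them when the full docs were never fetched. A hypothetical call site, assuming the helper as defined above:

// Hypothetical usage; `table` and `deletedRows` come from the caller.
await internalTableCleanup(table, deletedRows) // full cleanup, incl. attachments
await internalTableCleanup(table) // same cleanup, but skips AttachmentCleanup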

View File

@@ -10,6 +10,7 @@ import {
 import {
   hasTypeChanged,
   TableSaveFunctions,
+  internalTableCleanup,
 } from "../../../../api/controllers/table/utils"
 import { EventType, updateLinks } from "../../../../db/linkedRows"
 import { cloneDeep } from "lodash/fp"
@@ -128,16 +129,20 @@ export async function destroy(table: Table) {
   const db = context.getAppDB()
   const tableId = table._id!
-  // Delete all rows for that table
-  const rowsData = await db.allDocs(
-    getRowParams(tableId, null, {
-      include_docs: true,
-    })
-  )
-  await db.bulkDocs(
-    rowsData.rows.map((row: any) => ({ ...row.doc, _deleted: true }))
-  )
-  await quotas.removeRows(rowsData.rows.length, {
+  // Delete all rows for that table - the full rows have to be retrieved for
+  // attachment cleanup; it may be worth investigating whether there is a
+  // better way, since deleting the rows without `include_docs` would be faster
+  const rows = (
+    await db.allDocs<Row>(
+      getRowParams(tableId, null, {
+        include_docs: true,
+      })
+    )
+  ).rows.map(data => data.doc!)
+  await db.bulkDocs(rows.map((row: Row) => ({ ...row, _deleted: true })))
+  // remove rows from quota
+  await quotas.removeRows(rows.length, {
     tableId,
   })
@@ -150,25 +155,8 @@ export async function destroy(table: Table) {
   // don't remove the table itself until very end
   await db.remove(tableId, table._rev)
-  // remove table search index
-  if (!env.isTest() || env.COUCH_DB_URL) {
-    const currentIndexes = await db.getIndexes()
-    const existingIndex = currentIndexes.indexes.find(
-      (existing: any) => existing.name === `search:${tableId}`
-    )
-    if (existingIndex) {
-      await db.deleteIndex(existingIndex)
-    }
-  }
-  // has to run after, make sure it has _id
-  await runStaticFormulaChecks(table, {
-    deletion: true,
-  })
-  await AttachmentCleanup.tableDelete(
-    table,
-    rowsData.rows.map((row: any) => row.doc)
-  )
+  // final cleanup: attachments, indexes, SQS
+  await internalTableCleanup(table, rows)
   return { table }
 }
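
The comment in destroy() above floats a faster alternative: soft-delete the rows without `include_docs`, at the cost of losing the full docs that attachment cleanup needs. A hypothetical sketch of that path, relying only on the id/rev metadata that CouchDB's allDocs returns when docs are not included:

// Hypothetical faster delete: no include_docs, so only ids and revs come back.
// Attachment cleanup could NOT run afterwards - which is why this commit keeps
// fetching the full rows instead.
interface AllDocsMeta {
  rows: { id: string; value: { rev: string } }[]
}
interface MetaDb {
  allDocs(params: object): Promise<AllDocsMeta>
  bulkDocs(docs: object[]): Promise<unknown>
}

async function softDeleteRowsWithoutDocs(db: MetaDb, rowParams: object) {
  // rowParams would be e.g. getRowParams(tableId, null, {}) minus include_docs
  const result = await db.allDocs(rowParams)
  await db.bulkDocs(
    result.rows.map(r => ({ _id: r.id, _rev: r.value.rev, _deleted: true }))
  )
}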

View File

@@ -5930,6 +5930,14 @@
   resolved "https://registry.yarnpkg.com/@types/range-parser/-/range-parser-1.2.4.tgz#cd667bcfdd025213aafb7ca5915a932590acdcdc"
   integrity sha512-EEhsLsD6UsDM1yFhAvy0Cjr6VwmpMWqFBCb9w07wVugF7w9nfajxLuVmngTIpgS6svCnm6Vaw+MZhoDCKnOfsw==

+"@types/readable-stream@^4.0.0":
+  version "4.0.11"
+  resolved "https://registry.yarnpkg.com/@types/readable-stream/-/readable-stream-4.0.11.tgz#684f1e947c90cb6a8ad3904523d650bb66cdbb84"
+  integrity sha512-R3eUMUTTKoIoaz7UpYLxvZCrOmCRPRbAmoDDHKcimTEySltaJhF8hLzj4+EzyDifiX5eK6oDQGSfmNnXjxZzYQ==
+  dependencies:
+    "@types/node" "*"
+    safe-buffer "~5.1.1"
+
 "@types/readdir-glob@*":
   version "1.1.5"
   resolved "https://registry.yarnpkg.com/@types/readdir-glob/-/readdir-glob-1.1.5.tgz#21a4a98898fc606cb568ad815f2a0eedc24d412a"