Initial work to get file streaming working when dealing with a large number of file exports, tested with up to 1.5GB of attachments.

This commit is contained in:
mike12345567 2023-12-01 18:36:40 +00:00
parent fcb862c82f
commit 00a3c630ef
1 changed file with 20 additions and 7 deletions

View File

@ -305,20 +305,33 @@ export async function retrieveDirectory(bucketName: string, path: string) {
let writePath = join(budibaseTempDir(), v4()) let writePath = join(budibaseTempDir(), v4())
fs.mkdirSync(writePath) fs.mkdirSync(writePath)
const objects = await listAllObjects(bucketName, path) const objects = await listAllObjects(bucketName, path)
let fullObjects = await Promise.all( let streams = await Promise.all(
objects.map(obj => retrieve(bucketName, obj.Key!)) objects.map(obj => getReadStream(bucketName, obj.Key!))
) )
let count = 0 let count = 0
const writePromises: Promise<Error>[] = []
for (let obj of objects) { for (let obj of objects) {
const filename = obj.Key! const filename = obj.Key!
const data = fullObjects[count++] const stream = streams[count++]
const possiblePath = filename.split("/") const possiblePath = filename.split("/")
if (possiblePath.length > 1) { const dirs = possiblePath.slice(0, possiblePath.length - 1)
const dirs = possiblePath.slice(0, possiblePath.length - 1) const possibleDir = join(writePath, ...dirs)
fs.mkdirSync(join(writePath, ...dirs), { recursive: true }) if (possiblePath.length > 1 && !fs.existsSync(possibleDir)) {
fs.mkdirSync(possibleDir, { recursive: true })
} }
fs.writeFileSync(join(writePath, ...possiblePath), data) const writeStream = fs.createWriteStream(join(writePath, ...possiblePath), {
mode: 0o644,
})
stream.pipe(writeStream)
writePromises.push(
new Promise((resolve, reject) => {
stream.on("finish", resolve)
stream.on("error", reject)
writeStream.on("error", reject)
})
)
} }
await Promise.all(writePromises)
return writePath return writePath
} }