rely on llm status rather than initialised variable

Martin McKeaveney 2024-12-04 10:15:22 +00:00
parent 82a1165077
commit 7fcff8a628
2 changed files with 3 additions and 3 deletions

@@ -1 +1 @@
-Subproject commit e60f4b1b364fd49d2bb082f298757f83cb2032f0
+Subproject commit 0d7fa31d4c4019690e2200323421025cdc74b89e

@@ -126,8 +126,8 @@ export async function processAIColumns<T extends Row | Row[]>(
   const numRows = Array.isArray(inputRows) ? inputRows.length : 1
   span?.addTags({ table_id: table._id, numRows })
   const rows = Array.isArray(inputRows) ? inputRows : [inputRows]
-  const llm = await pro.ai.LargeLanguageModel.forCurrentTenant("gpt-4o-mini")
-  if (rows && llm.initialised) {
+  const llmWrapper = await pro.ai.LargeLanguageModel.forCurrentTenant("gpt-4o-mini")
+  if (rows && llmWrapper.llm) {
     // Ensure we have snippet context
     await context.ensureSnippetContext()
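
The change replaces a separate `initialised` boolean with a direct check on the wrapper's `llm` field: if the underlying client exists, the model is ready. Below is a minimal sketch of that pattern; the class shape, the `LLMClient` interface, and the env-var config source are assumptions for illustration, not the actual pro.ai implementation.

// Sketch of the "check the field, not a flag" pattern. Names here
// (LLMClient, LargeLanguageModelWrapper, OPENAI_API_KEY) are hypothetical.
interface LLMClient {
  prompt(text: string): Promise<string>
}

class LargeLanguageModelWrapper {
  // `llm` stays undefined until configuration is available, so its
  // truthiness doubles as the readiness signal.
  llm?: LLMClient

  static async forCurrentTenant(model: string): Promise<LargeLanguageModelWrapper> {
    const wrapper = new LargeLanguageModelWrapper()
    const apiKey = process.env.OPENAI_API_KEY // hypothetical config source
    if (apiKey) {
      wrapper.llm = {
        // Stub client standing in for a real provider call.
        prompt: async text => `response from ${model}: ${text}`,
      }
    }
    return wrapper
  }
}

async function example() {
  const llmWrapper = await LargeLanguageModelWrapper.forCurrentTenant("gpt-4o-mini")
  if (llmWrapper.llm) {
    // The check above narrows `llm` to a defined client, so this is type-safe.
    console.log(await llmWrapper.llm.prompt("hello"))
  }
}

Checking the field directly avoids a boolean that can drift out of sync with the client's actual state, and it lets TypeScript's narrowing guarantee the client is defined inside the branch.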