rely on llm status rather than initialised variable
parent 82a1165077
commit 7fcff8a628
@@ -1 +1 @@
-Subproject commit e60f4b1b364fd49d2bb082f298757f83cb2032f0
+Subproject commit 0d7fa31d4c4019690e2200323421025cdc74b89e
@@ -126,8 +126,8 @@ export async function processAIColumns<T extends Row | Row[]>(
   const numRows = Array.isArray(inputRows) ? inputRows.length : 1
   span?.addTags({ table_id: table._id, numRows })
   const rows = Array.isArray(inputRows) ? inputRows : [inputRows]
-  const llm = await pro.ai.LargeLanguageModel.forCurrentTenant("gpt-4o-mini")
-  if (rows && llm.initialised) {
+  const llmWrapper = await pro.ai.LargeLanguageModel.forCurrentTenant("gpt-4o-mini")
+  if (rows && llmWrapper.llm) {
     // Ensure we have snippet context
     await context.ensureSnippetContext()
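
The change replaces the separate `initialised` boolean with a check on the wrapper's `llm` field itself, so the guard can never disagree with whether a client actually exists, and TypeScript narrows `llmWrapper.llm` to a defined value inside the branch. Below is a minimal self-contained sketch of the pattern; only `forCurrentTenant`, the `llm` field, and the `"gpt-4o-mini"` model name come from the diff, and every other name (`LLMClient`, `fetchTenantAIConfig`, `makeClient`) is a hypothetical stand-in, not the codebase's real implementation.

// Sketch only: names other than `forCurrentTenant` and `llm` are assumed.
interface LLMClient {
  prompt(text: string): Promise<string>
}

// Hypothetical per-tenant config lookup.
async function fetchTenantAIConfig(): Promise<{ apiKey?: string }> {
  return { apiKey: process.env.OPENAI_API_KEY }
}

// Hypothetical client factory.
function makeClient(model: string): LLMClient {
  return { prompt: async text => `[${model}] ${text}` }
}

class LLMWrapper {
  // `llm` is only assigned once a provider is configured, so its
  // presence doubles as the readiness status the commit relies on.
  llm?: LLMClient

  static async forCurrentTenant(model: string): Promise<LLMWrapper> {
    const wrapper = new LLMWrapper()
    const config = await fetchTenantAIConfig()
    if (config.apiKey) {
      wrapper.llm = makeClient(model)
    }
    return wrapper
  }
}

async function run() {
  const llmWrapper = await LLMWrapper.forCurrentTenant("gpt-4o-mini")
  // Check the field, not a flag: if `llm` is set it is usable, and
  // TypeScript narrows it to `LLMClient` inside this block.
  if (llmWrapper.llm) {
    console.log(await llmWrapper.llm.prompt("hello"))
  }
}

run()

A standalone `initialised` flag can be set without the client existing (or the reverse) and gives the type checker nothing to narrow on; using the optional field as the status removes that failure mode.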