Add configurable token limit and truncation warning to Lab 1 confidence chat

This commit is contained in:
c4ch3c4d3
2026-04-27 10:58:13 -06:00
parent 269a4e4985
commit a7c1bda07c
5 changed files with 121 additions and 6 deletions
+24
View File
@@ -2,10 +2,12 @@ import { describe, expect, it } from "vitest";
import {
extractLab1AssistantContent,
extractLab1FinishReason,
extractLab1ResponseTokens,
formatProbabilityPercent,
getConfidenceBand,
logprobToProbabilityPercent,
parseLab1MaxTokens,
} from "~/lib/lab1-confidence";
describe("logprobToProbabilityPercent", () => {
@@ -30,6 +32,28 @@ describe("extractLab1AssistantContent", () => {
});
});
describe("extractLab1FinishReason", () => {
  it("reads the upstream finish reason when it is present", () => {
    // Minimal upstream-shaped payload carrying an explicit finish reason
    // on its first choice.
    const payload = {
      choices: [{ finish_reason: "length" }],
    };

    const reason = extractLab1FinishReason(payload);

    expect(reason).toBe("length");
  });
});
describe("parseLab1MaxTokens", () => {
  it("uses a bounded positive environment override", () => {
    // [raw env string, expected limit] pairs: an in-range value is used
    // as-is, an oversized value is bounded to 2048, and a non-numeric
    // value yields the 512 default.
    const cases = [
      ["768", 768],
      ["999999", 2048],
      ["nope", 512],
    ];

    for (const [raw, expected] of cases) {
      expect(parseLab1MaxTokens(raw)).toBe(expected);
    }
  });
});
describe("extractLab1ResponseTokens", () => {
it("maps token logprobs and alternate candidates into display data", () => {
expect(