Skip to content

Commit e0c83ee

Browse files
committed
feature: token counting for different models
1 parent 8b7d835 commit e0c83ee

File tree

2 files changed

+31
-14
lines changed

2 files changed

+31
-14
lines changed

src/lib/tokenizer.ts

Lines changed: 2 additions & 7 deletions
Original file line numberDiff line numberDiff line change
@@ -37,10 +37,7 @@ const calculateToolCallsTokens = (
3737
let tokens = 0
3838
for (const toolCall of toolCalls) {
3939
tokens += constants.funcInit
40-
tokens += encoder.encode(toolCall.id).length
41-
tokens += encoder.encode(toolCall.type).length
42-
tokens += encoder.encode(toolCall.function.name).length
43-
tokens += encoder.encode(toolCall.function.arguments).length
40+
tokens += encoder.encode(JSON.stringify(toolCall)).length
4441
}
4542
tokens += constants.funcEnd
4643
return tokens
@@ -57,9 +54,6 @@ const calculateContentPartsTokens = (
5754
for (const part of contentParts) {
5855
if (part.type === "image_url") {
5956
tokens += encoder.encode(part.image_url.url).length + 85
60-
if (part.image_url.detail === "high") {
61-
tokens += 85
62-
}
6357
} else if (part.text) {
6458
tokens += encoder.encode(part.text).length
6559
}
@@ -346,6 +340,7 @@ export const getTokenCount = async (
346340
inputTokens += numTokensForTools(payload.tools, encoder, constants)
347341
}
348342
const outputTokens = calculateTokens(outputMessages, encoder, constants)
343+
349344
return {
350345
input: inputTokens,
351346
output: outputTokens,

src/routes/messages/count-tokens-handler.ts

Lines changed: 29 additions & 7 deletions
Original file line numberDiff line numberDiff line change
@@ -13,12 +13,12 @@ import { translateToOpenAI } from "./non-stream-translation"
1313
*/
1414
export async function handleCountTokens(c: Context) {
1515
try {
16+
const anthropicBeta = c.req.header("anthropic-beta")
17+
1618
const anthropicPayload = await c.req.json<AnthropicMessagesPayload>()
1719

18-
// Convert to OpenAI format for token counting
1920
const openAIPayload = translateToOpenAI(anthropicPayload)
2021

21-
// Find the selected model
2222
const selectedModel = state.models?.data.find(
2323
(model) => model.id === anthropicPayload.model,
2424
)
@@ -30,17 +30,39 @@ export async function handleCountTokens(c: Context) {
3030
})
3131
}
3232

33-
// Calculate token count
3433
const tokenCount = await getTokenCount(openAIPayload, selectedModel)
35-
consola.debug("Token count:", tokenCount)
3634

37-
// Return response in Anthropic API format
35+
if (anthropicPayload.tools && anthropicPayload.tools.length > 0) {
36+
let mcpToolExist = false
37+
if (anthropicBeta?.startsWith("claude-code")) {
38+
mcpToolExist = anthropicPayload.tools.some((tool) =>
39+
tool.name.startsWith("mcp__"),
40+
)
41+
}
42+
if (!mcpToolExist) {
43+
if (anthropicPayload.model.startsWith("claude")) {
44+
// https://docs.anthropic.com/en/docs/agents-and-tools/tool-use/overview#pricing
45+
tokenCount.input = tokenCount.input + 346
46+
} else if (anthropicPayload.model.startsWith("grok")) {
47+
tokenCount.input = tokenCount.input + 480
48+
}
49+
}
50+
}
51+
52+
let finalTokenCount = tokenCount.input + tokenCount.output
53+
if (anthropicPayload.model.startsWith("claude")) {
54+
finalTokenCount = Math.round(finalTokenCount * 1.15)
55+
} else if (anthropicPayload.model.startsWith("grok")) {
56+
finalTokenCount = Math.round(finalTokenCount * 1.03)
57+
}
58+
59+
consola.info("Token count:", finalTokenCount)
60+
3861
return c.json({
39-
input_tokens: tokenCount.input,
62+
input_tokens: finalTokenCount,
4063
})
4164
} catch (error) {
4265
consola.error("Error counting tokens:", error)
43-
// Return default value on error
4466
return c.json({
4567
input_tokens: 1,
4668
})

0 commit comments

Comments (0)