Skip to content

Commit 135a851

Browse files
committed
feat: add new models (gpt-5*) and update pricing data
1 parent 84e007d commit 135a851

46 files changed

Lines changed: 5133 additions & 227 deletions

Some content is hidden

Large commits have some content hidden by default. Use the search box below to find content that may be hidden.

src/__snapshots__/GptEncoding.test.ts.snap

Lines changed: 2571 additions & 31 deletions
Large diffs are not rendered by default.

src/codegen/modelScape.js

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -224,4 +224,4 @@ const codegen = function codegen(models, snapshots, otherModels = null) {
224224

225225
// set a breakpoint in https://platform.openai.com/docs/pricing
226226
// and run this entire file with the last uncommented to get the data:
227-
// copy(codegen(hC, pC, aC))
227+
// copy(codegen(uLe /* "./models-data/ */, dLe /* "./snapshots-data/ */, iLe /* name: "Other models" */))

src/model/gpt-3.5-turbo-1106.ts

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -9,7 +9,7 @@ import { GptEncoding } from '../GptEncoding.js'
99
export * from '../constants.js'
1010
export * from '../specialTokens.js'
1111
// prettier-ignore
12-
const api = GptEncoding.getEncodingApiForModel('gpt-3.5-turbo-1106', () => bpeRanks, {name:"gpt-3.5-turbo-1106",slug:"gpt-3-5-turbo-1106",performance:1,latency:2,modalities:{input:["text"],output:["text"]},context_window:16385,max_output_tokens:4096,knowledge_cutoff:new Date(1630454400000),supported_features:["fine_tuning"],supported_endpoints:["chat_completions","responses","batch","fine_tuning"],reasoning_tokens:false,price_data:{main:{input:1,output:2},batch:{input:.5,output:1}}})
12+
const api = GptEncoding.getEncodingApiForModel('gpt-3.5-turbo-1106', () => bpeRanks, {name:"gpt-3.5-turbo-1106",slug:"gpt-3-5-turbo-1106",deprecated:true,performance:1,latency:2,modalities:{input:["text"],output:["text"]},context_window:16385,max_output_tokens:4096,knowledge_cutoff:new Date(1630454400000),supported_features:["fine_tuning"],supported_endpoints:["chat_completions","responses","batch","fine_tuning"],reasoning_tokens:false,price_data:{main:{input:1,output:2},batch:{input:.5,output:1}}})
1313
const {
1414
decode,
1515
decodeAsyncGenerator,

src/model/gpt-4-0125-preview.ts

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -9,7 +9,7 @@ import { GptEncoding } from '../GptEncoding.js'
99
export * from '../constants.js'
1010
export * from '../specialTokens.js'
1111
// prettier-ignore
12-
const api = GptEncoding.getEncodingApiForModel('gpt-4-0125-preview', () => bpeRanks, {name:"gpt-4-0125-preview",slug:"gpt-4-0125-preview",performance:2,latency:3,modalities:{input:["text"],output:["text"]},context_window:128000,max_output_tokens:4096,knowledge_cutoff:new Date(1701388800000),supported_features:["fine_tuning"],supported_endpoints:["chat_completions","responses","assistants"],reasoning_tokens:false,price_data:{main:{input:10,output:30},batch:{input:5,output:15}}})
12+
const api = GptEncoding.getEncodingApiForModel('gpt-4-0125-preview', () => bpeRanks, {name:"gpt-4-0125-preview",slug:"gpt-4-0125-preview",deprecated:true,performance:2,latency:3,modalities:{input:["text"],output:["text"]},context_window:128000,max_output_tokens:4096,knowledge_cutoff:new Date(1701388800000),supported_features:["fine_tuning"],supported_endpoints:["chat_completions","responses","assistants"],reasoning_tokens:false,price_data:{main:{input:10,output:30},batch:{input:5,output:15}}})
1313
const {
1414
decode,
1515
decodeAsyncGenerator,

src/model/gpt-4-0314.ts

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -9,7 +9,7 @@ import { GptEncoding } from '../GptEncoding.js'
99
export * from '../constants.js'
1010
export * from '../specialTokens.js'
1111
// prettier-ignore
12-
const api = GptEncoding.getEncodingApiForModel('gpt-4-0314', () => bpeRanks, {name:"gpt-4-0314",slug:"gpt-4-0314",performance:2,latency:3,modalities:{input:["text"],output:["text"]},context_window:8192,max_output_tokens:8192,knowledge_cutoff:new Date(1701388800000),supported_features:["fine_tuning","streaming"],supported_endpoints:["chat_completions","responses","assistants"],reasoning_tokens:false,price_data:{main:{input:30,output:60},batch:{input:15,output:30}}})
12+
const api = GptEncoding.getEncodingApiForModel('gpt-4-0314', () => bpeRanks, {name:"gpt-4-0314",slug:"gpt-4-0314",deprecated:true,performance:2,latency:3,modalities:{input:["text"],output:["text"]},context_window:8192,max_output_tokens:8192,knowledge_cutoff:new Date(1701388800000),supported_features:["fine_tuning","streaming"],supported_endpoints:["chat_completions","responses","assistants"],reasoning_tokens:false,price_data:{main:{input:30,output:60},batch:{input:15,output:30}}})
1313
const {
1414
decode,
1515
decodeAsyncGenerator,

src/model/gpt-4-turbo-preview.ts

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -9,7 +9,7 @@ import { GptEncoding } from '../GptEncoding.js'
99
export * from '../constants.js'
1010
export * from '../specialTokens.js'
1111
// prettier-ignore
12-
const api = GptEncoding.getEncodingApiForModel('gpt-4-turbo-preview', () => bpeRanks, {name:"gpt-4-0125-preview",slug:"gpt-4-0125-preview",performance:2,latency:3,modalities:{input:["text"],output:["text"]},context_window:128000,max_output_tokens:4096,knowledge_cutoff:new Date(1701388800000),supported_features:["fine_tuning"],supported_endpoints:["chat_completions","responses","assistants"],reasoning_tokens:false,price_data:{main:{input:10,output:30},batch:{input:5,output:15}}})
12+
const api = GptEncoding.getEncodingApiForModel('gpt-4-turbo-preview', () => bpeRanks, {name:"gpt-4-0125-preview",slug:"gpt-4-0125-preview",deprecated:true,performance:2,latency:3,modalities:{input:["text"],output:["text"]},context_window:128000,max_output_tokens:4096,knowledge_cutoff:new Date(1701388800000),supported_features:["fine_tuning"],supported_endpoints:["chat_completions","responses","assistants"],reasoning_tokens:false,price_data:{main:{input:10,output:30},batch:{input:5,output:15}}})
1313
const {
1414
decode,
1515
decodeAsyncGenerator,

src/model/gpt-4.1-mini-2025-04-14.ts

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -9,7 +9,7 @@ import { GptEncoding } from '../GptEncoding.js'
99
export * from '../constants.js'
1010
export * from '../specialTokens.js'
1111
// prettier-ignore
12-
const api = GptEncoding.getEncodingApiForModel('gpt-4.1-mini-2025-04-14', () => bpeRanks, {name:"gpt-4.1-mini-2025-04-14",slug:"gpt-4.1-mini-2025-04-14",performance:3,latency:4,modalities:{input:["text","image"],output:["text"]},context_window:1047576,max_output_tokens:32768,knowledge_cutoff:new Date(1717200000000),supported_features:["streaming","function_calling","fine_tuning","file_search","file_uploads","web_search","structured_outputs","image_input"],supported_endpoints:["chat_completions","responses","assistants","batch","fine_tuning"],reasoning_tokens:false})
12+
const api = GptEncoding.getEncodingApiForModel('gpt-4.1-mini-2025-04-14', () => bpeRanks, {name:"gpt-4.1-mini-2025-04-14",slug:"gpt-4.1-mini-2025-04-14",performance:3,latency:4,modalities:{input:["text","image"],output:["text"]},context_window:1047576,max_output_tokens:32768,knowledge_cutoff:new Date(1717200000000),supported_features:["predicted_outputs","streaming","function_calling","fine_tuning","file_search","file_uploads","web_search","structured_outputs","image_input"],supported_endpoints:["chat_completions","responses","assistants","batch","fine_tuning"],reasoning_tokens:false})
1313
const {
1414
decode,
1515
decodeAsyncGenerator,

src/model/gpt-4.1-mini.ts

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -9,7 +9,7 @@ import { GptEncoding } from '../GptEncoding.js'
99
export * from '../constants.js'
1010
export * from '../specialTokens.js'
1111
// prettier-ignore
12-
const api = GptEncoding.getEncodingApiForModel('gpt-4.1-mini', () => bpeRanks, {name:"gpt-4.1-mini-2025-04-14",slug:"gpt-4.1-mini-2025-04-14",performance:3,latency:4,modalities:{input:["text","image"],output:["text"]},context_window:1047576,max_output_tokens:32768,knowledge_cutoff:new Date(1717200000000),supported_features:["streaming","function_calling","fine_tuning","file_search","file_uploads","web_search","structured_outputs","image_input"],supported_endpoints:["chat_completions","responses","assistants","batch","fine_tuning"],reasoning_tokens:false})
12+
const api = GptEncoding.getEncodingApiForModel('gpt-4.1-mini', () => bpeRanks, {name:"gpt-4.1-mini-2025-04-14",slug:"gpt-4.1-mini-2025-04-14",performance:3,latency:4,modalities:{input:["text","image"],output:["text"]},context_window:1047576,max_output_tokens:32768,knowledge_cutoff:new Date(1717200000000),supported_features:["predicted_outputs","streaming","function_calling","fine_tuning","file_search","file_uploads","web_search","structured_outputs","image_input"],supported_endpoints:["chat_completions","responses","assistants","batch","fine_tuning"],reasoning_tokens:false})
1313
const {
1414
decode,
1515
decodeAsyncGenerator,

src/model/gpt-4.1-nano-2025-04-14.ts

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -9,7 +9,7 @@ import { GptEncoding } from '../GptEncoding.js'
99
export * from '../constants.js'
1010
export * from '../specialTokens.js'
1111
// prettier-ignore
12-
const api = GptEncoding.getEncodingApiForModel('gpt-4.1-nano-2025-04-14', () => bpeRanks, {name:"gpt-4.1-nano-2025-04-14",slug:"gpt-4.1-nano-2025-04-14",performance:2,latency:5,modalities:{input:["text","image"],output:["text"]},context_window:1047576,max_output_tokens:32768,knowledge_cutoff:new Date(1717200000000),supported_features:["streaming","function_calling","file_search","file_uploads","structured_outputs","image_input","prompt_caching","fine_tuning"],supported_endpoints:["chat_completions","responses","assistants","batch","fine_tuning"],reasoning_tokens:false})
12+
const api = GptEncoding.getEncodingApiForModel('gpt-4.1-nano-2025-04-14', () => bpeRanks, {name:"gpt-4.1-nano-2025-04-14",slug:"gpt-4.1-nano-2025-04-14",performance:2,latency:5,modalities:{input:["text","image"],output:["text"]},context_window:1047576,max_output_tokens:32768,knowledge_cutoff:new Date(1717200000000),supported_features:["predicted_outputs","streaming","function_calling","file_search","file_uploads","structured_outputs","image_input","prompt_caching","fine_tuning"],supported_endpoints:["chat_completions","responses","assistants","batch","fine_tuning"],reasoning_tokens:false})
1313
const {
1414
decode,
1515
decodeAsyncGenerator,

src/model/gpt-4.1-nano.ts

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -9,7 +9,7 @@ import { GptEncoding } from '../GptEncoding.js'
99
export * from '../constants.js'
1010
export * from '../specialTokens.js'
1111
// prettier-ignore
12-
const api = GptEncoding.getEncodingApiForModel('gpt-4.1-nano', () => bpeRanks, {name:"gpt-4.1-nano-2025-04-14",slug:"gpt-4.1-nano-2025-04-14",performance:2,latency:5,modalities:{input:["text","image"],output:["text"]},context_window:1047576,max_output_tokens:32768,knowledge_cutoff:new Date(1717200000000),supported_features:["streaming","function_calling","file_search","file_uploads","structured_outputs","image_input","prompt_caching","fine_tuning"],supported_endpoints:["chat_completions","responses","assistants","batch","fine_tuning"],reasoning_tokens:false})
12+
const api = GptEncoding.getEncodingApiForModel('gpt-4.1-nano', () => bpeRanks, {name:"gpt-4.1-nano-2025-04-14",slug:"gpt-4.1-nano-2025-04-14",performance:2,latency:5,modalities:{input:["text","image"],output:["text"]},context_window:1047576,max_output_tokens:32768,knowledge_cutoff:new Date(1717200000000),supported_features:["predicted_outputs","streaming","function_calling","file_search","file_uploads","structured_outputs","image_input","prompt_caching","fine_tuning"],supported_endpoints:["chat_completions","responses","assistants","batch","fine_tuning"],reasoning_tokens:false})
1313
const {
1414
decode,
1515
decodeAsyncGenerator,

0 commit comments

Comments (0)