Commit 2edb2ac

Merge pull request #43 from codinit-dev/42-add-anthropic-opus-latest
Changelog
✅ Added Claude Opus 4.5 to Anthropic provider
✅ Added Claude Opus 4.5 to OpenRouter provider
✅ Added Claude 4.5 models (Opus, Sonnet, Haiku) to Amazon Bedrock
✅ Added GPT-5.2 models (Instant, Thinking, Pro) to OpenAI provider
✅ Added GPT-5.2 models (Instant, Thinking, Pro) to OpenRouter provider
2 parents e255c8f + 2e6cb05 commit 2edb2ac

5 files changed: +109 -6 lines changed


app/lib/modules/llm/providers/amazon-bedrock.ts

Lines changed: 21 additions & 0 deletions
@@ -21,6 +21,27 @@ export default class AmazonBedrockProvider extends BaseProvider {
   };
 
   staticModels: ModelInfo[] = [
+    {
+      name: 'anthropic.claude-opus-4-5-20251101-v1:0',
+      label: 'Claude Opus 4.5 (Bedrock)',
+      provider: 'AmazonBedrock',
+      maxTokenAllowed: 200000,
+      maxCompletionTokens: 64000,
+    },
+    {
+      name: 'anthropic.claude-sonnet-4-5-20250929-v1:0',
+      label: 'Claude Sonnet 4.5 (Bedrock)',
+      provider: 'AmazonBedrock',
+      maxTokenAllowed: 200000,
+      maxCompletionTokens: 64000,
+    },
+    {
+      name: 'anthropic.claude-haiku-4-5-20251001-v1:0',
+      label: 'Claude Haiku 4.5 (Bedrock)',
+      provider: 'AmazonBedrock',
+      maxTokenAllowed: 200000,
+      maxCompletionTokens: 64000,
+    },
     {
       name: 'anthropic.claude-3-5-sonnet-20241022-v2:0',
       label: 'Claude 3.5 Sonnet v2 (Bedrock)',
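
All of the entries added in this PR, across Bedrock, Anthropic, OpenAI, and OpenRouter, follow the same shape. A minimal sketch of the ModelInfo structure they appear to conform to is below; the field names are taken from the diff, while the interface itself lives in the project's LLM types module and may carry additional optional fields, so treat this as an assumption rather than the repository's definition.

```ts
// Sketch of the ModelInfo shape implied by the entries in this PR.
// Field names come straight from the diff; the real interface in the
// repository may declare more optional fields than shown here.
interface ModelInfo {
  name: string; // provider-specific model id, e.g. 'anthropic.claude-opus-4-5-20251101-v1:0'
  label: string; // human-readable label shown in the model picker
  provider: string; // provider key, e.g. 'AmazonBedrock' or 'OpenRouter'
  maxTokenAllowed: number; // context window size in tokens
  maxCompletionTokens?: number; // maximum output/completion tokens
}
```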

app/lib/modules/llm/providers/anthropic.ts

Lines changed: 13 additions & 2 deletions
@@ -16,8 +16,17 @@ export default class AnthropicProvider extends BaseProvider {
   staticModels: ModelInfo[] = [
     /*
      * Essential fallback models - only the most stable/reliable ones
-     * Claude Sonnet 4.5: 200k context, 64k output, best balance of intelligence and speed
+     * Claude Opus 4.5: 200k context, 64k output, maximum intelligence with practical performance
      */
+    {
+      name: 'claude-opus-4-5-20251101',
+      label: 'Claude Opus 4.5',
+      provider: 'Anthropic',
+      maxTokenAllowed: 200000,
+      maxCompletionTokens: 64000,
+    },
+
+    // Claude Sonnet 4.5: 200k context, 64k output, best balance of intelligence and speed
     {
       name: 'claude-sonnet-4-5-20250929',
       label: 'Claude Sonnet 4.5',
@@ -94,7 +103,9 @@ export default class AnthropicProvider extends BaseProvider {
       // Determine completion token limits based on specific model
       let maxCompletionTokens = 128000; // default for older Claude 3 models
 
-      if (m.id?.includes('claude-sonnet-4-5') || m.id?.includes('claude-haiku-4-5')) {
+      if (m.id?.includes('claude-opus-4-5')) {
+        maxCompletionTokens = 64000; // Claude Opus 4.5: 64K output limit
+      } else if (m.id?.includes('claude-sonnet-4-5') || m.id?.includes('claude-haiku-4-5')) {
        maxCompletionTokens = 64000; // Claude 4.5 Sonnet/Haiku: 64K output limit
       } else if (m.id?.includes('claude-opus-4-1') || m.id?.includes('claude-opus-4')) {
         maxCompletionTokens = 32000; // Claude 4 Opus: 32K output limit
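
The placement of the new branch matters: the checks use substring matching, so without a dedicated 'claude-opus-4-5' case the Opus 4.5 id would fall through to the existing 'claude-opus-4' branch and be capped at 32K output. A minimal sketch of the resolution order after this change (the helper function is illustrative, not code from the repository):

```ts
// Illustrative helper mirroring the branch order in the diff; in the
// provider itself this logic runs inline over models returned by the API.
function resolveAnthropicCompletionLimit(modelId: string): number {
  if (modelId.includes('claude-opus-4-5')) {
    return 64000; // Claude Opus 4.5: 64K output limit
  } else if (modelId.includes('claude-sonnet-4-5') || modelId.includes('claude-haiku-4-5')) {
    return 64000; // Claude 4.5 Sonnet/Haiku: 64K output limit
  } else if (modelId.includes('claude-opus-4-1') || modelId.includes('claude-opus-4')) {
    return 32000; // Claude 4 Opus: 32K output limit
  }

  return 128000; // default for older Claude 3 models
}

// 'claude-opus-4-5-20251101' now resolves to 64000; before this change it
// matched the 'claude-opus-4' branch and was limited to 32000.
console.log(resolveAnthropicCompletionLimit('claude-opus-4-5-20251101'));
```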

app/lib/modules/llm/providers/open-router.ts

Lines changed: 37 additions & 1 deletion
@@ -30,8 +30,17 @@ export default class OpenRouterProvider extends BaseProvider {
   staticModels: ModelInfo[] = [
     /*
      * Essential fallback models - only the most stable/reliable ones
-     * Claude Sonnet 4.5 via OpenRouter: 200k context
+     * Claude Opus 4.5 via OpenRouter: 200k context, maximum intelligence
      */
+    {
+      name: 'anthropic/claude-opus-4-5',
+      label: 'Claude Opus 4.5',
+      provider: 'OpenRouter',
+      maxTokenAllowed: 200000,
+      maxCompletionTokens: 64000,
+    },
+
+    // Claude Sonnet 4.5 via OpenRouter: 200k context
     {
       name: 'anthropic/claude-sonnet-4-5',
       label: 'Claude Sonnet 4.5',
@@ -40,6 +49,33 @@ export default class OpenRouterProvider extends BaseProvider {
       maxCompletionTokens: 64000,
     },
 
+    // GPT-5.2 Pro via OpenRouter: 400k context, highest accuracy
+    {
+      name: 'openai/gpt-5.2-pro',
+      label: 'GPT-5.2 Pro',
+      provider: 'OpenRouter',
+      maxTokenAllowed: 400000,
+      maxCompletionTokens: 128000,
+    },
+
+    // GPT-5.2 Thinking via OpenRouter: 400k context, complex reasoning
+    {
+      name: 'openai/gpt-5.2-thinking',
+      label: 'GPT-5.2 Thinking',
+      provider: 'OpenRouter',
+      maxTokenAllowed: 400000,
+      maxCompletionTokens: 128000,
+    },
+
+    // GPT-5.2 Instant via OpenRouter: 400k context, optimized for speed
+    {
+      name: 'openai/gpt-5.2-instant',
+      label: 'GPT-5.2 Instant',
+      provider: 'OpenRouter',
+      maxTokenAllowed: 400000,
+      maxCompletionTokens: 128000,
+    },
+
     // GPT-5.1 via OpenRouter: 128k context
     {
       name: 'openai/gpt-5.1',

app/lib/modules/llm/providers/openai.ts

Lines changed: 37 additions & 2 deletions
@@ -16,8 +16,35 @@ export default class OpenAIProvider extends BaseProvider {
   staticModels: ModelInfo[] = [
     /*
      * Essential fallback models - only the most stable/reliable ones
-     * GPT-5.1: 128k context, 16k output limit (best for coding and agentic tasks)
+     * GPT-5.2 Pro: 400k context, 128k output, highest accuracy and quality
      */
+    {
+      name: 'gpt-5.2-pro',
+      label: 'GPT-5.2 Pro',
+      provider: 'OpenAI',
+      maxTokenAllowed: 400000,
+      maxCompletionTokens: 128000,
+    },
+
+    // GPT-5.2 Thinking: 400k context, 128k output, for complex reasoning and coding
+    {
+      name: 'gpt-5.2-thinking',
+      label: 'GPT-5.2 Thinking',
+      provider: 'OpenAI',
+      maxTokenAllowed: 400000,
+      maxCompletionTokens: 128000,
+    },
+
+    // GPT-5.2 Instant: 400k context, 128k output, optimized for speed
+    {
+      name: 'gpt-5.2-instant',
+      label: 'GPT-5.2 Instant',
+      provider: 'OpenAI',
+      maxTokenAllowed: 400000,
+      maxCompletionTokens: 128000,
+    },
+
+    // GPT-5.1: 128k context, 16k output limit (best for coding and agentic tasks)
     {
       name: 'gpt-5.1',
       label: 'GPT-5.1',
@@ -112,6 +139,12 @@ export default class OpenAIProvider extends BaseProvider {
       // OpenAI provides context_length in their API response
       if (m.context_length) {
         contextWindow = m.context_length;
+      } else if (m.id?.includes('gpt-5.2')) {
+        contextWindow = 400000; // GPT-5.2 has 400k context
+      } else if (m.id?.includes('gpt-5.1')) {
+        contextWindow = 128000; // GPT-5.1 has 128k context
+      } else if (m.id?.includes('gpt-5')) {
+        contextWindow = 128000; // Other GPT-5 models have 128k context
       } else if (m.id?.includes('gpt-4o')) {
         contextWindow = 128000; // GPT-4o has 128k context
       } else if (m.id?.includes('gpt-4-turbo') || m.id?.includes('gpt-4-1106')) {
@@ -135,6 +168,8 @@ export default class OpenAIProvider extends BaseProvider {
         maxCompletionTokens = 32000; // Other o1 models: 32K limit
       } else if (m.id?.includes('o3') || m.id?.includes('o4')) {
         maxCompletionTokens = 100000; // o3/o4 models: 100K output limit
+      } else if (m.id?.includes('gpt-5.2')) {
+        maxCompletionTokens = 128000; // GPT-5.2: 128K output limit
       } else if (m.id?.includes('gpt-5.1')) {
         maxCompletionTokens = 16384; // GPT-5.1: 16K output limit
       } else if (m.id?.includes('gpt-5-mini')) {
@@ -155,7 +190,7 @@ export default class OpenAIProvider extends BaseProvider {
         name: m.id,
         label: `${m.id} (${Math.floor(contextWindow / 1000)}k context)`,
         provider: this.name,
-        maxTokenAllowed: Math.min(contextWindow, 128000), // Cap at 128k for safety
+        maxTokenAllowed: Math.min(contextWindow, 400000), // Cap at 400k for safety
         maxCompletionTokens,
       };
     });
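
Two details in these hunks work together: GPT-5.2 ids are checked before the generic 'gpt-5' substring so they resolve to a 400k window instead of 128k, and the safety cap on maxTokenAllowed is raised from 128k to 400k so the larger window is not clipped by Math.min. A condensed sketch of the combined effect (the helper function and the initial default are assumptions for illustration, not code from the repository):

```ts
// Condensed, illustrative version of the context-window fallback plus the
// new cap; the real code runs inside OpenAIProvider's dynamic model mapping.
function resolveOpenAIMaxTokens(modelId: string, apiContextLength?: number): number {
  let contextWindow = 128000; // assumed starting default

  if (apiContextLength) {
    contextWindow = apiContextLength; // OpenAI reports context_length in its API response
  } else if (modelId.includes('gpt-5.2')) {
    contextWindow = 400000; // GPT-5.2: 400k context
  } else if (modelId.includes('gpt-5.1') || modelId.includes('gpt-5')) {
    contextWindow = 128000; // GPT-5.1 and other GPT-5 models: 128k context
  } else if (modelId.includes('gpt-4o')) {
    contextWindow = 128000; // GPT-4o: 128k context
  }

  // Cap raised from 128000 to 400000 in this PR; with the old cap, the
  // 400k window resolved above would have been clipped back to 128k.
  return Math.min(contextWindow, 400000);
}

console.log(resolveOpenAIMaxTokens('gpt-5.2-pro')); // 400000 (previously capped at 128000)
```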

app/utils/constants.ts

Lines changed: 1 addition & 1 deletion
@@ -6,7 +6,7 @@ export const WORK_DIR = `/home/${WORK_DIR_NAME}`;
 export const MODIFICATIONS_TAG_NAME = 'codinit_file_modifications';
 export const MODEL_REGEX = /^\[Model: (.*?)\]\n\n/;
 export const PROVIDER_REGEX = /\[Provider: (.*?)\]\n\n/;
-export const DEFAULT_MODEL = 'claude-3-5-sonnet-latest';
+export const DEFAULT_MODEL = 'claude-4-5-sonnet-latest';
 export const PROMPT_COOKIE_KEY = 'cachedPrompt';
 
 export const TOOL_EXECUTION_APPROVAL = {
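
For context on the one-line change above: MODEL_REGEX extracts an inline '[Model: …]' prefix from a prompt, and DEFAULT_MODEL appears to be the fallback when no such prefix is present, so new chats would now default to a Claude 4.5 Sonnet alias instead of Claude 3.5 Sonnet. A minimal sketch of that relationship, assuming this is how the constants are consumed elsewhere in the app (the extractModel helper is illustrative, not part of the codebase):

```ts
const MODEL_REGEX = /^\[Model: (.*?)\]\n\n/;
const DEFAULT_MODEL = 'claude-4-5-sonnet-latest';

// Illustrative only: pull the model id out of a message prefix, falling back
// to DEFAULT_MODEL when the prefix is absent.
function extractModel(message: string): string {
  const match = message.match(MODEL_REGEX);
  return match ? match[1] : DEFAULT_MODEL;
}

console.log(extractModel('[Model: gpt-5.2-pro]\n\nBuild a todo list app')); // 'gpt-5.2-pro'
console.log(extractModel('Build a todo list app')); // 'claude-4-5-sonnet-latest'
```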
