feat: configure OpenClaw main agent for native Anthropic API access
Route Claude models directly through the Anthropic API using a setup-token (Pro subscription) instead of the LiteLLM proxy.

- Add anthropic:manual profile (setup-token auth) to auth-profiles.json
- Remove Claude models from litellm provider in models.json (they now use the built-in anthropic catalog instead)
- Set default model to anthropic/claude-sonnet-4-6 in openclaw.json
- Add anthropic/* fallback chain: opus-4-6, sonnet-4-6, opus-4-5, sonnet-4-5, haiku-4-5
- Remove litellm/claude-* entries from fallback list
- Update openai-codex and github-copilot credentials

Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>
This commit is contained in:
@@ -780,6 +780,23 @@
|
||||
"cacheWrite": 0
|
||||
}
|
||||
},
|
||||
{
|
||||
"id": "qwen2.5-14b-local",
|
||||
"name": "Qwen2.5 14B Instruct (local)",
|
||||
"api": "openai-completions",
|
||||
"reasoning": false,
|
||||
"input": [
|
||||
"text"
|
||||
],
|
||||
"contextWindow": 32768,
|
||||
"maxTokens": 8192,
|
||||
"cost": {
|
||||
"input": 0,
|
||||
"output": 0,
|
||||
"cacheRead": 0,
|
||||
"cacheWrite": 0
|
||||
}
|
||||
},
|
||||
{
|
||||
"id": "gemini-3-pro-preview",
|
||||
"name": "gemini-3-pro-preview",
|
||||
@@ -851,6 +868,24 @@
|
||||
"cacheWrite": 0
|
||||
}
|
||||
},
|
||||
{
|
||||
"id": "gpt-5.3-codex-spark",
|
||||
"name": "gpt-5.3-codex-spark",
|
||||
"api": "openai-completions",
|
||||
"reasoning": true,
|
||||
"input": [
|
||||
"text",
|
||||
"image"
|
||||
],
|
||||
"contextWindow": 400000,
|
||||
"maxTokens": 128000,
|
||||
"cost": {
|
||||
"input": 0,
|
||||
"output": 0,
|
||||
"cacheRead": 0,
|
||||
"cacheWrite": 0
|
||||
}
|
||||
},
|
||||
{
|
||||
"id": "gemini-3.1-pro-preview",
|
||||
"name": "gemini-3.1-pro-preview",
|
||||
@@ -1339,6 +1374,58 @@
|
||||
"cacheRead": 0,
|
||||
"cacheWrite": 0
|
||||
}
|
||||
},
|
||||
{
|
||||
"id": "glm-4.7-flash",
|
||||
"name": "glm-4.7-flash",
|
||||
"reasoning": false,
|
||||
"input": [
|
||||
"text"
|
||||
],
|
||||
"contextWindow": 200000,
|
||||
"maxTokens": 8192,
|
||||
"cost": {
|
||||
"input": 0,
|
||||
"output": 0,
|
||||
"cacheRead": 0,
|
||||
"cacheWrite": 0
|
||||
},
|
||||
"api": "openai-completions"
|
||||
},
|
||||
{
|
||||
"id": "glm-5",
|
||||
"name": "glm-5",
|
||||
"reasoning": false,
|
||||
"input": [
|
||||
"text"
|
||||
],
|
||||
"contextWindow": 200000,
|
||||
"maxTokens": 8192,
|
||||
"cost": {
|
||||
"input": 0,
|
||||
"output": 0,
|
||||
"cacheRead": 0,
|
||||
"cacheWrite": 0
|
||||
},
|
||||
"api": "openai-completions"
|
||||
},
|
||||
{
|
||||
"id": "gpt-5.4",
|
||||
"name": "gpt-5.4",
|
||||
"reasoning": true,
|
||||
"input": [
|
||||
"text",
|
||||
"image"
|
||||
],
|
||||
"contextWindow": 400000,
|
||||
"maxTokens": 128000,
|
||||
"cost": {
|
||||
"input": 0,
|
||||
"output": 0,
|
||||
"cacheRead": 0,
|
||||
"cacheWrite": 0
|
||||
},
|
||||
"api": "openai-completions"
|
||||
}
|
||||
]
|
||||
},
|
||||
|
||||
Reference in New Issue
Block a user