Skip to content

Commit 51b4b8b

Browse files
committed
feat: add model alias handling and update Cloudflare provider
- Introduced `clean_name` function in `Cloudflare.py` to format model names by removing specific suffixes.
- Updated `model_aliases` in `Cloudflare` class with new model mappings, and cleaned up redundant entries.
- Set `models` in the `Cloudflare` class to use keys from `model_aliases`.
- Adjusted the caching logic in `Cloudflare` to include new headers for requests.
- Added `parent` and `login_url` attributes in `DeepInfraChat` class.
- Updated `PollinationsAI` to clean up model retrieval logic and fixed handling of existing checks.
- Refactored `HarProvider` to inherit models and aliases from `LegacyLMArena`.
- Implemented loading environment variables from `.env` file in `cookies.py`.
- Updated default headers in `defaults.py` for user agent and `sec-ch-ua`.
- Cleaned up various model references in `any_model_map.py` to reflect differences in audio, vision, and other model types.
- Added a more centralized handling for API key management in `run_tools.py` to accommodate new nomenclature.
- Enhanced existing logic to allow for more granular loading and utilization of API keys from environment variables.
1 parent bdc356c commit 51b4b8b

File tree

19 files changed

+1467
-1227
lines changed

19 files changed

+1467
-1227
lines changed

g4f/Provider/Cloudflare.py

Lines changed: 65 additions & 18 deletions
Original file line numberDiff line numberDiff line change
@@ -12,6 +12,25 @@
1212
from .. import debug
1313
from .helper import render_messages
1414

15+
def clean_name(name: str) -> str:
    """Derive a short user-facing alias from a full Cloudflare model id.

    Takes the last path segment of the id (e.g. ``@cf/meta/llama-...`` ->
    ``llama-...``), strips known quantization/variant suffixes, and
    normalizes the ``meta-llama-`` vendor prefix to plain ``llama-``.
    Note: removals match anywhere in the name (plain ``str.replace``),
    which is intentional — e.g. ``deepseek-r1-distill`` -> ``deepseek-distill``.
    """
    short = name.split("/")[-1]
    # Order matters only in that the vendor-prefix rewrite runs last.
    for fragment in ("-instruct", "-17b-16e", "-chat", "-fp8", "-fast",
                     "-int8", "-awq", "-qvq", "-r1"):
        short = short.replace(fragment, "")
    return short.replace("meta-llama-", "llama-")
27+
28+
# models = []
29+
# model_aliases = {clean_name(m.get("name")): m.get("name") for m in models}
30+
# open(__file__, "a").write(f"""# Generated by g4f.models.cloudflare.py
31+
# model_aliases = {model_aliases}
32+
# """)
33+
1534
class Cloudflare(AsyncGeneratorProvider, ProviderModelMixin, AuthFileMixin):
1635
label = "Cloudflare AI"
1736
url = "https://playground.ai.cloudflare.com"
@@ -23,23 +42,48 @@ class Cloudflare(AsyncGeneratorProvider, ProviderModelMixin, AuthFileMixin):
2342
supports_system_message = True
2443
supports_message_history = True
2544
default_model = "@cf/meta/llama-3.3-70b-instruct-fp8-fast"
26-
model_aliases = {
27-
"llama-2-7b": "@cf/meta/llama-2-7b-chat-fp16",
28-
"llama-2-7b": "@cf/meta/llama-2-7b-chat-int8",
29-
"llama-3-8b": "@cf/meta/llama-3-8b-instruct",
30-
"llama-3-8b": "@cf/meta/llama-3-8b-instruct-awq",
31-
"llama-3-8b": "@hf/meta-llama/meta-llama-3-8b-instruct",
32-
"llama-3.1-8b": "@cf/meta/llama-3.1-8b-instruct-awq",
33-
"llama-3.1-8b": "@cf/meta/llama-3.1-8b-instruct-fp8",
34-
"llama-3.2-1b": "@cf/meta/llama-3.2-1b-instruct",
35-
"llama-4-scout": "@cf/meta/llama-4-scout-17b-16e-instruct",
36-
"deepseek-math-7b": "@cf/deepseek-ai/deepseek-math-7b-instruct",
37-
"deepseek-r1-qwen-32b": "@cf/deepseek-ai/deepseek-r1-distill-qwen-32b",
38-
"falcon-7b": "@cf/tiiuae/falcon-7b-instruct",
39-
"qwen-1.5-7b": "@cf/qwen/qwen1.5-7b-chat-awq",
40-
"qwen-2.5-coder": "@cf/qwen/qwen2.5-coder-32b-instruct",
45+
model_aliases = {
46+
'deepseek-coder-6.7b-base': '@hf/thebloke/deepseek-coder-6.7b-base-awq',
47+
'deepseek-coder-6.7b': '@hf/thebloke/deepseek-coder-6.7b-instruct-awq',
48+
'deepseek-math-7b': '@cf/deepseek-ai/deepseek-math-7b-instruct',
49+
'deepseek-distill-qwen-32b': '@cf/deepseek-ai/deepseek-r1-distill-qwen-32b',
50+
'discolm-german-7b-v1': '@cf/thebloke/discolm-german-7b-v1-awq',
51+
'falcon-7b': '@cf/tiiuae/falcon-7b-instruct',
52+
'gemma-3-12b-it': '@cf/google/gemma-3-12b-it',
53+
'gemma-7b-it': '@hf/google/gemma-7b-it',
54+
'hermes-2-pro-mistral-7b': '@hf/nousresearch/hermes-2-pro-mistral-7b',
55+
'llama-2-13b': '@hf/thebloke/llama-2-13b-chat-awq',
56+
'llama-2-7b-fp16': '@cf/meta/llama-2-7b-chat-fp16',
57+
'llama-2-7b': '@cf/meta/llama-2-7b-chat-int8',
58+
'llama-3-8b': '@hf/meta-llama/meta-llama-3-8b-instruct',
59+
'llama-3.1-8b': '@cf/meta/llama-3.1-8b-instruct-fp8',
60+
'llama-3.2-11b-vision': '@cf/meta/llama-3.2-11b-vision-instruct',
61+
'llama-3.2-1b': '@cf/meta/llama-3.2-1b-instruct',
62+
'llama-3.2-3b': '@cf/meta/llama-3.2-3b-instruct',
63+
'llama-3.3-70b': '@cf/meta/llama-3.3-70b-instruct-fp8-fast',
64+
'llama-4-scout': '@cf/meta/llama-4-scout-17b-16e-instruct',
65+
'llama-guard-3-8b': '@cf/meta/llama-guard-3-8b',
66+
'llamaguard-7b': '@hf/thebloke/llamaguard-7b-awq',
67+
'mistral-7b-v0.1': '@hf/thebloke/mistral-7b-instruct-v0.1-awq',
68+
'mistral-7b-v0.2': '@hf/mistral/mistral-7b-instruct-v0.2',
69+
'mistral-small-3.1-24b': '@cf/mistralai/mistral-small-3.1-24b-instruct',
70+
'neural-7b-v3-1': '@hf/thebloke/neural-chat-7b-v3-1-awq',
71+
'openchat-3.5-0106': '@cf/openchat/openchat-3.5-0106',
72+
'openhermes-2.5-mistral-7b': '@hf/thebloke/openhermes-2.5-mistral-7b-awq',
73+
'phi-2': '@cf/microsoft/phi-2',
74+
'qwen1.5-0.5b': '@cf/qwen/qwen1.5-0.5b-chat',
75+
'qwen1.5-1.8b': '@cf/qwen/qwen1.5-1.8b-chat',
76+
'qwen1.5-14b': '@cf/qwen/qwen1.5-14b-chat-awq',
77+
'qwen1.5-7b': '@cf/qwen/qwen1.5-7b-chat-awq',
78+
'qwen2.5-coder-32b': '@cf/qwen/qwen2.5-coder-32b-instruct',
79+
'qwq-32b': '@cf/qwen/qwq-32b',
80+
'sqlcoder-7b-2': '@cf/defog/sqlcoder-7b-2',
81+
'starling-lm-7b-beta': '@hf/nexusflow/starling-lm-7b-beta',
82+
'tinyllama-1.1b-v1.0': '@cf/tinyllama/tinyllama-1.1b-chat-v1.0',
83+
'una-cybertron-7b-v2-bf16': '@cf/fblgit/una-cybertron-7b-v2-bf16',
84+
'zephyr-7b-beta': '@hf/thebloke/zephyr-7b-beta-awq'
4185
}
42-
fallback_models = list(model_aliases.keys())
86+
models = list(model_aliases.keys())
4387
_args: dict = None
4488

4589
@classmethod
@@ -106,6 +150,9 @@ async def create_async_generator(
106150
) -> AsyncResult:
107151
cache_file = cls.get_cache_file()
108152
if cls._args is None:
153+
headers = DEFAULT_HEADERS.copy()
154+
headers["referer"] = f"{cls.url}"
155+
headers["origin"] = cls.url
109156
if cache_file.exists():
110157
with cache_file.open("r") as f:
111158
cls._args = json.load(f)
@@ -114,9 +161,9 @@ async def create_async_generator(
114161
cls._args = await get_args_from_nodriver(cls.url, proxy=proxy)
115162
except (RuntimeError, FileNotFoundError) as e:
116163
debug.log(f"Cloudflare: Nodriver is not available:", e)
117-
cls._args = {"headers": DEFAULT_HEADERS, "cookies": {}, "impersonate": "chrome"}
164+
cls._args = {"headers": headers, "cookies": {}, "impersonate": "chrome"}
118165
else:
119-
cls._args = {"headers": DEFAULT_HEADERS, "cookies": {}, "impersonate": "chrome"}
166+
cls._args = {"headers": headers, "cookies": {}, "impersonate": "chrome"}
120167
try:
121168
model = cls.get_model(model)
122169
except ModelNotFoundError:

g4f/Provider/DeepInfraChat.py

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -7,7 +7,9 @@
77

88

99
class DeepInfraChat(OpenaiTemplate):
10+
parent = "DeepInfra"
1011
url = "https://deepinfra.com/chat"
12+
login_url = "https://deepinfra.com/dash/api_keys"
1113
api_base = "https://api.deepinfra.com/v1/openai"
1214
working = True
1315

g4f/Provider/PollinationsAI.py

Lines changed: 6 additions & 7 deletions
Original file line numberDiff line numberDiff line change
@@ -61,7 +61,6 @@
6161
"role": "developer",
6262
"content": "Provide conversation options.",
6363
}]
64-
6564
class PollinationsAI(AsyncGeneratorProvider, ProviderModelMixin):
6665
label = "Pollinations AI"
6766
url = "https://pollinations.ai"
@@ -121,15 +120,15 @@ def get_model(cls, model: str) -> str:
121120
"""Get the internal model name from the user-provided model name."""
122121
if not model:
123122
return cls.default_model
124-
125-
# Check if the model exists directly in our model lists
126-
if model in cls.text_models or model in cls.image_models or model in cls.audio_models:
127-
return model
128-
123+
129124
# Check if there's an alias for this model
130125
if model in cls.model_aliases:
131126
return cls.model_aliases[model]
132-
127+
128+
# Check if the model exists directly in our model lists
129+
if model in cls.text_models or model in cls.image_models or model in cls.audio_models:
130+
return model
131+
133132
# If no match is found, raise an error
134133
raise ModelNotFoundError(f"PollinationsAI: Model {model} not found")
135134

g4f/Provider/har/__init__.py

Lines changed: 6 additions & 57 deletions
Original file line numberDiff line numberDiff line change
@@ -22,65 +22,14 @@ class HarProvider(AsyncGeneratorProvider, ProviderModelMixin):
2222
url = "https://legacy.lmarena.ai"
2323
api_endpoint = "/queue/join?"
2424
working = True
25-
default_model = "chatgpt-4o-latest-20250326"
26-
model_aliases = LegacyLMArena.model_aliases
27-
vision_models = [
28-
"o3-2025-04-16",
29-
"o4-mini-2025-04-16",
30-
"gpt-4.1-2025-04-14",
31-
"gemini-2.5-pro-exp-03-25",
32-
"claude-3-7-sonnet-20250219",
33-
"claude-3-7-sonnet-20250219-thinking-32k",
34-
"llama-4-maverick-17b-128e-instruct",
35-
"gpt-4.1-mini-2025-04-14",
36-
"gpt-4.1-nano-2025-04-14",
37-
"gemini-2.0-flash-thinking-exp-01-21",
38-
"gemini-2.0-flash-001",
39-
"gemini-2.0-flash-lite-preview-02-05",
40-
"claude-3-5-sonnet-20241022",
41-
"gpt-4o-mini-2024-07-18",
42-
"gpt-4o-2024-11-20",
43-
"gpt-4o-2024-08-06",
44-
"gpt-4o-2024-05-13",
45-
"claude-3-5-sonnet-20240620",
46-
"doubao-1.5-vision-pro-32k-250115",
47-
"amazon-nova-pro-v1.0",
48-
"amazon-nova-lite-v1.0",
49-
"qwen2.5-vl-32b-instruct",
50-
"qwen2.5-vl-72b-instruct",
51-
"gemini-1.5-pro-002",
52-
"gemini-1.5-flash-002",
53-
"gemini-1.5-flash-8b-001",
54-
"gemini-1.5-pro-001",
55-
"gemini-1.5-flash-001",
56-
"hunyuan-standard-vision-2024-12-31",
57-
"pixtral-large-2411",
58-
"step-1o-vision-32k-highres",
59-
"claude-3-haiku-20240307",
60-
"claude-3-sonnet-20240229",
61-
"claude-3-opus-20240229",
62-
"qwen-vl-max-1119",
63-
"qwen-vl-max-0809",
64-
"reka-core-20240904",
65-
"reka-flash-20240904",
66-
"c4ai-aya-vision-32b",
67-
"pixtral-12b-2409"
68-
]
25+
default_model = LegacyLMArena.default_model
6926

7027
@classmethod
def get_models(cls) -> list[str]:
    """Populate this provider's model catalogue from LegacyLMArena.

    Triggers LegacyLMArena's own model discovery first, then mirrors
    its ``models``, ``model_aliases`` and ``vision_models`` attributes
    onto this class so HarProvider stays in sync with it.
    """
    LegacyLMArena.get_models()
    for attr in ("models", "model_aliases", "vision_models"):
        setattr(cls, attr, getattr(LegacyLMArena, attr))
    return cls.models
8534

8635
@classmethod

g4f/Provider/needs_auth/GeminiPro.py

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -43,6 +43,8 @@ class GeminiPro(AsyncGeneratorProvider, ProviderModelMixin):
4343

4444
@classmethod
4545
def get_models(cls, api_key: str = None, api_base: str = api_base) -> list[str]:
46+
if not api_key:
47+
return cls.fallback_models
4648
if not cls.models:
4749
try:
4850
url = f"{cls.api_base if not api_base else api_base}/models"

g4f/cli/client.py

Lines changed: 11 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -25,8 +25,8 @@
2525
class ConversationManager:
2626
"""Manages conversation history and state."""
2727

28-
def __init__(self, file_path: Path, model: Optional[str] = None, provider: Optional[str] = None) -> None:
29-
self.file_path = file_path
28+
def __init__(self, file_path: Optional[Path] = None, model: Optional[str] = None, provider: Optional[str] = None) -> None:
29+
self.file_path: Optional[Path] = file_path
3030
self.model: Optional[str] = model
3131
self.provider: Optional[str] = provider
3232
self.conversation = None
@@ -35,7 +35,7 @@ def __init__(self, file_path: Path, model: Optional[str] = None, provider: Optio
3535

3636
def _load(self) -> None:
3737
"""Load conversation from file."""
38-
if not self.file_path.is_file():
38+
if self.file_path is None or not self.file_path.is_file():
3939
return
4040

4141
try:
@@ -58,7 +58,7 @@ def _load(self) -> None:
5858

5959
def save(self) -> None:
6060
"""Save conversation to file."""
61-
if self.file_path.exists() and not self.file_path.is_file():
61+
if self.file_path is None:
6262
return
6363

6464
try:
@@ -227,6 +227,11 @@ def get_parser():
227227
action='store_true',
228228
help="Clear conversation history before starting"
229229
)
230+
parser.add_argument(
231+
'--no-config',
232+
action='store_true',
233+
help="Do not load configuration from conversation file"
234+
)
230235
parser.add_argument(
231236
'input',
232237
nargs='*',
@@ -247,11 +252,11 @@ async def run_args(input_text: str, args):
247252
debug.logging = True
248253

249254
# Initialize conversation manager
250-
conversation = ConversationManager(args.conversation_file, args.model, args.provider)
255+
conversation = ConversationManager(None if args.no_config else args.conversation_file, args.model, args.provider)
251256
if args.clear_history:
252257
conversation.history = []
253258
conversation.conversation = None
254-
259+
255260
# Set cookies directory if specified
256261
set_cookies_dir(str(args.cookies_dir))
257262
read_cookie_files()

g4f/client/__init__.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -377,7 +377,7 @@ def create(
377377
kwargs["ignore_stream"] = True
378378

379379
response = iter_run_tools(
380-
provider.create_function,
380+
provider,
381381
model=model,
382382
messages=messages,
383383
stream=stream,

g4f/config.py

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -12,13 +12,13 @@ def get_config_dir() -> Path:
1212
else: # Linux and other UNIX-like
1313
return Path.home() / ".config"
1414

15-
CONFIG_DIR = get_config_dir() / "g4f"
15+
PACKAGE_NAME = "g4f"
16+
CONFIG_DIR = get_config_dir() / PACKAGE_NAME
1617
COOKIES_DIR = CONFIG_DIR / "cookies"
1718
CUSTOM_COOKIES_DIR = "./har_and_cookies"
18-
PACKAGE_NAME = "g4f"
1919
ORGANIZATION = "gpt4free"
2020
GITHUB_REPOSITORY = f"xtekky/{ORGANIZATION}"
21-
STATIC_DOMAIN = f"g4f.dev"
21+
STATIC_DOMAIN = f"{PACKAGE_NAME}.dev"
2222
STATIC_URL = f"https://{STATIC_DOMAIN}/"
2323
DIST_DIR = f"./{STATIC_DOMAIN}/dist"
2424
DOWNLOAD_URL = f"https://raw.githubusercontent.com/{ORGANIZATION}/{STATIC_DOMAIN}/refs/heads/main/"

g4f/cookies.py

Lines changed: 8 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -49,7 +49,7 @@ def g4f(domain_name: str) -> list:
4949

5050
class CookiesConfig():
5151
cookies: Dict[str, Cookies] = {}
52-
cookies_dir: str = CUSTOM_COOKIES_DIR if os.path.exists(CUSTOM_COOKIES_DIR) else COOKIES_DIR
52+
cookies_dir: str = CUSTOM_COOKIES_DIR if os.path.exists(CUSTOM_COOKIES_DIR) else str(COOKIES_DIR)
5353

5454
DOMAINS = [
5555
".bing.com",
@@ -135,6 +135,13 @@ def read_cookie_files(dirPath: str = None):
135135
debug.log(f"Read cookies: {dirPath} dir is not readable")
136136
return
137137

138+
try:
139+
from dotenv import load_dotenv
140+
load_dotenv(os.path.join(dirPath, ".env"), override=True)
141+
debug.log(f"Read cookies: Loaded environment variables from {dirPath}/.env")
142+
except ImportError:
143+
debug.error("Warning: 'python-dotenv' is not installed. Environment variables will not be loaded.")
144+
138145
def get_domain(v: dict) -> str:
139146
host = [h["value"] for h in v['request']['headers'] if h["name"].lower() in ("host", ":authority")]
140147
if not host:

g4f/gui/server/api.py

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -156,7 +156,7 @@ def decorated_log(*values: str, file = None):
156156
if "user" not in kwargs:
157157
debug.log = decorated_log
158158
proxy = os.environ.get("G4F_PROXY")
159-
provider = kwargs.get("provider")
159+
provider = kwargs.pop("provider", None)
160160
try:
161161
model, provider_handler = get_model_and_provider(
162162
kwargs.get("model"), provider,
@@ -179,7 +179,7 @@ def decorated_log(*values: str, file = None):
179179
if hasattr(provider_handler, "get_parameters"):
180180
yield self._format_json("parameters", provider_handler.get_parameters(as_json=True))
181181
try:
182-
result = iter_run_tools(ChatCompletion.create, **{**kwargs, "model": model, "provider": provider_handler, "download_media": download_media})
182+
result = iter_run_tools(provider_handler, **{**kwargs, "model": model, "download_media": download_media})
183183
for chunk in result:
184184
if isinstance(chunk, ProviderInfo):
185185
yield self.handle_provider(chunk, model)

0 commit comments

Comments
 (0)