Merge pull request #1565 from hlohaus/gemini

Fix OpenaiChat provider, Fix issue with curl_cffi
pull/1568/head^2 0.2.1.1
H Lohaus 4 months ago committed by GitHub
commit 1d6709dafc

@@ -358,6 +358,21 @@ response = g4f.ChatCompletion.create(
# Displaying the response
print(response)
from g4f.image import ImageResponse

# Get image links from the response
for chunk in g4f.ChatCompletion.create(
    model=g4f.models.default, # Using the default model
    provider=g4f.Provider.OpenaiChat, # Specifying the provider as OpenaiChat
    messages=[{"role": "user", "content": "Create images with dogs"}],
    access_token="...", # Needs an access token from a Plus user
    stream=True,
    ignore_stream=True
):
    if isinstance(chunk, ImageResponse):
        print(chunk.images) # Print generated image links
        print(chunk.alt)    # Print the prompt used for image generation
```
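If you want to keep the generated images locally, a minimal sketch could look like the following. It assumes `chunk.images` yields plain URL strings and that the `requests` package is installed; the `save_images` helper is hypothetical and not part of g4f:

```python
import requests

def save_images(links, prefix="dog"):
    # Download each generated image link and write it to a local file
    for index, url in enumerate(links):
        response = requests.get(url, timeout=30)
        response.raise_for_status()
        with open(f"{prefix}_{index}.png", "wb") as file:
            file.write(response.content)

# e.g. call save_images(chunk.images) inside the isinstance(chunk, ImageResponse) branch
```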
##### Using Browser

@@ -1,5 +1,6 @@
from __future__ import annotations
from urllib import parse
from datetime import datetime
from ..typing import AsyncResult, Messages
@@ -55,9 +56,9 @@ class Phind(AsyncGeneratorProvider):
"customLinks": []
},
"context": "\n".join([message["content"] for message in messages if message["role"] == "system"]),
"rewrittenQuestion": prompt,
"challenge": 0.21132115912208504
}
data["challenge"] = generate_challenge(data)
async with session.post(f"https://https.api.phind.com/infer/", headers=headers, json=data) as response:
new_line = False
async for line in response.iter_lines():
@@ -65,6 +66,8 @@ class Phind(AsyncGeneratorProvider):
chunk = line[6:]
if chunk.startswith(b'<PHIND_DONE/>'):
break
if chunk.startswith(b'<PHIND_BACKEND_ERROR>'):
raise RuntimeError(f"Response: {chunk}")
if chunk.startswith(b'<PHIND_WEBRESULTS>') or chunk.startswith(b'<PHIND_FOLLOWUP>'):
pass
elif chunk.startswith(b"<PHIND_METADATA>") or chunk.startswith(b"<PHIND_INDICATOR>"):
@@ -78,3 +81,46 @@ class Phind(AsyncGeneratorProvider):
new_line = False
else:
new_line = True
def deterministic_stringify(obj):
    # Serialize a dict into a stable string with keys sorted alphabetically
    def handle_value(value):
        if isinstance(value, (dict, list)):
            if isinstance(value, list):
                return '[' + ','.join(sorted(map(handle_value, value))) + ']'
            else:  # It's a dict
                return '{' + deterministic_stringify(value) + '}'
        elif isinstance(value, bool):
            return 'true' if value else 'false'
        elif isinstance(value, (int, float)):
            return format(value, '.8f').rstrip('0').rstrip('.')
        elif isinstance(value, str):
            return f'"{value}"'
        else:
            return 'null'

    items = sorted(obj.items(), key=lambda x: x[0])
    return ','.join([f'{k}:{handle_value(v)}' for k, v in items if handle_value(v) is not None])

def simple_hash(s):
    # 31-based rolling hash over ASCII characters, kept in signed 32-bit range
    d = 0
    for char in s:
        if len(char) > 1 or ord(char) >= 256:
            continue
        d = ((d << 5) - d + ord(char[0])) & 0xFFFFFFFF
        if d > 0x7FFFFFFF:  # 2147483647
            d -= 0x100000000  # Subtract 2**32
    return d

def generate_challenge(obj):
    # Derive the challenge value that is sent alongside the request payload
    deterministic_str = deterministic_stringify(obj)
    encoded_str = parse.quote(deterministic_str, safe='')
    c = simple_hash(encoded_str)
    a = (9301 * c + 49297)
    b = 233280
    # If negative, we need special logic
    if a < 0:
        return ((a % b) - b) / b
    else:
        return a % b / b
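A quick way to sanity-check these helpers is to run `generate_challenge` against a payload shaped like the request body built above; the field values below are made up for illustration:

```python
# Hypothetical payload mirroring the fields the provider sends to the Phind API
payload = {
    "question": "What is Python?",
    "options": {"date": "1/1/2024", "language": "en-US", "detailed": True, "customLinks": []},
    "context": "",
    "rewrittenQuestion": "What is Python?",
    "challenge": 0.21132115912208504
}
payload["challenge"] = generate_challenge(payload)
print(payload["challenge"])  # A float in (-1, 1) derived from the stringified payload
```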

@@ -342,26 +342,30 @@ class OpenaiChat(AsyncGeneratorProvider, ProviderModelMixin):
raise MissingAuthError(f'Missing "access_token"')
cls._cookies = cookies
headers = {"Authorization": f"Bearer {access_token}"}
auth_headers = {"Authorization": f"Bearer {access_token}"}
async with StreamSession(
proxies={"https": proxy},
impersonate="chrome110",
timeout=timeout,
cookies=dict([(name, value) for name, value in cookies.items() if name == "_puid"])
headers={"Cookie": "; ".join(f"{k}={v}" for k, v in cookies.items())}
) as session:
try:
image_response = None
if image:
image_response = await cls.upload_image(session, headers, image, kwargs.get("image_name"))
image_response = await cls.upload_image(session, auth_headers, image, kwargs.get("image_name"))
except Exception as e:
yield e
end_turn = EndTurn()
model = cls.get_model(model or await cls.get_default_model(session, headers))
model = cls.get_model(model or await cls.get_default_model(session, auth_headers))
model = "text-davinci-002-render-sha" if model == "gpt-3.5-turbo" else model
while not end_turn.is_end:
arkose_token = await cls.get_arkose_token(session)
data = {
"action": action,
"arkose_token": await cls.get_arkose_token(session),
"arkose_token": arkose_token,
"conversation_mode": {"kind": "primary_assistant"},
"force_paragen": False,
"force_rate_limit": False,
"conversation_id": conversation_id,
"parent_message_id": parent_id,
"model": model,
@@ -373,7 +377,11 @@ class OpenaiChat(AsyncGeneratorProvider, ProviderModelMixin):
async with session.post(
f"{cls.url}/backend-api/conversation",
json=data,
headers={"Accept": "text/event-stream", **headers}
headers={
"Accept": "text/event-stream",
"OpenAI-Sentinel-Arkose-Token": arkose_token,
**auth_headers
}
) as response:
if not response.ok:
raise RuntimeError(f"Response {response.status_code}: {await response.text()}")
@@ -439,7 +447,8 @@ class OpenaiChat(AsyncGeneratorProvider, ProviderModelMixin):
Returns:
tuple[str, dict]: A tuple containing the access token and cookies.
"""
with get_browser(proxy=proxy) as driver:
driver = get_browser(proxy=proxy)
try:
driver.get(f"{cls.url}/")
WebDriverWait(driver, timeout).until(EC.presence_of_element_located((By.ID, "prompt-textarea")))
access_token = driver.execute_script(
@@ -451,6 +460,8 @@ class OpenaiChat(AsyncGeneratorProvider, ProviderModelMixin):
"return accessToken;"
)
return access_token, get_driver_cookies(driver)
finally:
driver.close()
@classmethod
async def get_arkose_token(cls, session: StreamSession) -> str:

@@ -89,7 +89,7 @@ gpt_4 = Model(
name = 'gpt-4',
base_provider = 'openai',
best_provider = RetryProvider([
Bing, Phind, Liaobots,
Bing, Liaobots,
])
)
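With Phind dropped from the list, a plain `gpt-4` request now retries across the remaining providers. A minimal sketch of such a call, reusing the public API already shown in the README excerpt above:

```python
import g4f

# The RetryProvider above tries Bing and Liaobots until one of them answers
response = g4f.ChatCompletion.create(
    model="gpt-4",
    messages=[{"role": "user", "content": "Hello"}],
)
print(response)
```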

@@ -15,7 +15,7 @@ INSTALL_REQUIRE = [
EXTRA_REQUIRE = {
'all': [
"curl_cffi>=0.5.10",
"curl_cffi>=0.6.0b9",
"certifi",
"async-property", # openai
"py-arkose-generator", # openai
