# app.py
"""
Hugging Face Space app (Gradio) that forwards prompts to a ChatGPT-like backend
using the provided dummy cookies. This is a best-effort reverse-proxy demo.

WARNING:
- These cookies are embedded here only because you explicitly provided dummy
  cookies for an educational demo. Never put real, sensitive cookies or tokens
  in a public Space. Use a private Space or environment secrets for real values.
- This reverse approach is unofficial and may break or be blocked by Cloudflare.
"""
import json
import time
import uuid
from typing import Optional

import requests

import gradio as gr
# ----------------------------
# Configuration: embedded dummy cookies (as provided)
# ----------------------------
# NOTE(review): even "dummy" session material should live in environment
# secrets rather than source; kept inline only because the module docstring
# documents these as deliberately non-sensitive demo values.
COOKIES = {
    # The escaped \" pair means the literal double quotes are part of the
    # stored cookie value.
    "conv_key": "\"rKVevhxYJuphj8B_obXQBnMV0gFu_SYw1DGKP9EIlY4=\"",
    # Opaque session token split across adjacent string literals (implicit
    # concatenation); the value is a single long string at runtime.
    "__Secure-next-auth.session-token": (
        "eyJhbGciOiJkaXIiLCJlbmMiOiJBMjU2R0NNIn0..pQAo79jORppyrvdi.VPSDpmUorC1Okv9nsvA_"
        "xerz4xLWOEqWfjnzCxtb5orIjxH_5OKI7GMAdQYG6KMWmgrUp4yuWF7B0zO7_WWSqKsml9BWUYa"
        "QQg_rli0mmypqD6V2hC0x0Bg312XpKh19Rhzj1boVik7FO9g145ZDmv9K0Svws48ENtZdn3ofUn"
        "EeeFhwa7l8oNqh1M9KJ7GjAgaZRCZ6I8wESreYmExZ_thEyRS3qrI-Bq4VlxTTZGKnBbzBMh7SZY"
        "pZU_CYQi1EmaR83BpBL6n9RWA_BZHrkEkbMbbSp4_2GqawK7REukhKlRGZ2vmmuNbFLnEYlZgGzI"
        "Ffj6SKv78k4IjILLyGeVe5Z8gNOEjSoLzlfm696yi7016m0M6Fo6yK-cjR6d9wUPp5Xm_e8tSR2U"
        "99ItEP2pWGEohHSAuohI2s117DUPhuKRZvyTYgFNGqQTrE_aOwKQoAY8W419HnEIrphTp2MC4CLF"
        "jEN2WCH0Gkm-yZSiFjS_UUk8zrSATQgRdTM3s5xkZoCsAKedAFOwDZUa-0oQd2IQnW2PY0s00mgD5"
        "54K_hBq0W2cn5B41Hrbf2uLqLoJMhYc2HrzKa6fvoTbcF_v5f4vewvGN_osmmkTHHFCXXQIGdz5pV"
        "M-1CtD4xY0zQ5Q4OUJVoQP9iV01wEJjc5GG7vImba0mR4iV1jePDczAE8v4SGCy68mmbbbSqbbVuo"
        "tM4LOgPxuMEtwCukj9uW8MZqrfGk5AxZcyYWwinjt2ztOtLxEytCnHNMnRV0B45OCey359CoK2htG"
        "-sST7IWjFeMBSieWO1NiJOv0aSZed_M0x0SMJuyyzEprW-AxNLQrU4Zs_j29dfKdKCGhSic8K_oh7G"
        "c9GAHLYMAztDn6ZIND-_tgbe5tm1TGSTHl8ZSh_cNLKnFs1RvCVIMb4pi986MO1byLeRR1r1tdqFN"
        "_EP7JvQvoreuRPABBnTvDpH6pOE62skPqZ9j6ZlENciEt6_nixCXhPPl1wPJJd-16PKD65DIbszRDr"
        "Y1K5RFYTgjNJDTEL1sZw6PcBIcuXSUL3V24Ke7MgSz4lq7gJXwWgTrOVTwPi3Kahk0xkbz_bYHhYCq"
        "OP5CILrbprKmtuV9M9I4VHP7kZA-faOT-kblCdxpD1BDFuOo-MRwTJ68Z10iuBGVWDJDzup8Y5lHMI"
        "2vyuJpv1VVIJ6igHkCUZjLX52WAW1QGuzNRBUJebm6EM3cDpUSc3ywcGyG4RZMHiJNfWmnxw_aq7jh"
        "qK1bT47SN1JhkYynzMsyF4MzON9eLWun1vz915TzucwKsfQoftjXqmlro2_3BRU0T3FoSyrG722DDq"
        "gNasSX9SeLZ1lJJtUv0IeXVH9u12Hafe9uUwGgfKwrofiDTtxTDNKeloGjWckoUtD8EGJUmBxFCdsY"
        "pPWdFhroORbtZM-EkDqYkwl5nK6zUVt1bkaepg84Eok0-Nx519s8oGItSre8JL3nL7MEwhG9BgKAs2"
        "3RKmNcsJzh5xvvqda4SGCPuNVFdEMZEB98eGmJ3jUuYnHCAhpQ4X766Ntnje-bhAruF7uKfGuvjjYl"
        "9KH6IxD9JuCtxKqas1qenB4oNXmvwuHZjLbhuvH2bz-cvhcUFqdRIImIKD3Web6UepPWpFlwNDSsGni"
        "eguLCks4pMmuTegQOU5n9CkgEB5MuQ3U6D9hG2fv_bdfCCK1kTGnfrTHn4G1vVzAym3yz09T73sNUnU"
        "cf5H1P-Z8ltVm4KhBRk8HKypWdoCwEsP5whtHbBgYIVZ8XUzD-fo7OA2QViwknB5cyOsCxPxjh67Zrj"
        "7XmFeaki1vcHd3U7eRGIym1UBQjkZ8CAA39LnUVszQqrQW2_CwuaxmgDh9qCZgH02OeG7Lj3sYcKxg"
        "XplzaH6cBBKx7Vq1_uk49s9-hMpvKiQn_DdfmEYLmPrITza9jzqeeiL0IrhBOivev00WgYaI95OkgUh"
        "Vs9fEqUCUDRSksd2bn0lyuERs_nCM4ngSkKvZWWVgXAnE8FE0rqYXuQOFccLcXYxr1vdNsfAIndmCi0"
        "fF_PQkddBY-iu7pQb8n_YPvLLMflFavbCC-3BmVl__Hmr_WuovJGsZL1DYcO89b37m2ZIQLT2dpaK-J"
        "zmcknO2OnSm1xoWkXD_vy5Kk5Ui7RVpp0ISM_2xh6pdgTOadcbG-NZ-sMsELHXATz_911lXAyQmoozar"
        "k9Bwonrh7zMvjAVKxEtjwnldYuA_0DhzBsGydQHZ5nzbnzPjS4XuFKV3Y00b6i6J3efvHSf0wUzchLcx"
        "An5i8Y9m7LUp_u9dxFFUokH7dqnkHiXzDpBxH2UqEw9NTB8gKKzl6H-Y6SJ7NOYmc4N9mF-HPgsnS-KyY"
        "WXrvcQ98PF5gaoAG1JY-LD1VEiDQ6PKdcH5zEOkH6n8zg9tWaRiGHauBp_XytpZqoMlA9LFJH86xTFxR"
        "RX-u0zWteFYLYL27NwHMDOfzFHapYLB-M9p-kgBIG8Zp6PmOckR5Es96_BOansDpEaStyRKazG1tFncLS"
        "-eNz5Q0soRuGdpB2UvWPNlvJ_RX-TZjfZwVMRzPFAeliw-Q-63BbiGXKXyvivu5yHNfXhk63V2DINEsBw88"
        "tTXrW9cHpSKwWDk7Bl4SprQAE-pV6a8jF9Qb1k7Ge93taXmH1ZgissL9DFig6bpGPHC1h1gtxKZ4eyKXCW"
        "-nBtNOXZ6eCtD0VeEMEgkeNEgj8-oUBBpuALs3oCOVqeAcXlC0D7T34PAsrwyE1o9xTa1Y83q9aL1AjwVMD"
        "nU-tmk01GZWAKqaWQwaOllRsK548uCwsKX-KAn3PwDNn5S6BZJkZ0XM0Hmwf1tQxqDZQy637kK3rXxFoOwg"
        "ESqbKArUVk-bV8KjyOc8yIRgBe0SPlYloyjFHU2N_fAY_FlJNvRjvI7xSUNOs-Y3TVqDIYQG0uPJxGrDGiBy"
        "m08bPIviK_hkoQBFlWId98MopTubtEY2n7DJ6-caa16KO_ToUq4RhqrRC1l0ZV7RFr0HEigxIwDtldWFs0op"
        "C9ccje5l_sUtCApgiKim5vItesEVgS-F2hx8yY4AfJV7IwHBBEmO3pNmViGKPQ24RL2LQ3iuEbU9NIdzOfdBi"
        "sKfaZOK7RrTdYExcUeMbEtzyKjBUSRsaq4UHC03E9LdksjMwLi3np3VNUT2om9k-o0S5DjUpI4tySijKQAA66"
        "SpasDAstPO5Ss_6M4-v5Kj-r9QTtHC94pAM8AD-hb5mSw6qwIx4Q5F2SdLMrtY7Dd2Vd8A3bXnOn1HsAdZyUT1"
        "W4DdS3Zl-VauV7ILlzQVLJ5AeAfATBRenNqRLHl2FBoNod6qtnui7bbRfvafWryKrdpr4otMWycPHPUbVWSXTm"
        "Wps3T-59QULRTRrADKxIaoJuqIKxuGqnLdW18298vQjfEOv4uqGNKzcU9fTGz67SQpQG9EOeTZHGKzhzfqkKiECH"
        "CDLhDWF3W7YPShqqda0T7_w_wFCH9rObq2wexqhUU_GPHnU93W4LOLE.ABtSFxKhamgcjdfZdJ-ahw"
    ),
    # Opaque Cloudflare clearance value (treated as a plain string here).
    "cf_clearance": "E_7wmR35a_eEQAwLDblDFr4lNhphlrbsA08K8B38SJI-1756807432-1.2.1.1-QAloR641KrVse4D_Wv7xFTbLWk_Grb_cx7DhrxfsjQnpG0JsiV_YGIXigmBT.Cof9TUI_jf1IJHDe5FafguobuXONZQ3Jqf3m7aekQPXocqaHhyOPIS_pa58A4PGUyfL7BwtqhlRLBbC8mCeddWlnTV0M99G8gO32Cfe.tYNAeil2M7vEgHjNHE.Zgg7BiPYj3QNFgDx4J49aaUzE95ta0uZDXb2clgoCeofL.MzPrA"
}
# ----------------------------
# Build requests session with cookies
# ----------------------------
# Module-level session shared by all requests; cookies are installed once at
# import time for each domain variant the backend might be reached under.
SESSION = requests.Session()
for k, v in COOKIES.items():
    # set cookies for both chat.openai.com and chatgpt.com domain variants
    try:
        SESSION.cookies.set(k, v, domain=".chatgpt.com", path="/")
    except Exception:
        # Fall back to the bare-domain form if the dotted variant is rejected.
        SESSION.cookies.set(k, v, domain="chatgpt.com", path="/")
    # also attempt chat.openai.com
    try:
        SESSION.cookies.set(k, v, domain=".chat.openai.com", path="/")
    except Exception:
        # Deliberate best-effort: a failure on the secondary domain is ignored.
        pass
# Default request headers sent with every POST (see send_prompt).
COMMON_HEADERS = {
    "User-Agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/120 Safari/537.36",
    "Accept": "application/json, text/plain, */*",
    "Referer": "https://chat.openai.com/",
    "Origin": "https://chat.openai.com"
}
# Endpoints to try (order matters)
ENDPOINTS = [
    "https://chat.openai.com/backend-api/conversation",
    "https://chat.openai.com/backend-api/conversations",
    "https://chatgpt.com/backend-api/conversation",
    "https://chatgpt.com/backend-api/conversations",
]
# ---------------------------- | |
# Helpers | |
# ---------------------------- | |
def looks_like_html_block(text: str) -> bool:
    """Return True if *text* looks like an HTML challenge/interstitial page.

    A None or empty value is treated as "not HTML". The check is
    case-insensitive: the text must contain an ``<html`` tag plus at least
    one of the marker words below.
    """
    lowered = (text or "").lower()
    if "<html" not in lowered:
        return False
    return any(marker in lowered for marker in ("cloudflare", "captcha", "refresh"))
def parse_assistant_text_from_json(obj) -> Optional[str]:
    """Best-effort extraction of the assistant's text from a backend payload.

    Accepts a dict, a JSON-encoded string, or None. Three known payload
    shapes are tried in order; None is returned when nothing matches or the
    input cannot be decoded.
    """
    if obj is None:
        return None
    # Allow callers to pass the raw JSON string instead of a decoded object.
    if isinstance(obj, str):
        try:
            obj = json.loads(obj)
        except Exception:
            return None
    if not isinstance(obj, dict):
        return None

    # Shape 1: {'message': {'content': {'parts': [...] | 'text': '...'}}}
    message = obj.get("message")
    if isinstance(message, dict):
        body = message.get("content") or message.get("message") or {}
        if isinstance(body, dict):
            pieces = body.get("parts") or body.get("text")
            if isinstance(pieces, list) and pieces:
                return pieces[0]
            if isinstance(pieces, str):
                return pieces

    # Shape 2: {'choices': [{'delta'|'message': {...}} | {'text': '...'}]}
    choices = obj.get("choices")
    if isinstance(choices, list) and choices:
        head = choices[0]
        if isinstance(head, dict):
            # streaming-style delta, or a full message object
            delta = head.get("delta") or head.get("message") or {}
            if isinstance(delta, dict):
                content = delta.get("content")
                if isinstance(content, str):
                    return content
                parts = delta.get("parts")
                if isinstance(parts, list) and parts:
                    return parts[0]
            raw = head.get("text")
            if isinstance(raw, str):
                return raw

    # Shape 3: a plain string under a known top-level key.
    for key in ("response", "content", "item"):
        value = obj.get(key)
        if isinstance(value, str):
            return value
    return None
# ----------------------------
# Core: send prompt (with retries)
# ----------------------------
def send_prompt(prompt: str, model: str = "text-davinci-002-render-sha", timeout: int = 25):
    """POST *prompt* to each candidate endpoint in turn, with retries.

    Returns a ``(ok, result)`` tuple: on success ``result`` is the extracted
    assistant text (or the parsed JSON dict when no text could be found, or
    raw text when the response is not JSON); on failure it is a
    human-readable error string.
    """
    payload = {
        "action": "next",
        "messages": [{
            # Fresh client-side message id for every call.
            "id": str(uuid.uuid4()),
            "role": "user",
            "content": {"content_type": "text", "parts": [prompt]}
        }],
        "model": model
    }
    # Try endpoints with retries
    for endpoint in ENDPOINTS:
        tries = 0
        backoff = 0.6  # seconds; doubled after each failed attempt
        while tries < 3:
            tries += 1
            try:
                resp = SESSION.post(endpoint, headers=COMMON_HEADERS, json=payload, timeout=timeout)
            except Exception as e:
                # network-level error: retry with backoff; note that the final
                # failed attempt returns immediately rather than moving on to
                # the next endpoint.
                err = f"Network error contacting {endpoint}: {e}"
                if tries < 3:
                    time.sleep(backoff)
                    backoff *= 2
                    continue
                return False, err
            # If Cloudflare or HTML is returned -> considered blocked
            if resp.status_code >= 400 or looks_like_html_block(resp.text):
                snippet = resp.text[:800]
                # If it's HTML block, return explicit message to user
                if looks_like_html_block(resp.text):
                    return False, f"Blocked by Cloudflare / HTML detected at {endpoint}. Snippet:\n\n{snippet}"
                # otherwise continue retrying on 5xx maybe
                if 500 <= resp.status_code < 600 and tries < 3:
                    time.sleep(backoff)
                    backoff *= 2
                    continue
                # 4xx (or exhausted 5xx retries): give up for this call.
                return False, f"HTTP {resp.status_code} from {endpoint}: {snippet}"
            # success-ish
            try:
                parsed = resp.json()
            except Exception:
                # Not JSON — return raw text (truncated)
                return True, resp.text[:4000]
            # attempt to extract assistant text
            assistant = parse_assistant_text_from_json(parsed)
            if assistant:
                return True, assistant
            else:
                # return full parsed JSON for debugging if no assistant found
                return True, parsed
    return False, "All endpoints failed after retries."
# ---------------------------- | |
# Gradio UI function | |
# ---------------------------- | |
def gradio_chat(message: str):
    """Gradio callback: validate the prompt, forward it, format the reply.

    Always returns a string suitable for the output textbox.
    """
    # Reject empty or whitespace-only input up front.
    if not (message and message.strip()):
        return "Please enter a message."
    ok, result = send_prompt(message)
    if not ok:
        return f"Error / blocked: {result}"
    # Pretty-print structured results, truncated for the UI.
    if isinstance(result, (dict, list)):
        return json.dumps(result, indent=2)[:3000]
    return str(result)
# ----------------------------
# Build Gradio app
# ----------------------------
with gr.Blocks() as demo:
    gr.Markdown("## 🚀 ChatGPT Reverse Demo (HF Space)\n"
                "> Demo forwards prompt to ChatGPT-like backend using embedded dummy cookies.\n\n"
                "**Important:** This is a demo. Do not expose live session tokens in public Spaces.")
    # Layout reconstructed from a mangled paste: input/output boxes assumed to
    # share the row, button below — TODO confirm against the original layout.
    with gr.Row():
        inp = gr.Textbox(label="Your prompt", lines=2, placeholder="Write something you want the assistant to answer...")
        out = gr.Textbox(label="Assistant response", lines=10)
    btn = gr.Button("Send")
    # Wire the button to the handler: one textbox in, one textbox out.
    btn.click(gradio_chat, inputs=inp, outputs=out)

if __name__ == "__main__":
    # Bind to all interfaces on port 8080 (typical Hugging Face Space setup).
    demo.launch(server_name="0.0.0.0", server_port=8080)