Commit 0adba25 (verified), committed by Wauplin (HF Staff)
Parent(s): 108cf3f

Upload folder using huggingface_hub

Files changed (1):
  src/routes/landingPageHtml.ts (+6 -6)
src/routes/landingPageHtml.ts CHANGED
@@ -510,7 +510,7 @@ client = OpenAI(
 )
 
 response = client.responses.create(
-    model="Qwen/Qwen2.5-VL-7B-Instruct",
+    model="moonshotai/Kimi-K2-Instruct:groq",
     instructions="You are a helpful assistant.",
     input="Tell me a three sentence bedtime story about a unicorn.",
 )
@@ -556,7 +556,7 @@ client = OpenAI(
 )
 
 response = client.responses.create(
-    model="Qwen/Qwen2.5-VL-7B-Instruct",
+    model="moonshotai/Kimi-K2-Instruct:groq",
     input=[
         {
             "role": "developer",
@@ -582,7 +582,7 @@ client = OpenAI(
 )
 
 stream = client.responses.create(
-    model="Qwen/Qwen2.5-VL-7B-Instruct",
+    model="moonshotai/Kimi-K2-Instruct:groq",
     input=[
         {
             "role": "user",
@@ -621,7 +621,7 @@ tools = [
 ]
 
 response = client.responses.create(
-    model="meta-llama/Llama-3.3-70B-Instruct:cerebras",
+    model="moonshotai/Kimi-K2-Instruct:groq",
     tools=tools,
     input="What is the weather like in Boston today?",
     tool_choice="auto",
@@ -645,7 +645,7 @@ class CalendarEvent(BaseModel):
     participants: list[str]
 
 response = client.responses.parse(
-    model="meta-llama/Meta-Llama-3-70B-Instruct:novita",
+    model="moonshotai/Kimi-K2-Instruct:groq",
     input=[
         {"role": "system", "content": "Extract the event information."},
         {
@@ -668,7 +668,7 @@ client = OpenAI(
 )
 
 response = client.responses.create(
-    model="meta-llama/Llama-3.3-70B-Instruct:cerebras",
+    model="moonshotai/Kimi-K2-Instruct:groq",
     input="how does tiktoken work?",
     tools=[
         {
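For context, a minimal sketch of the first snippet as it reads after this change. The client construction sits outside the hunks above, so the base_url and api_key values below are placeholders, not the actual defaults from landingPageHtml.ts.

from openai import OpenAI

# Sketch only: base_url and api_key are placeholder assumptions; the real
# values come from the client setup in landingPageHtml.ts, outside these hunks.
client = OpenAI(
    base_url="<your-responses-server>/v1",
    api_key="<your-token>",
)

response = client.responses.create(
    model="moonshotai/Kimi-K2-Instruct:groq",  # default model after this commit
    instructions="You are a helpful assistant.",
    input="Tell me a three sentence bedtime story about a unicorn.",
)

print(response.output_text)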