File size: 2,517 Bytes
4cb627f
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
## tests/test_models.py
```python
import pytest
from models import AVAILABLE_MODELS, find_model, ModelInfo

@pytest.mark.parametrize("identifier, expected_id", [
    ("Moonshot Kimi-K2", "moonshotai/Kimi-K2-Instruct"),
    ("moonshotai/Kimi-K2-Instruct", "moonshotai/Kimi-K2-Instruct"),
    ("openai/gpt-4", "openai/gpt-4"),
])
def test_find_model(identifier, expected_id):
    """find_model resolves both display names and raw model ids to a ModelInfo."""
    model = find_model(identifier)
    assert isinstance(model, ModelInfo)
    assert model.id == expected_id


def test_find_model_not_found():
    """An unknown identifier yields None rather than raising."""
    result = find_model("nonexistent-model")
    assert result is None


def test_available_models_have_unique_ids():
    """No two entries in AVAILABLE_MODELS may share the same id."""
    seen = set()
    for model in AVAILABLE_MODELS:
        assert model.id not in seen
        seen.add(model.id)
```

## tests/test_inference.py
```python
import pytest
from inference import chat_completion, stream_chat_completion
from models import ModelInfo

class DummyClient:
    """Minimal stand-in for an inference client.

    Mimics the ``client.chat.completions(...)`` call chain by aliasing
    ``chat`` back to the instance itself, so attribute traversal lands
    on :meth:`completions`.
    """

    def __init__(self, response):
        # Canned text returned as the completion content.
        self.response = response
        # `chat` aliases self so `client.chat.completions(...)` resolves here.
        self.chat = self

    def completions(self, **kwargs):
        """Return a response-shaped object: ``.choices[0].message.content``."""
        choice = type('C', (), {'message': type('M', (), {'content': self.response})})
        return type('R', (), {'choices': [choice]})

@pytest.fixture(autouse=True)
def patch_client(monkeypatch):
    """Replace ``hf_client.get_inference_client`` with a DummyClient factory.

    autouse=True so every test in this module transparently receives the
    patched client instead of hitting the real inference backend.

    NOTE(review): the original also did ``client.chat.completions = client``,
    which overwrote the bound ``completions`` method with the non-callable
    instance itself — that would break the ``client.chat.completions(...)``
    call path DummyClient is built around, so it is removed here. Confirm
    against how `inference` actually invokes the client.
    """
    def fake_client(model_id, provider):
        # DummyClient already wires .chat back to itself in __init__.
        return DummyClient("hello world")

    monkeypatch.setattr('hf_client.get_inference_client', fake_client)


def test_chat_completion_returns_text():
    """chat_completion should surface the dummy client's canned string."""
    messages = [{'role': 'user', 'content': 'test'}]
    response = chat_completion('any-model', messages)
    assert isinstance(response, str)
    assert response == 'hello world'


def test_stream_chat_completion_yields_chunks(monkeypatch):
    """stream_chat_completion yields each delta's text in order.

    Fixes the original, which referenced ``monkeypatch`` without requesting
    the fixture (NameError at runtime) and imported an unused name.
    """

    class StreamClient(DummyClient):
        def completions(self, **kwargs):
            # Emulate a streaming response: each chunk carries one delta.
            def make_chunk(text):
                delta = type('D', (), {'content': text})
                choice = type('Ch', (), {'delta': delta})
                return type('C', (), {'choices': [choice]})
            return iter([make_chunk('h'), make_chunk('i')])

    monkeypatch.setattr('hf_client.get_inference_client',
                        lambda mid, prov: StreamClient(None))

    msg = [{'role': 'user', 'content': 'stream'}]
    chunks = list(stream_chat_completion('any-model', msg))
    assert ''.join(chunks) == 'hi'