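"""Tests for conversation preprocessing and loss-mask generation.

These tests exercise ``preprocess_conversations`` with the Qwen chat
template and a Qwen3-8B tokenizer, checking that exactly the assistant
spans (including the end-of-turn token) are marked in the loss mask.
"""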
import unittest

import torch
from transformers import AutoTokenizer

from specforge.data.preprocessing import preprocess_conversations
from specforge.data.template import TEMPLATE_REGISTRY


# Utility function for visual debugging
def visualize_loss_mask(tokenizer, input_ids, loss_mask):
    """Utility function to visualize which tokens contribute to loss."""
    RED = "\033[91m"  # Non-assistant tokens (loss_mask = 0)
    GREEN = "\033[92m"  # Assistant tokens (loss_mask = 1)
    RESET = "\033[0m"

    print("\nLoss Mask Visualization:")
    print("RED = Non-assistant tokens (loss_mask = 0)")
    print("GREEN = Assistant tokens (loss_mask = 1)")
    print("-" * 50)

    # Handle both 1D and 2D tensors - flatten if needed
    if input_ids.dim() > 1:
        input_ids = input_ids.flatten()
    if loss_mask.dim() > 1:
        loss_mask = loss_mask.flatten()

    if len(input_ids) == 0 or len(loss_mask) == 0:
        print("Empty input")
        return

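    # Walk the sequence, grouping consecutive tokens that share the same
    # mask value, and decode each run as a single colored chunk.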
    current_mask = loss_mask[0].item()
    current_ids = []

    for i in range(len(input_ids)):
        if current_mask == loss_mask[i].item():
            current_ids.append(input_ids[i].item())
        else:
            if hasattr(tokenizer, "decode"):
                decoded_text = tokenizer.decode(current_ids, skip_special_tokens=False)
            else:
                decoded_text = " ".join([f"token_{id}" for id in current_ids])
            if current_mask == 0:
                print(f"{RED}{decoded_text}{RESET}", end="")
            else:
                print(f"{GREEN}{decoded_text}{RESET}", end="")
            current_ids = [input_ids[i].item()]
            current_mask = loss_mask[i].item()

    # Print remaining tokens
    if current_ids:
        if hasattr(tokenizer, "decode"):
            decoded_text = tokenizer.decode(current_ids, skip_special_tokens=False)
        else:
            decoded_text = " ".join([f"token_{id}" for id in current_ids])
        if current_mask == 0:
            print(f"{RED}{decoded_text}{RESET}")
        else:
            print(f"{GREEN}{decoded_text}{RESET}")
    print("\n" + "-" * 50)


class TestPreprocessing(unittest.TestCase):
    """Test suite for conversation preprocessing and loss mask generation."""

    def setUp(self):
        """Set up test fixtures with Qwen3-8B tokenizer and template."""
        self.model_path = "Qwen/Qwen3-8B"
        self.tokenizer = AutoTokenizer.from_pretrained(self.model_path)
        self.chat_template = TEMPLATE_REGISTRY.get("qwen")
        self.max_length = 512
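        # NOTE: loading the tokenizer requires network access or a local
        # Hugging Face cache for Qwen/Qwen3-8B.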

    def test_conversation_preprocessing_basic(self):
        """Test basic conversation preprocessing with assistant response identification."""
        conversations = [
            [
                {"role": "user", "content": "What is 2+2?"},
                {"role": "assistant", "content": "The answer is 4."},
            ]
        ]

        results = preprocess_conversations(
            tokenizer=self.tokenizer,
            conversations=conversations,
            chat_template=self.chat_template,
            max_length=self.max_length,
            is_preformatted=False,
        )

        # Check structure
        self.assertIn("input_ids", results)
        self.assertIn("loss_mask", results)
        self.assertIn("attention_mask", results)
        self.assertEqual(len(results["input_ids"]), 1)
        self.assertEqual(len(results["loss_mask"]), 1)
        self.assertEqual(len(results["attention_mask"]), 1)

        # Verify tensor shapes match
        input_ids = results["input_ids"][0].squeeze()
        loss_mask = results["loss_mask"][0].squeeze()
        attention_mask = results["attention_mask"][0].squeeze()

        self.assertEqual(input_ids.shape, loss_mask.shape)
        self.assertEqual(input_ids.shape, attention_mask.shape)

        # Check that some tokens are marked for loss (assistant response)
        self.assertTrue(
            torch.any(loss_mask == 1), "No tokens marked for loss computation"
        )

        # Check that some tokens are not marked for loss (system/user parts)
        self.assertTrue(
            torch.any(loss_mask == 0), "All tokens marked for loss computation"
        )

        # Verify the complete assistant response is captured in the loss mask
        assistant_token_indices = torch.where(loss_mask == 1)[0]
        if len(assistant_token_indices) > 0:
            assistant_tokens = input_ids[assistant_token_indices]
            assistant_text = self.tokenizer.decode(
                assistant_tokens, skip_special_tokens=False
            )
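            # The Qwen3 chat template renders the assistant turn with an
            # empty <think> block, so the expected span includes it.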
            expected_assistant_text = (
                "<think>\n\n</think>\n\nThe answer is 4.<|im_end|>\n"
            )
            self.assertEqual(
                assistant_text,
                expected_assistant_text,
                f"Assistant text does not match exactly. Expected: {repr(expected_assistant_text)}, Got: {repr(assistant_text)}",
            )

    def test_multiple_turns_conversation(self):
        """Test conversation with multiple user-assistant turns."""
        conversations = [
            [
                {"role": "user", "content": "What is 2+2?"},
                {"role": "assistant", "content": "The answer is 4."},
                {"role": "user", "content": "Are you sure?"},
                {"role": "assistant", "content": "Yes, I'm certain."},
            ]
        ]

        results = preprocess_conversations(
            tokenizer=self.tokenizer,
            conversations=conversations,
            chat_template=self.chat_template,
            max_length=self.max_length,
            is_preformatted=False,
        )

        input_ids = results["input_ids"][0].squeeze()
        loss_mask = results["loss_mask"][0].squeeze()

        # Get all assistant response tokens
        assistant_token_indices = torch.where(loss_mask == 1)[0]
        self.assertTrue(
            len(assistant_token_indices) > 0, "No assistant tokens identified"
        )

        # Decode assistant tokens to verify both responses are captured
        assistant_tokens = input_ids[assistant_token_indices]
        assistant_text = self.tokenizer.decode(
            assistant_tokens, skip_special_tokens=False
        )

        # Exact match for the complete assistant text from both turns
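        # (only the final assistant turn carries the empty <think> block;
        # earlier turns are rendered without reasoning content)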
        expected_assistant_text = "The answer is 4.<|im_end|>\n<think>\n\n</think>\n\nYes, I'm certain.<|im_end|>\n"
        self.assertEqual(
            assistant_text,
            expected_assistant_text,
            f"Assistant text does not match exactly. Expected: {repr(expected_assistant_text)}, Got: {repr(assistant_text)}",
        )

    def test_preformatted_conversation(self):
        """Test preprocessing of pre-formatted conversation strings."""
        preformatted_conversations = [
            "<|im_start|>system\nYou are a helpful assistant.<|im_end|>\n<|im_start|>user\nWhat is Python?<|im_end|>\n<|im_start|>assistant\nPython is a programming language.<|im_end|>\n"
        ]

        results = preprocess_conversations(
            tokenizer=self.tokenizer,
            conversations=preformatted_conversations,
            chat_template=self.chat_template,
            max_length=self.max_length,
            is_preformatted=True,
        )

        # Check basic structure
        self.assertEqual(len(results["input_ids"]), 1)

        input_ids = results["input_ids"][0].squeeze()
        loss_mask = results["loss_mask"][0].squeeze()

        # Verify assistant response is identified
        self.assertTrue(
            torch.any(loss_mask == 1),
            "No assistant tokens marked in preformatted input",
        )

        # Extract and verify assistant content
        assistant_token_indices = torch.where(loss_mask == 1)[0]
        assistant_tokens = input_ids[assistant_token_indices]
        assistant_text = self.tokenizer.decode(
            assistant_tokens, skip_special_tokens=False
        )

        # Check for exact match of the expected assistant response
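        # (no <think> block here: with is_preformatted=True the string is
        # tokenized as-is and the chat template adds nothing)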
        expected_assistant_text = "Python is a programming language.<|im_end|>\n"
        self.assertEqual(
            assistant_text,
            expected_assistant_text,
            f"Assistant text does not match exactly. Expected: {repr(expected_assistant_text)}, Got: {repr(assistant_text)}",
        )

    def test_assistant_span_boundaries(self):
        """Test that assistant span boundaries are correctly identified without truncation."""
        test_cases = [
            {
                "name": "Short response",
                "conversation": [
                    {"role": "user", "content": "Hi"},
                    {"role": "assistant", "content": "Hello!"},
                ],
                "expected_assistant_text": "<think>\n\n</think>\n\nHello!<|im_end|>\n",
            },
            {
                "name": "Response with punctuation",
                "conversation": [
                    {"role": "user", "content": "What's your name?"},
                    {"role": "assistant", "content": "I'm Claude, an AI assistant."},
                ],
                "expected_assistant_text": "<think>\n\n</think>\n\nI'm Claude, an AI assistant.<|im_end|>\n",
            },
            {
                "name": "Multi-sentence response",
                "conversation": [
                    {"role": "user", "content": "Tell me about Python."},
                    {
                        "role": "assistant",
                        "content": "Python is a programming language. It's very popular for AI.",
                    },
                ],
                "expected_assistant_text": "<think>\n\n</think>\n\nPython is a programming language. It's very popular for AI.<|im_end|>\n",
            },
            {
                "name": "Response with special characters",
                "conversation": [
                    {"role": "user", "content": "Show me math."},
                    {
                        "role": "assistant",
                        "content": "Sure! Here's an example: 2 + 2 = 4, and π ≈ 3.14159.",
                    },
                ],
                "expected_assistant_text": "<think>\n\n</think>\n\nSure! Here's an example: 2 + 2 = 4, and π ≈ 3.14159.<|im_end|>\n",
            },
        ]

        for test_case in test_cases:
            with self.subTest(test_case["name"]):
                conversations = [test_case["conversation"]]

                results = preprocess_conversations(
                    tokenizer=self.tokenizer,
                    conversations=conversations,
                    chat_template=self.chat_template,
                    max_length=self.max_length,
                    is_preformatted=False,
                )

                input_ids = results["input_ids"][0].squeeze()
                loss_mask = results["loss_mask"][0].squeeze()

                # Extract assistant tokens
                assistant_token_indices = torch.where(loss_mask == 1)[0]
                self.assertTrue(
                    len(assistant_token_indices) > 0,
                    f"No assistant tokens found for test case: {test_case['name']}",
                )

                assistant_tokens = input_ids[assistant_token_indices]
                assistant_text = self.tokenizer.decode(
                    assistant_tokens, skip_special_tokens=False
                )

                # Verify exact match of the expected assistant text
                expected_assistant_text = test_case["expected_assistant_text"]
                self.assertEqual(
                    assistant_text,
                    expected_assistant_text,
                    f"Assistant text does not match exactly for test case '{test_case['name']}'. Expected: {repr(expected_assistant_text)}, Got: {repr(assistant_text)}",
                )

                # Additional check: ensure no user content leaked into assistant spans
                user_content = test_case["conversation"][0]["content"]
                # Check if user content appears in assistant text (should not happen with exact matching)
                self.assertNotIn(
                    user_content,
                    assistant_text,
                    f"User content '{user_content}' found in assistant spans for test case '{test_case['name']}': '{assistant_text}'",
                )

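# For reference, a minimal sketch (hypothetical training code, not part of
# specforge) of how a loss mask like the one tested above is typically
# consumed: per-token cross-entropy is multiplied by the mask so that only
# assistant tokens contribute to the loss. `logits`, `labels`, and
# `loss_mask` are assumed to be tensors of matching sequence length.
#
#     import torch.nn.functional as F
#     per_token = F.cross_entropy(
#         logits.view(-1, logits.size(-1)),  # [batch*seq, vocab]
#         labels.view(-1),                   # [batch*seq]
#         reduction="none",
#     )
#     loss = (per_token * loss_mask.view(-1)).sum() / loss_mask.sum()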

if __name__ == "__main__":
    suite = unittest.TestSuite()

    suite.addTest(unittest.TestLoader().loadTestsFromTestCase(TestPreprocessing))

    runner = unittest.TextTestRunner(verbosity=2)
    runner.run(suite)

    # A commented-out example of using visualize_loss_mask directly:
    """
    # Example usage of visualize_loss_mask for debugging/visualization
    model_path = "Qwen/Qwen3-8B"
    tokenizer = AutoTokenizer.from_pretrained(model_path)
    chat_template = TEMPLATE_REGISTRY.get("qwen")

    # Using conversations list
    conversations = [
        [
            {"role": "user", "content": "What is 2+2?"},
            {"role": "assistant", "content": "The answer is 4."},
            {"role": "user", "content": "I don't think that's right"},
            {"role": "assistant", "content": "I'm pretty sure it's 4."},
        ],
        [
            {"role": "user", "content": "How do you boil water?"},
            {"role": "assistant", "content": "Use a stove."},
        ],
    ]
    results = preprocess_conversations(
        tokenizer=tokenizer,
        conversations=conversations,
        chat_template=chat_template,
        max_length=512,
        is_preformatted=False,
    )
    for i in range(len(results["input_ids"])):
        visualize_loss_mask(tokenizer, results["input_ids"][i], results["loss_mask"][i])

    # Using preformatted conversation
    preformatted_conversations = [
        "<|im_start|>system\nYou are a helpful assistant.<|im_end|>\n<|im_start|>user\nWhat is 2+2?<|im_end|>\n<|im_start|>assistant\nThe answer is 4.<|im_end|>\n<|im_start|>user\nI don't think that's right<|im_end|>\n<|im_start|>assistant\n<think>\nI know 2+2 is 4</think>\n\nI'm pretty sure it's 4.<|im_end|>\n",
    ]
    results = preprocess_conversations(
        tokenizer=tokenizer,
        conversations=preformatted_conversations,
        chat_template=chat_template,
        max_length=512,
        is_preformatted=True,
    )
    for i in range(len(results["input_ids"])):
        visualize_loss_mask(tokenizer, results["input_ids"][i], results["loss_mask"][i])
    """