---
base_model:
- google/flan-t5-xl
pipeline_tag: text-classification
tags:
- personality
---

## Model Details

* **Model Type:** PersonalityClassifier is fine-tuned from `google/flan-t5-xl` on annotated data for personality classification.
* **Model Date:** PersonalityClassifier was trained in January 2024.
* **Paper or resources for more information:** [https://arxiv.org/abs/2504.06868](https://arxiv.org/abs/2504.06868)

## Requirements

* `torch==2.1.0`
* `transformers==4.29.0`
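
One way to install the pinned versions is via pip; newer releases may also work, but these are the versions the card lists:

```bash
# Installs the exact versions pinned above
pip install torch==2.1.0 transformers==4.29.0
```
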
## How to use the model

```python
import torch
from transformers import T5ForConditionalGeneration, AutoTokenizer

# Set device to CUDA if available, otherwise use CPU
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

# Load model and tokenizer
model_name = "mirlab/PersonalityClassifier"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = T5ForConditionalGeneration.from_pretrained(model_name).to(device)

# Define model inference function
def modelGenerate(input_text, lm, tokenizer):
    # Tokenize input text and move to device
    input_ids = tokenizer(input_text, truncation=True, padding=True, return_tensors='pt')['input_ids'].to(device)

    # Generate text using the model
    model_output = lm.generate(input_ids)

    # Decode generated tokens into text
    model_answer = tokenizer.batch_decode(model_output, skip_special_tokens=True)

    return model_answer

# Example input text
# Format: "[Valence] Statement: [Your Statement]. Trait: [Target Trait]"
# Target Trait is among ["Openness", "Conscientiousness", "Extraversion", "Agreeableness", "Neuroticism", "Machiavellianism", "Narcissism", "Psychopathy"].
# Valence indicates positive (+) or negative (-) alignment with the trait.

input_texts = "[Valence] Statement: I am outgoing. Trait: Extraversion"

# Generate output using the model and print it
output_texts = modelGenerate(input_texts, model, tokenizer)
print(output_texts)
```
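
Because `modelGenerate` tokenizes with `padding=True`, it should also accept a list of formatted inputs in a single call. A minimal batch sketch under that assumption follows; the statements and variable names below are illustrative, not taken from the model's data:

```python
# Batch several formatted statements in one generate call
batch_inputs = [
    "[Valence] Statement: I enjoy meeting new people. Trait: Extraversion",
    "[Valence] Statement: I double-check my work. Trait: Conscientiousness",
]
batch_outputs = modelGenerate(batch_inputs, model, tokenizer)
for text, label in zip(batch_inputs, batch_outputs):
    print(f"{text} -> {label}")
```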