# NOTE: removed Hugging Face Spaces page residue ("Spaces: Sleeping") that was
# captured during extraction — it was never part of this script.
"""Prompt a LLaMA 2 causal LM to rank job titles against a user query.

Loads the model and tokenizer from the Hugging Face Hub, feeds a ranking
prompt, and prints the generated continuation.
"""
from transformers import AutoModelForCausalLM, AutoTokenizer
import torch

# Load the LLaMA 2 model and tokenizer.
# NOTE(review): this is a gated repo — access must be granted on the Hub and an
# auth token configured, otherwise from_pretrained raises. Confirm in deployment.
model_name = "meta-llama/Llama-2-7b-hf"  # Adjust to the checkpoint you prefer
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(model_name)

# Build the prompt.
# NOTE(review): a causal LM cannot actually compute cosine similarities — it
# will only *generate text that looks like* scores. For real semantic ranking,
# embed the titles and query (e.g. with a sentence-embedding model) and compute
# cosine similarity numerically.
prompt = """
You are an advanced AI assistant trained to process job titles and user queries. I will provide you with a list of job titles, and a user query. Your task is to:
1. Calculate the cosine similarity score between the query and each job title.
2. Rank the job titles from the most similar to the least similar based on their semantic meaning.
3. Return the top 5 job titles with their cosine similarity scores.
Here is the list of job titles from the CSV:
- Software Engineer
- Data Scientist
- Machine Learning Engineer
- Business Analyst
- Product Manager
...
The user's query is: "Machine Learning Expert"
Now, compute the similarity scores, rank the job titles, and return the top 5.
"""

# Tokenize the prompt and generate a response.
inputs = tokenizer(prompt, return_tensors="pt")
with torch.inference_mode():  # generation only — no gradients needed
    outputs = model.generate(**inputs, max_new_tokens=200)

# Decode and print the result (skip BOS/EOS and other special tokens).
response = tokenizer.decode(outputs[0], skip_special_tokens=True)
print(response)