# Exploratory data analysis for the discourse-effectiveness dataset:
# load the raw train/test CSVs, optionally plot class and length distributions,
# and load the tokenizer intended for downstream modelling.
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from datasets import load_dataset
from transformers import AutoTokenizer

# Column names: the label to predict and the free-text field.
TARGET = "discourse_effectiveness"
TEXT = "discourse_text"

# Raw data files.
train_df = pd.read_csv("data/raw_data/train.csv")
test_df = pd.read_csv("data/raw_data/test.csv")
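
# Quick sanity check (an added sketch, not part of the original script):
# confirm the shapes and the label distribution of the raw data before modelling.
print("train:", train_df.shape, "test:", test_df.shape)
print(train_df[TARGET].value_counts())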
"""
train_df[TARGET].value_counts(ascending=True).plot.barh()
plt.title("Frequency of Classes")
plt.show()
train_df['discourse_type'].value_counts(ascending=True).plot.barh()
plt.title("Frequency of discourse_type")
plt.show()
train_df["Words Per text"] = train_df[TEXT].str.split().apply(len)
train_df.boxplot("Words Per text", by=TARGET, grid=False, showfliers=False,
color="black")
plt.suptitle("")
plt.xlabel("")
plt.show()
"""

# Tokenizer for the checkpoint we plan to fine-tune.
model_ckpt = "distilbert-base-uncased"
tokenizer = AutoTokenizer.from_pretrained(model_ckpt)
print(f"Tokenizer max sequence length: {tokenizer.model_max_length}")
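
# Illustrative sketch (not part of the original script): one way the loaded
# tokenizer could be applied to the discourse text column. The truncation and
# padding settings here are assumptions for demonstration, not the author's choices.
sample_encodings = tokenizer(
    train_df[TEXT].head(3).tolist(),
    truncation=True,
    padding=True,
)
print("Token ids of the first example (first 10):", sample_encodings["input_ids"][0][:10])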