-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathplay.py
More file actions
88 lines (71 loc) · 2.89 KB
/
play.py
File metadata and controls
88 lines (71 loc) · 2.89 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
# Import necessary libraries
import pandas as pd
import numpy as np
from tensorflow.keras.preprocessing.text import Tokenizer
from tensorflow.keras.preprocessing.sequence import pad_sequences
from sklearn.preprocessing import LabelEncoder
from sklearn.model_selection import train_test_split
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Embedding, Flatten, Dense, Dropout
from tensorflow.keras.utils import to_categorical
# ---------------------------------------------------------------------------
# Data loading
# ---------------------------------------------------------------------------
# The dataset is semicolon-separated with NO header row ("text;emotion" per
# line), so header=None is required — without it pandas treats the first
# sample as the header row and silently drops it from the training data.
try:
    data = pd.read_csv(
        "train.txt", sep=";", header=None, names=["Text", "Emotions"]
    )
except FileNotFoundError:
    print("Error: 'train.txt' not found. Make sure it's in the correct directory.")
    exit()
# Extract texts and labels as plain Python lists.
texts = data["Text"].tolist()
labels = data["Emotions"].tolist()
# Tokenize: build the word index from the corpus, then map each sentence to a
# sequence of integer word ids.
tokenizer = Tokenizer()
tokenizer.fit_on_texts(texts)
sequences = tokenizer.texts_to_sequences(texts)
# Pad every sequence to the length of the longest one so the model always
# receives a fixed-size input.
max_length = max(len(seq) for seq in sequences)
padded_sequences = pad_sequences(sequences, maxlen=max_length)
# Encode the string labels as integers, then one-hot encode them for use with
# categorical cross-entropy.
label_encoder = LabelEncoder()
encoded_labels = label_encoder.fit_transform(labels)
one_hot_labels = to_categorical(encoded_labels)
# Hold out 20% of the data for evaluation (fixed seed for reproducibility).
xtrain, xtest, ytrain, ytest = train_test_split(
    padded_sequences, one_hot_labels, test_size=0.2, random_state=42
)
# Model: embedding -> flatten -> dense classifier.
# input_dim is vocabulary size + 1 because Tokenizer word ids start at 1 and
# index 0 is reserved for padding.
model = Sequential()
model.add(Embedding(input_dim=len(tokenizer.word_index) + 1,
                    output_dim=128, input_length=max_length))
model.add(Flatten())
model.add(Dense(units=128, activation="relu"))
model.add(Dense(units=one_hot_labels.shape[1], activation="softmax"))
# Compile and train.
model.compile(optimizer="adam", loss="categorical_crossentropy", metrics=["accuracy"])
epochs = 10
batch_size = 32
model.fit(xtrain, ytrain, epochs=epochs, batch_size=batch_size, validation_data=(xtest, ytest))
# Evaluate on the held-out test set.
loss, accuracy = model.evaluate(xtest, ytest)
print(f"\nTest Accuracy: {accuracy * 100:.2f}%")
def predict_emotion(text):
    """Classify a single sentence and return its predicted emotion label.

    Relies on the module-level ``tokenizer``, ``model``, ``max_length`` and
    ``label_encoder`` built during training above.
    """
    # Convert the raw string into the padded integer sequence the model expects.
    seq = pad_sequences(tokenizer.texts_to_sequences([text]), maxlen=max_length)
    # Pick the class with the highest softmax score.
    best_class = np.argmax(model.predict(seq))
    # Map the integer class id back to its original emotion string.
    return label_encoder.inverse_transform([best_class])[0]
# Interactive loop: read sentences and print the predicted emotion until the
# user quits.
while True:
    try:
        user_input = input("\nEnter a sentence (or type 'exit' to quit): ")
    except (EOFError, KeyboardInterrupt):
        # Treat Ctrl-D / Ctrl-C at the prompt as a request to quit instead of
        # crashing with a traceback.
        break
    # strip() so "exit " / " exit" also terminate the loop.
    if user_input.strip().lower() == 'exit':
        break
    predicted_emotion = predict_emotion(user_input)
    print(f"Predicted emotion: {predicted_emotion}")