import numpy as np
import matplotlib.pyplot as plt

from keras.models import Model
from keras.layers import Dense, Input, Dropout, LSTM, Activation, Embedding

from utils import *

np.random.seed(1)

# Load the training and test sentences with their emoji labels,
# plus the pre-trained 50-dimensional GloVe word vectors.
X_train, Y_train = read_csv('train_emoji.csv')
X_test, Y_test = read_csv('tesss.csv')
word_to_index, index_to_word, word_to_vec_map = read_glove_vecs('glove.6B.50d.txt')
def sentences_to_indices(X, word_to_index, max_len):
    """Convert an array of sentences into an array of word indices, zero-padded to max_len."""
    m = X.shape[0]                        # number of examples
    X_indices = np.zeros((m, max_len))
    for i in range(m):
        # Split the i-th sentence into lower-cased words.
        sentence_words = [w.lower() for w in X[i].split()]
        j = 0
        for w in sentence_words:
            # Store the vocabulary index of the j-th word of the i-th sentence.
            X_indices[i, j] = word_to_index[w]
            j = j + 1
    return X_indices
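
# Illustration only (hypothetical toy vocabulary, not the real GloVe index):
# with word_to_index = {"i": 1, "love": 2, "emojis": 3} and max_len = 4,
# sentences_to_indices(np.array(["I love emojis"]), word_to_index, 4)
# would return array([[1., 2., 3., 0.]]) -- indices left-aligned, zero-padded.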
def pretrained_embedding_layer(word_to_vec_map, word_to_index):
    """Build a non-trainable Keras Embedding layer initialized with the GloVe vectors."""
    vocab_len = len(word_to_index) + 1              # +1 because Keras reserves index 0
    emb_dim = word_to_vec_map["cucumber"].shape[0]  # dimensionality of the GloVe vectors (50)
    # Row idx of the embedding matrix holds the GloVe vector of the word with that index.
    emb_matrix = np.zeros((vocab_len, emb_dim))
    for word, idx in word_to_index.items():
        emb_matrix[idx, :] = word_to_vec_map[word]
    embedding_layer = Embedding(vocab_len, emb_dim, trainable=False)
    embedding_layer.build((None,))  # Do not modify the "None". This line of code is complete as-is.
    embedding_layer.set_weights([emb_matrix])
    return embedding_layer
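
# Optional sanity check (sketch): the layer's single weight matrix should reproduce
# the GloVe vector of any known word, e.g.
#   layer = pretrained_embedding_layer(word_to_vec_map, word_to_index)
#   np.allclose(layer.get_weights()[0][word_to_index["cucumber"]],
#               word_to_vec_map["cucumber"])   # expected: True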
def predict_your_feel(input_shape, word_to_vec_map, word_to_index):
    """Build the emoji-prediction model: frozen GloVe embeddings -> 2-layer LSTM -> 5-way softmax."""
    # Input: a batch of sentences encoded as padded index sequences of shape (max_len,).
    sentence_indices = Input(input_shape, dtype='int32')
    # Map the indices to their (non-trainable) GloVe embeddings.
    embedding_layer = pretrained_embedding_layer(word_to_vec_map, word_to_index)
    embeddings = embedding_layer(sentence_indices)
    # First LSTM returns the full sequence so it can feed the second LSTM.
    X = LSTM(128, return_sequences=True)(embeddings)
    X = Dropout(0.5)(X)
    # Second LSTM returns only its final hidden state.
    X = LSTM(128, return_sequences=False)(X)
    X = Dropout(0.5)(X)
    # Project onto the 5 emoji classes and apply softmax.
    X = Dense(5)(X)
    X = Activation('softmax')(X)
    model = Model(inputs=sentence_indices, outputs=X)
    return model
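
# Optional: call model.summary() after building the model below to inspect the
# layer shapes and confirm the embedding weights are not trainable.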
# Pad every sentence to the length (in words) of the longest training sentence.
maxLen = max(len(sentence.split()) for sentence in X_train)

model = predict_your_feel((maxLen,), word_to_vec_map, word_to_index)
model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])

# Train on one-hot encoded labels (5 emoji classes).
X_train_indices = sentences_to_indices(X_train, word_to_index, maxLen)
Y_train_oh = convert_to_one_hot(Y_train, C=5)
model.fit(X_train_indices, Y_train_oh, epochs=50, batch_size=32, shuffle=True)

# Evaluate on the test set.
X_test_indices = sentences_to_indices(X_test, word_to_index, max_len=maxLen)
Y_test_oh = convert_to_one_hot(Y_test, C=5)
loss, acc = model.evaluate(X_test_indices, Y_test_oh)
print()
print("Test accuracy = ", acc)

# Enter your sentences here.
x_test = np.array(['please love me :)'])
X_test_indices = sentences_to_indices(x_test, word_to_index, maxLen)
print(x_test[0] + ' ' + label_to_emoji(np.argmax(model.predict(X_test_indices))))
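
# Sketch of a possible extension (uses only the variables and helpers defined above):
# predict several sentences in one batch instead of a single example.
# my_sentences = np.array(['i am so happy', 'this movie is not good'])
# my_indices = sentences_to_indices(my_sentences, word_to_index, maxLen)
# preds = model.predict(my_indices)            # shape (m, 5), one softmax row per sentence
# for s, p in zip(my_sentences, preds):
#     print(s, label_to_emoji(np.argmax(p)))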