import json
import pickle
import random

import nltk
import numpy as np
from nltk.stem import WordNetLemmatizer
from tensorflow.keras.layers import Dense, Dropout
from tensorflow.keras.models import Sequential
from tensorflow.keras.optimizers import SGD

lemmatizer = WordNetLemmatizer()

# Resources needed by word_tokenize and WordNetLemmatizer; each download
# is skipped if the corpus is already present.
nltk.download("punkt")
nltk.download("wordnet")
nltk.download("omw-1.4")

words = []
classes = []
documents = []
ignore_words = ["?", "!"]

# Load the intent definitions; intents.json is expected to sit next to
# this script.
with open("intents.json") as data_file:
    intents = json.load(data_file)
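
# Illustrative shape of intents.json (hypothetical example; the real file
# may define more tags and additional keys such as "responses"):
# {
#   "intents": [
#     {"tag": "greeting",
#      "patterns": ["Hi", "How are you?"],
#      "responses": ["Hello!", "Good to see you!"]}
#   ]
# }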

for intent in intents["intents"]:
    for pattern in intent["patterns"]:

        # Tokenize each training pattern into individual words.
        w = nltk.word_tokenize(pattern)
        words.extend(w)

        # Keep the (token list, tag) pair for building training rows later.
        documents.append((w, intent["tag"]))

        # Record each intent tag once.
        if intent["tag"] not in classes:
            classes.append(intent["tag"])

# Lowercase and lemmatize the vocabulary, drop punctuation tokens, and
# deduplicate.
words = [lemmatizer.lemmatize(w.lower()) for w in words if w not in ignore_words]
words = sorted(set(words))

classes = sorted(set(classes))

print(len(documents), "documents")
print(len(classes), "classes", classes)
print(len(words), "unique lemmatized words", words)

# Persist the vocabulary and tag list; the chat application needs both at
# inference time to build input vectors and decode predictions.
pickle.dump(words, open("words.pkl", "wb"))
pickle.dump(classes, open("classes.pkl", "wb"))

# Build the training set: one bag-of-words input vector and one one-hot
# output row per document.
training = []
output_empty = [0] * len(classes)
for doc in documents:
    # Lemmatize the pattern's tokens so they match the vocabulary.
    pattern_words = [lemmatizer.lemmatize(word.lower()) for word in doc[0]]

    # 1 where the vocabulary word occurs in this pattern, else 0.
    bag = [1 if w in pattern_words else 0 for w in words]

    # One-hot encode the intent tag.
    output_row = list(output_empty)
    output_row[classes.index(doc[1])] = 1

    training.append([bag, output_row])

# Shuffle so training batches are not grouped by intent.
random.shuffle(training)
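
# Worked example (hypothetical data): if words == ["good", "hello", "how"]
# and a pattern tokenizes to ["hello", "how", "are", "you"], then
# bag == [0, 1, 1]; with three classes and tag classes[1], the matching
# output_row == [0, 1, 0].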

# Split the shuffled pairs into feature and label arrays.
train_x = np.array([item[0] for item in training])
train_y = np.array([item[1] for item in training])
print("Training data created")

# Feed-forward classifier: two ReLU hidden layers with dropout against
# overfitting, and a softmax output with one unit per intent class.
model = Sequential()
model.add(Dense(128, input_shape=(len(train_x[0]),), activation="relu"))
model.add(Dropout(0.5))
model.add(Dense(64, activation="relu"))
model.add(Dropout(0.5))
model.add(Dense(len(train_y[0]), activation="softmax"))
model.summary()

# SGD with Nesterov momentum; categorical cross-entropy matches the
# one-hot labels.
sgd = SGD(learning_rate=0.01, momentum=0.9, nesterov=True)
model.compile(loss="categorical_crossentropy", optimizer=sgd, metrics=["accuracy"])

# Train the classifier and save it for the chat application; model.save()
# takes only the output path.
hist = model.fit(train_x, train_y, epochs=200, batch_size=5, verbose=1)
model.save("chatbot_model.h5")
print("model created")
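
# A minimal inference sketch, not part of the original training script: it
# shows how the saved artifacts (chatbot_model.h5, words.pkl, classes.pkl)
# could be loaded to classify a sentence. The helper names below
# (clean_up_sentence, bag_of_words, predict_class) are illustrative
# assumptions, not the project's actual API.
from tensorflow.keras.models import load_model


def clean_up_sentence(sentence):
    # Tokenize and lemmatize exactly as the training pipeline does.
    return [lemmatizer.lemmatize(t.lower()) for t in nltk.word_tokenize(sentence)]


def bag_of_words(sentence, vocabulary):
    # Encode a sentence as the same 0/1 vocabulary vector used in training.
    tokens = clean_up_sentence(sentence)
    return np.array([1 if w in tokens else 0 for w in vocabulary])


def predict_class(sentence):
    # Load the saved artifacts and return the most probable intent tag.
    trained = load_model("chatbot_model.h5")
    vocabulary = pickle.load(open("words.pkl", "rb"))
    tags = pickle.load(open("classes.pkl", "rb"))
    probs = trained.predict(np.array([bag_of_words(sentence, vocabulary)]))[0]
    return tags[int(np.argmax(probs))]

# Example usage (assuming a "greeting" tag exists in intents.json):
# print(predict_class("Hello, how are you?"))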