Building a Simple Chatbot From Scratch
All about how a simple chatbot is built.
- Creating Environment for Chatbot
- Preprocessing the json data
- Now Prediction Time
- Look at the Result
- Adding some context to the conversation, i.e. contextualization for handling follow-up questions and intents
- See the result after adding contextualization
Creating Environment for Chatbot
- Creating a conda environment for the chatbot:
conda create -n chatbot python=3.6 -y
- Activate the environment:
conda activate chatbot
- Install all the dependencies:
pip install nltk
pip install numpy
pip install tensorflow-gpu   # For GPU only
pip install tflearn
You can download the intents.json file from here.
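- If you do not have the file yet, the sketch below shows the general structure it needs; the tags, patterns and responses are illustrative placeholders rather than the exact contents of the original file (the optional context_set / context_filter keys are only used later, in the contextualization step):
import json

# Illustrative structure only -- replace with your own intents, patterns and responses
sample_intents = {
    "intents": [
        {"tag": "greeting",
         "patterns": ["Hi", "Hello", "Good day"],
         "responses": ["Hello! How can I help you?"]},
        {"tag": "hours",
         "patterns": ["What are your hours of operation?", "When are you open?"],
         "responses": ["We are open 9am to 9pm, every day of the week."]},
        {"tag": "delivery",
         "patterns": ["Can you please let me know the delivery options?"],
         "responses": ["We deliver within the city."],
         "context_set": "delivery"}
    ]
}

# Only run this if you do not already have an intents.json file
with open("intents.json", "w") as f:
    json.dump(sample_intents, f, indent=2)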
Now import the required libraries.
import nltk
nltk.download('punkt')
from nltk.stem.lancaster import LancasterStemmer
stemmer = LancasterStemmer()
# Libraries needed for Tensorflow processing
import tensorflow as tf
import numpy as np
import tflearn
import random
import json
import pickle
- Here we load our intents.json file, where we have defined our intents, patterns, responses, etc.
with open('intents.json') as json_data:
    intents = json.load(json_data)
- This is the JSON file we have created for this purpose.
intents
words = []
classes = []
documents = []
ignore = ['?']
# loop through each sentence in the intent's patterns
for intent in intents['intents']:
    for pattern in intent['patterns']:
        # tokenize each and every word in the sentence
        w = nltk.word_tokenize(pattern)
        # add words to the words list
        words.extend(w)
        # add word(s) to documents
        documents.append((w, intent['tag']))
        # add tags to our classes list
        if intent['tag'] not in classes:
            classes.append(intent['tag'])
- In this cell we stem all the collected words, lowercase them, and remove duplicates from the words and classes lists.
words = [stemmer.stem(w.lower()) for w in words if w not in ignore]
words = sorted(list(set(words)))
# remove duplicate classes
classes = sorted(list(set(classes)))
print (len(documents), "documents")
print (len(classes), "classes", classes)
print (len(words), "unique stemmed words", words)
- In this cell we prepare the data for training:
training = []
output = []
# create an empty array for output
output_empty = [0] * len(classes)
# create training set, bag of words for each sentence
for doc in documents:
    # initialize bag of words
    bag = []
    # list of tokenized words for the pattern
    pattern_words = doc[0]
    # stemming each word
    pattern_words = [stemmer.stem(word.lower()) for word in pattern_words]
    # create bag of words array
    for w in words:
        bag.append(1 if w in pattern_words else 0)
    # output is '1' for current tag and '0' for rest of other tags
    output_row = list(output_empty)
    output_row[classes.index(doc[1])] = 1
    training.append([bag, output_row])
# shuffling features and turning it into np.array
random.shuffle(training)
# dtype=object because bag and output_row have different lengths
training = np.array(training, dtype=object)
# creating training lists
train_x = list(training[:,0])
train_y = list(training[:,1])
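- As a quick (optional) sanity check on the prepared data, each bag-of-words vector should have one entry per stemmed word and each output row one entry per class:
# Optional sanity check, not part of the original notebook
print(len(train_x), "training examples")
print(len(train_x[0]), "features per example (one per stemmed word)")
print(len(train_y[0]), "output values per example (one per class)")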
- In this cell we use the tflearn wrapper, which makes defining and training the network faster and simpler than writing the TensorFlow code by hand.
- We are building a deep neural network on top of tflearn.
- To learn more about tflearn visit: Tflearn.
- We give the input to tflearn as a placeholder (tflearn.input_data).
- Then we add two fully connected layers with 10 nodes each.
- Finally, we add an output layer with one node per class and a softmax activation.
- The output of the softmax layer is the probability of each class; it is given to the tflearn.regression function, which applies a regression (linear or logistic) to the provided input, setting up the loss and optimizer used for training.
- We then give the output of tflearn.regression to tflearn's Deep Neural Network (DNN) model.
tf.compat.v1.reset_default_graph()
# Building neural network
net = tflearn.input_data(shape=[None, len(train_x[0])])
net = tflearn.fully_connected(net, 10)
net = tflearn.fully_connected(net, 10)
net = tflearn.fully_connected(net, len(train_y[0]), activation='softmax')
net = tflearn.regression(net)
# Defining model and setting up tensorboard
model = tflearn.DNN(net, tensorboard_dir='tflearn_logs')
# Start training
model.fit(train_x, train_y, n_epoch=1000, batch_size=8, show_metric=True)
model.save('model.tflearn')
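- Since tensorboard_dir='tflearn_logs' is set above, you can optionally inspect the training curves by running tensorboard --logdir tflearn_logs in a separate terminal.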
- In this step we store the extracted words, classes, train_x and train_y in a file called training_data using pickle.dump, and then load them back with pickle.load.
pickle.dump( {'words':words, 'classes':classes, 'train_x':train_x, 'train_y':train_y}, open( "training_data", "wb" ) )
data = pickle.load( open( "training_data", "rb" ) )
words = data['words']
classes = data['classes']
train_x = data['train_x']
train_y = data['train_y']
- Loading the intents.json file.
with open('intents.json') as json_data:
    intents = json.load(json_data)
- Loading the trained model.
model.load('./model.tflearn')
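- Note: model.load works here only because net and model are still defined from the training cell in the same session. If you restart the notebook, you need to rebuild the same architecture first and then load the saved weights; a minimal sketch (reusing the imports and the pickled data loaded above) would be:
# Assumes the imports and the pickle.load cell above have already been run
tf.compat.v1.reset_default_graph()
net = tflearn.input_data(shape=[None, len(train_x[0])])
net = tflearn.fully_connected(net, 10)
net = tflearn.fully_connected(net, 10)
net = tflearn.fully_connected(net, len(train_y[0]), activation='softmax')
net = tflearn.regression(net)
model = tflearn.DNN(net, tensorboard_dir='tflearn_logs')
model.load('./model.tflearn')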
- clean_up_sentence function: takes an input sentence from the user and cleans it up by tokenizing and stemming each word, returning the list of stemmed words.
- bow function: takes a sentence and the extracted words as input and returns the bag-of-words array.
def clean_up_sentence(sentence):
    # tokenizing the pattern
    sentence_words = nltk.word_tokenize(sentence)
    # stemming each word
    sentence_words = [stemmer.stem(word.lower()) for word in sentence_words]
    return sentence_words

# returning bag of words array: 0 or 1 for each word in the bag that exists in the sentence
def bow(sentence, words, show_details=False):
    # tokenizing the pattern
    sentence_words = clean_up_sentence(sentence)
    # generating bag of words
    bag = [0] * len(words)
    for s in sentence_words:
        for i, w in enumerate(words):
            if w == s:
                bag[i] = 1
                if show_details:
                    print("found in bag: %s" % w)
    return np.array(bag)
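- A quick way to see what these helpers produce (the sentence below is just an example):
# Example only: prints which stemmed words from the sentence were found in the vocabulary,
# followed by the full 0/1 bag-of-words vector
p = bow("What are your hours of operation?", words, show_details=True)
print(p)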
- classify function: takes a sentence as input, predicts its intent, and returns the matching intents with their probabilities.
- response function: takes a sentence as input and returns the chatbot's response.
ERROR_THRESHOLD = 0.30
def classify(sentence):
    # generate probabilities from the model
    results = model.predict([bow(sentence, words)])[0]
    # filter out predictions below a threshold
    results = [[i, r] for i, r in enumerate(results) if r > ERROR_THRESHOLD]
    # sort by strength of probability
    results.sort(key=lambda x: x[1], reverse=True)
    return_list = []
    for r in results:
        return_list.append((classes[r[0]], r[1]))
    # return tuple of intent and probability
    return return_list
def response(sentence, userID='123', show_details=False):
    results = classify(sentence)
    # if we have a classification then find the matching intent tag
    if results:
        # loop as long as there are matches to process
        while results:
            for i in intents['intents']:
                # find a tag matching the first result
                if i['tag'] == results[0][0]:
                    # a random response from the intent
                    return print(random.choice(i['responses']))
            results.pop(0)
classify('What are your hours of operation?')
response('What is machine learning?')
response('What is menu for today?')
response('Do you accept Credit Card?')
response('Where can we locate you?')
response('That is helpful')
response('Bye')
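- The calls above test one sentence at a time; if you want to chat with the bot interactively, a small loop like this (not part of the original notebook) does the trick:
# Minimal interactive loop around response(); type 'quit' to stop
while True:
    sentence = input("You: ")
    if sentence.lower() in ('quit', 'exit'):
        break
    response(sentence)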
context = {}
ERROR_THRESHOLD = 0.25
def classify(sentence):
    # generate probabilities from the model
    results = model.predict([bow(sentence, words)])[0]
    # filter out predictions below a threshold
    results = [[i, r] for i, r in enumerate(results) if r > ERROR_THRESHOLD]
    # sort by strength of probability
    results.sort(key=lambda x: x[1], reverse=True)
    return_list = []
    for r in results:
        return_list.append((classes[r[0]], r[1]))
    # return tuple of intent and probability
    return return_list
def response(sentence, userID='123', show_details=False):
    results = classify(sentence)
    # if we have a classification then find the matching intent tag
    if results:
        # loop as long as there are matches to process
        while results:
            for i in intents['intents']:
                # find a tag matching the first result
                if i['tag'] == results[0][0]:
                    # set context for this intent if necessary
                    if 'context_set' in i:
                        if show_details: print('context:', i['context_set'])
                        context[userID] = i['context_set']
                    # check if this intent is contextual and applies to this user's conversation
                    if 'context_filter' not in i or \
                       (userID in context and 'context_filter' in i and i['context_filter'] == context[userID]):
                        if show_details: print('tag:', i['tag'])
                        # a random response from the intent
                        return print(random.choice(i['responses']))
            results.pop(0)
response('Can you please let me know the delivery options?')
response('What is menu for today?')
response("Hi there!", show_details=True)
context
response('What is menu for today?')
- Here we ask a question whose exact wording is not in the intents.json file, and the bot still gives pretty good results. In this way we can add new intents and questions to the chatbot.
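- For example, to teach the bot a new topic you could append another intent to intents.json (the tag, patterns and responses below are made up for illustration) and then rerun the preprocessing and training cells so the model learns the new class:
# Hypothetical new intent -- the tag, patterns and responses are illustrative only
new_intent = {
    "tag": "weekend_hours",
    "patterns": ["Are you open on weekends?", "Do you open on Sunday?"],
    "responses": ["Yes, we are open on weekends as well."]
}
intents['intents'].append(new_intent)
with open('intents.json', 'w') as f:
    json.dump(intents, f, indent=2)
# remember to rerun the preprocessing and model.fit cells after this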