
My Aim: to be able to integrate an AI chatbot with Discord

import nltk

nltk.download('punkt')

from nltk.stem.lancaster import LancasterStemmer
stemmer=LancasterStemmer()
import numpy
import tflearn
import tensorflow
import random
import json
import pickle
import nest_asyncio
import asyncio
#---------------------------------------------------------------------------
import discord 
import os





with open("intents.json") as file:
     data=json.load(file)
     print(data['intents'])
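# Assumed structure of intents.json (an illustrative example only; the keys
# "tag", "patterns" and "responses" are the ones the code below reads):
# {"intents": [{"tag": "greeting",
#               "patterns": ["Hi", "Hello"],
#               "responses": ["Hello!", "Hi there!"]}]}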
     
try:
    with open("data.pickle","rb") as f:
        words,labels,training,output=pickle.load(f)
except: 
    words=[]
    labels=[]
    docs_x=[]
    docs_y=[]
    
    for intent in data['intents']:
        for pattern in intent['patterns']:
            wrds=nltk.word_tokenize(pattern)
            words.extend(wrds)
            docs_x.append(wrds)
            docs_y.append(intent["tag"])
             
            
            if intent["tag"] not in labels:
               labels.append(intent["tag"])
               
               
    #remove duplicate          
    words=[stemmer.stem(w.lower()) for w in words if w != "?"]
    
    words=sorted(list(set(words)))
    
    labels=sorted(labels)
     
    
    training=[] 
    output=[]
    
    out_empty=[0 for _ in range(len(labels))]
    
    for x, doc in enumerate(docs_x):
        bag=[]
        wrds=[stemmer.stem(w) for w in doc]
        
        for w in words:
          if w in wrds:
             bag.append(1)
          else:
             bag.append(0)
             
        output_row=out_empty[:]
             
        output_row[labels.index(docs_y[x])]=1
        
        training.append(bag)     
        output.append(output_row)
        
    training=numpy.array(training)
    output=numpy.array(output)
    
    with open("data.pickle","wb") as f:
        pickle.dump((words,labels,training,output),f)

tensorflow.compat.v1.reset_default_graph()

net=tflearn.input_data(shape=[None,len(training[0])])
net=tflearn.fully_connected(net,16) 
net=tflearn.fully_connected(net,16)
net=tflearn.fully_connected(net,len(output[0]),activation="softmax")
net=tflearn.regression(net)

model=tflearn.DNN(net)

model.fit(training, output,n_epoch=10000,batch_size=16,show_metric=True )    

model.save('C:/Users/Desktop/chatbot/model/model.tflearn')
model.load('C:/Users/Desktop/chatbot/model/model.tflearn')

    





def bag_of_words(s,words):

   bag=[0 for _ in range(len(words))]
   s_words=nltk.word_tokenize(s)
   s_words=[stemmer.stem(word.lower()) for word in s_words]

 
   for se in s_words:
       for i,w in enumerate(words):
           if w==se:
              bag[i]=1

   return numpy.array(bag)



def chat():
    print("start talking with the bot (type quit to stop!")
    while True:
        inp=input("You:")
        if inp.lower()=="quit":
           break
       
        results= model.predict([bag_of_words(inp,words)])[0]
        # print("results:",results)
       
        results_index=numpy.argmax(results)
        
        if results[results_index]>0.7:
            
                
            tag=labels[results_index]
            print("tag:", tag)
        
            for tg in data["intents"]:
                if tg["tag"]==tag:
                   responses=tg['responses']
            
            client=discord.Client()             #FOR DISCORD--------------------------------------
            async def on_message(message):
                if inp.author == client.user:
                   return
               
                if inp.content.startswith("$M-bot"):
                    response=responses.request(inp.content[7:])
                    await asyncio.sleep(5) 
                    await inp.channel.send(response) 
                    
            
            on_message(inp)    
            client.run("API KEY TAKEN FROM DISCORD for BOT")
            print("Bot:",random.choice(responses))
         
        else:
          print("I didn't get that. Please try again")  
          
chat()  

Warnings and Errors (Pyconsole):

start talking with the bot (type quit to stop!

You:hello
tag: greeting
C:/Users/Desktop/chatbot/chatbot.py:154: RuntimeWarning: coroutine 'chat.<locals>.on_message' was never awaited
  on_message(inp)
RuntimeWarning: Enable tracemalloc to get the object allocation traceback
Traceback (most recent call last):

  File "F:Anacondalibsite-packagesdiscordclient.py", line 713, in run
    loop.run_forever()

  File "F:Anacondalibasynciobase_events.py", line 560, in run_forever
    self._check_running()

  File "F:Anacondalibasynciobase_events.py", line 552, in _check_running
    raise RuntimeError('This event loop is already running')

RuntimeError: This event loop is already running


During handling of the above exception, another exception occurred:

Traceback (most recent call last):

  File "F:Anacondalibsite-packagesdiscordclient.py", line 90, in _cleanup_loop
    _cancel_tasks(loop)

  File "F:Anacondalibsite-packagesdiscordclient.py", line 75, in _cancel_tasks
    loop.run_until_complete(asyncio.gather(*tasks, return_exceptions=True))

  File "F:Anacondalibasynciobase_events.py", line 592, in run_until_complete
    self._check_running()

  File "F:Anacondalibasynciobase_events.py", line 552, in _check_running
    raise RuntimeError('This event loop is already running')

RuntimeError: This event loop is already running


During handling of the above exception, another exception occurred:

Traceback (most recent call last):

  File "C:/Users/Desktop/chatbot/chatbot.py", line 162, in <module>
    chat()

  File "C:/Users/Desktop/chatbot/chatbot.py", line 155, in chat
    client.run("API KEY TAKEN FROM DISCORD for BOT")

  File "F:Anacondalibsite-packagesdiscordclient.py", line 719, in run
    _cleanup_loop(loop)

  File "F:Anacondalibsite-packagesdiscordclient.py", line 95, in _cleanup_loop
    loop.close()

  File "F:Anacondalibasyncioselector_events.py", line 89, in close
    raise RuntimeError("Cannot close a running event loop")

RuntimeError: Cannot close a running event loop

PROBLEM: Hello friends, I'm trying to make a chatbot that works on Discord and gives its answers through the artificial intelligence model I built, but I am getting a RuntimeWarning: Enable tracemalloc to get the object allocation traceback and a RuntimeError: This event loop is already running. How can I solve these?

2 Answers


  1. Chosen as BEST ANSWER

    The rearranged code:

    
        import nltk
        
        nltk.download('punkt')
        
        from nltk.stem.lancaster import LancasterStemmer
        stemmer=LancasterStemmer()
        import numpy
        import tflearn
        import tensorflow
        import random
        import json
        import pickle
        import nest_asyncio
        import asyncio
        #-------------------------------------------------------------------------------
        import discord 
        import os
        
        
        
        with open("intents.json") as file:
             data=json.load(file)
             print(data['intents'])
             
        client=discord.Client() #OUT OF LOOP
        
        @client.event                        #LISTEN EVENTS
        async def on_message(message):

            if message.author == client.user:
                return

            if message.content.startswith("$M-bot"):
                # message.content[7:] is the text after the "$M-bot" prefix, if it is needed later;
                # here the bot simply sends one of the responses chosen by the model in chat()
                response = random.choice(responses)

                await message.channel.send(response)
                     
            
               
           
             
        try:
            with open("data.pickle","rb") as f:
                words,labels,training,output=pickle.load(f)
        except: 
            words=[]
            labels=[]
            docs_x=[]
            docs_y=[]
            
            for intent in data['intents']:
                for pattern in intent['patterns']:
                    wrds=nltk.word_tokenize(pattern)
                    words.extend(wrds)
                    docs_x.append(wrds)
                    docs_y.append(intent["tag"])
                     
                    
                    if intent["tag"] not in labels:
                       labels.append(intent["tag"])
                       
                       
            #remove duplicate          
            words=[stemmer.stem(w.lower()) for w in words if w != "?"]
            
            words=sorted(list(set(words)))
            
            labels=sorted(labels)
             
            
            training=[] 
            output=[]
            
            out_empty=[0 for _ in range(len(labels))]
            
            for x, doc in enumerate(docs_x):
                bag=[]
                wrds=[stemmer.stem(w) for w in doc]
                
                for w in words:
                  if w in wrds:
                     bag.append(1)
                  else:
                     bag.append(0)
                     
                output_row=out_empty[:]
                     
                output_row[labels.index(docs_y[x])]=1
                
                training.append(bag)     
                output.append(output_row)
                
            training=numpy.array(training)
            output=numpy.array(output)
            
            with open("data.pickle","wb") as f:
                pickle.dump((words,labels,training,output),f)
        
        tensorflow.compat.v1.reset_default_graph()
        
        net=tflearn.input_data(shape=[None,len(training[0])])
        net=tflearn.fully_connected(net,16) 
        net=tflearn.fully_connected(net,16)
        net=tflearn.fully_connected(net,len(output[0]),activation="softmax")
        net=tflearn.regression(net)
        
        model=tflearn.DNN(net)
        
        model.fit(training, output,n_epoch=5000,batch_size=16,show_metric=True )    
        
        model.save('C:/Users/Desktop/chatbot/model/model.tflearn')
        model.load('C:/Users/Desktop/chatbot/model/model.tflearn')
        
            
        
        def bag_of_words(s,words):
        
           bag=[0 for _ in range(len(words))]
           s_words=nltk.word_tokenize(s)
           s_words=[stemmer.stem(word.lower()) for word in s_words]
        
         
           for se in s_words:
               for i,w in enumerate(words):
                   if w==se:
                      bag[i]=1
        
           return numpy.array(bag)
        
        
        
        def chat():
            global responses          #GLOBAL VARIABLES
            global inp                #GLOBAL VARIABLES
            print("start talking with the bot (type quit to stop!")
            while True:
                inp=input("You:")
                if inp.lower()=="quit":
                   break
               
                results= model.predict([bag_of_words(inp,words)])[0]
                # print("results:",results)
               
                results_index=numpy.argmax(results)
                
                if results[results_index]>0.7:
                    
                        
                    tag=labels[results_index]
                    print("tag:", tag)
                
                    for tg in data["intents"]:
                        if tg["tag"]==tag:
                           responses=tg['responses']
                    
                 
                   
                    
                    print("Bot:",random.choice(responses))
                 
                else:
                  print("I didn't get that. Please try again")  
                  
        
        chat()  
          
        client.run("API KEY") 
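
    With this arrangement the client is created exactly once at the top of the script, chat() runs first and blocks on console input, and client.run() only connects to Discord after you type quit; on_message then sends one of the responses stored in the global responses variable.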
    
    

  2. Your error occurs because you keep re-initializing discord.Client. In every program there should be only one instance of discord.Client. If you want the bot to send the last response, move client out of the loop, store the bot's response in a global variable, and have on_message send that variable when the command is received.
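
    A minimal sketch of that pattern (the token, the $M-bot prefix, and the latest_responses variable are placeholders for illustration, not part of the original code):

        import random
        import discord

        latest_responses = ["Hello!"]   # global; updated elsewhere, e.g. by the model in chat()

        client = discord.Client()       # created exactly once, outside any loop
        # newer discord.py releases also require intents, e.g.
        # client = discord.Client(intents=discord.Intents.default())

        @client.event
        async def on_message(message):
            if message.author == client.user:   # ignore the bot's own messages
                return
            if message.content.startswith("$M-bot"):
                # send whichever response was stored last in the global variable
                await message.channel.send(random.choice(latest_responses))

        client.run("YOUR_BOT_TOKEN")    # called exactly once, after everything is set up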
