forked from bradtraversy/alexis_speech_assistant
asis.py
import speech_recognition as sr # recognise speech
import playsound # to play an audio file
from gtts import gTTS # google text to speech
import random
from time import ctime # get time details
import webbrowser # open browser
import ssl
import certifi
import time
import os # to remove created audio files
from PIL import Image
import subprocess
import pyautogui #screenshot
import pyttsx3
import bs4 as bs
import urllib.request
import requests
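# The imports above rely on third-party packages; the PyPI package names are assumed
# (they are not pinned anywhere in this repo) to be: SpeechRecognition (plus PyAudio
# for sr.Microphone), playsound, gTTS, Pillow, PyAutoGUI, pyttsx3, beautifulsoup4,
# lxml (the parser used below), requests and certifi.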
class person:
    name = ''

    def setName(self, name):
        self.name = name


class asis:
    name = ''

    def setName(self, name):
        self.name = name
def there_exists(terms):
    # return True if any of the given terms appears in the current voice input
    for term in terms:
        if term in voice_data:
            return True


def engine_speak(text):
    # pyttsx3 version; note that the gTTS engine_speak defined further below overrides it
    text = str(text)
    engine.say(text)
    engine.runAndWait()
r = sr.Recognizer() # initialise a recogniser


# listen for audio and convert it to text:
def record_audio(ask=""):
    with sr.Microphone() as source: # microphone as source
        if ask:
            engine_speak(ask)
        audio = r.listen(source, 5, 5) # listen for the audio via source
        print("Done Listening")
        voice_data = ''
        try:
            voice_data = r.recognize_google(audio) # convert audio to text
        except sr.UnknownValueError: # error: recognizer does not understand
            engine_speak('I did not get that')
        except sr.RequestError:
            engine_speak('Sorry, the service is down') # error: recognizer is not connected
        print(">>", voice_data.lower()) # print what the user said
        return voice_data.lower()
# take a string, make an audio file with gTTS and play it
def engine_speak(audio_string):
    audio_string = str(audio_string)
    tts = gTTS(text=audio_string, lang='en') # text to speech (voice)
    r = random.randint(1, 20000000)
    audio_file = 'audio' + str(r) + '.mp3'
    tts.save(audio_file) # save as mp3
    playsound.playsound(audio_file) # play the audio file
    print(asis_obj.name + ":", audio_string) # print what the app said
    os.remove(audio_file) # remove the audio file
def respond(voice_data):
    # 1: greeting
    if there_exists(['hey', 'hi', 'hello']):
        greetings = ["hey, how can I help you " + person_obj.name,
                     "hey, what's up? " + person_obj.name,
                     "I'm listening " + person_obj.name,
                     "how can I help you? " + person_obj.name,
                     "hello " + person_obj.name]
        greet = greetings[random.randint(0, len(greetings) - 1)]
        engine_speak(greet)

    # 2: name
    if there_exists(["what is your name", "what's your name", "tell me your name"]):
        if person_obj.name:
            engine_speak(f"My name is {asis_obj.name}, {person_obj.name}") # address the user by name
        else:
            engine_speak(f"My name is {asis_obj.name}. what's your name?") # in case you haven't provided your name
    if there_exists(["my name is"]):
        person_name = voice_data.split("my name is")[-1].strip() # everything after the phrase is the name
        engine_speak("okay, i will remember that " + person_name)
        person_obj.setName(person_name) # remember name in person object
    if there_exists(["what is my name"]):
        engine_speak("Your name must be " + person_obj.name)
    if there_exists(["your name should be"]):
        asis_name = voice_data.split("be")[-1].strip()
        engine_speak("okay, i will remember that my name is " + asis_name)
        asis_obj.setName(asis_name) # remember name in asis object
    # 3: how are you
    if there_exists(["how are you", "how are you doing"]):
        engine_speak("I'm very well, thanks for asking " + person_obj.name)

    # 4: time
    if there_exists(["what's the time", "tell me the time", "what time is it", "what is the time"]):
        current_time = ctime().split()[3].split(":")[0:2] # split() without arguments copes with single-digit days
        if current_time[0] == "00":
            hours = '12'
        else:
            hours = current_time[0]
        minutes = current_time[1]
        engine_speak(hours + " hours and " + minutes + " minutes")
    # 5: search google
    if there_exists(["search for"]) and 'youtube' not in voice_data:
        search_term = voice_data.split("for")[-1]
        url = "https://google.com/search?q=" + search_term
        webbrowser.get().open(url)
        engine_speak("Here is what I found for " + search_term + " on google")
    if there_exists(["search"]) and 'youtube' not in voice_data:
        search_term = voice_data.replace("search", "")
        url = "https://google.com/search?q=" + search_term
        webbrowser.get().open(url)
        engine_speak("Here is what I found for " + search_term + " on google")

    # 6: search youtube
    if there_exists(["youtube"]):
        search_term = voice_data.split("for")[-1]
        search_term = search_term.replace("on youtube", "").replace("search", "")
        url = "https://www.youtube.com/results?search_query=" + search_term
        webbrowser.get().open(url)
        engine_speak("Here is what I found for " + search_term + " on youtube")
    # 7: get stock price
    if there_exists(["price of"]):
        search_term = voice_data.split("of")[-1] # everything after "of" is the stock name
        url = "https://google.com/search?q=" + search_term
        webbrowser.get().open(url)
        engine_speak("Here is what I found for " + search_term + " on google")

    # 8: time table
    if there_exists(["show my time table"]):
        im = Image.open(r"D:\WhatsApp Image 2019-12-26 at 10.51.10 AM.jpeg")
        im.show()
    # 9: weather
    if there_exists(["weather"]):
        url = "https://www.google.com/search?q=weather"
        webbrowser.get().open(url)
        engine_speak("Here is the weather I found on google")
    # 10: rock paper scissors
    if there_exists(["game"]):
        voice_data = record_audio("choose among rock paper or scissor")
        moves = ["rock", "paper", "scissor"]
        cmove = random.choice(moves)
        pmove = voice_data
        engine_speak("The computer chose " + cmove)
        engine_speak("You chose " + pmove)
        if pmove == cmove:
            engine_speak("the match is a draw")
        elif pmove == "rock" and cmove == "scissor":
            engine_speak("Player wins")
        elif pmove == "rock" and cmove == "paper":
            engine_speak("Computer wins")
        elif pmove == "paper" and cmove == "rock":
            engine_speak("Player wins")
        elif pmove == "paper" and cmove == "scissor":
            engine_speak("Computer wins")
        elif pmove == "scissor" and cmove == "paper":
            engine_speak("Player wins")
        elif pmove == "scissor" and cmove == "rock":
            engine_speak("Computer wins")
    # 11: toss a coin
    if there_exists(["toss", "flip", "coin"]):
        moves = ["heads", "tails"]
        cmove = random.choice(moves)
        engine_speak("The computer chose " + cmove)
    # 12: calculator
    if there_exists(["plus", "minus", "multiply", "divide", "power", "+", "-", "*", "/"]):
        opr = voice_data.split()[1]
        if opr in ('+', 'plus'):
            engine_speak(int(voice_data.split()[0]) + int(voice_data.split()[2]))
        elif opr in ('-', 'minus'):
            engine_speak(int(voice_data.split()[0]) - int(voice_data.split()[2]))
        elif opr in ('multiply', 'x', '*'):
            engine_speak(int(voice_data.split()[0]) * int(voice_data.split()[2]))
        elif opr in ('divide', '/'):
            engine_speak(int(voice_data.split()[0]) / int(voice_data.split()[2]))
        elif opr == 'power':
            engine_speak(int(voice_data.split()[0]) ** int(voice_data.split()[2]))
        else:
            engine_speak("Wrong Operator")
    # 13: screenshot
    if there_exists(["capture", "my screen", "screenshot"]):
        myScreenshot = pyautogui.screenshot()
        myScreenshot.save('D:/screenshot/screen.png')
    # 14: search wikipedia for a definition
    if there_exists(["definition of"]):
        definition = record_audio("what do you need the definition of")
        url = urllib.request.urlopen('https://en.wikipedia.org/wiki/' + definition.replace(" ", "_"))
        soup = bs.BeautifulSoup(url, 'lxml')
        definitions = [str(paragraph.text) for paragraph in soup.find_all('p')]
        # read out the first non-empty paragraph; the lead <p> of a wikipedia article is often empty
        definition_text = next((d for d in definitions if d.strip()), '')
        if definition_text:
            engine_speak('here is what i found ' + definition_text)
        else:
            engine_speak("im sorry i could not find the definition for " + definition)
if there_exists(["exit", "quit", "goodbye"]):
engine_speak("bye")
exit()
# Current city or region
if there_exists(["where am i"]):
Ip_info = requests.get('https://api.ipdata.co?api-key=test').json()
loc = Ip_info['region']
engine_speak(f"You must be somewhere in {loc}")
# Current location as per Google maps
if there_exists(["what is my exact location"]):
url = "https://www.google.com/maps/search/Where+am+I+?/"
webbrowser.get().open(url)
engine_speak("You must be somewhere near here, as per Google maps")
time.sleep(1)
person_obj = person()
asis_obj = asis()
asis_obj.name = 'kiki'
person_obj.name = ""
engine = pyttsx3.init()

while True:
    voice_data = record_audio("Recording") # get the voice input
    print("Done")
    print("Q:", voice_data)
    respond(voice_data) # respond
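# Quick usage note (a sketch of typical use, assuming a working microphone and an
# internet connection for the google speech recognition and gTTS calls): run
# `python asis.py`, speak after the "Recording" prompt, and say "exit", "quit"
# or "goodbye" to stop.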