-
Notifications
You must be signed in to change notification settings - Fork 4
/
Copy pathVoiceAssistant_5.3.py
1484 lines (1274 loc) · 65.2 KB
/
VoiceAssistant_5.3.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565
566
567
568
569
570
571
572
573
574
575
576
577
578
579
580
581
582
583
584
585
586
587
588
589
590
591
592
593
594
595
596
597
598
599
600
601
602
603
604
605
606
607
608
609
610
611
612
613
614
615
616
617
618
619
620
621
622
623
624
625
626
627
628
629
630
631
632
633
634
635
636
637
638
639
640
641
642
643
644
645
646
647
648
649
650
651
652
653
654
655
656
657
658
659
660
661
662
663
664
665
666
667
668
669
670
671
672
673
674
675
676
677
678
679
680
681
682
683
684
685
686
687
688
689
690
691
692
693
694
695
696
697
698
699
700
701
702
703
704
705
706
707
708
709
710
711
712
713
714
715
716
717
718
719
720
721
722
723
724
725
726
727
728
729
730
731
732
733
734
735
736
737
738
739
740
741
742
743
744
745
746
747
748
749
750
751
752
753
754
755
756
757
758
759
760
761
762
763
764
765
766
767
768
769
770
771
772
773
774
775
776
777
778
779
780
781
782
783
784
785
786
787
788
789
790
791
792
793
794
795
796
797
798
799
800
801
802
803
804
805
806
807
808
809
810
811
812
813
814
815
816
817
818
819
820
821
822
823
824
825
826
827
828
829
830
831
832
833
834
835
836
837
838
839
840
841
842
843
844
845
846
847
848
849
850
851
852
853
854
855
856
857
858
859
860
861
862
863
864
865
866
867
868
869
870
871
872
873
874
875
876
877
878
879
880
881
882
883
884
885
886
887
888
889
890
891
892
893
894
895
896
897
898
899
900
901
902
903
904
905
906
907
908
909
910
911
912
913
914
915
916
917
918
919
920
921
922
923
924
925
926
927
928
929
930
931
932
933
934
935
936
937
938
939
940
941
942
943
944
945
946
947
948
949
950
951
952
953
954
955
956
957
958
959
960
961
962
963
964
965
966
967
968
969
970
971
972
973
974
975
976
977
978
979
980
981
982
983
984
985
986
987
988
989
990
991
992
993
994
995
996
997
998
999
1000
# ---------------------------------------------------------------------------
# Module setup: imports, optional-hardware/package detection, API credentials,
# location lookup, and the global state shared by the assistant loop.
# ---------------------------------------------------------------------------
import os, time, math, re, json, pyttsx3, requests, pvporcupine, struct, pyaudio, datetime, tiktoken
import speech_recognition as sr
from colorama import init, Fore, Back, Style
from urllib.request import urlopen
from googleapiclient.discovery import build
from langchain.utilities.wolfram_alpha import WolframAlphaAPIWrapper
from google.cloud import texttospeech
try:
    # For Raspberry Pi: DHT11 temperature sensor + MAX7219 8x8 LED-matrix display.
    import Adafruit_DHT
    from luma.led_matrix.device import max7219
    from luma.core.interface.serial import spi, noop
    from luma.core.render import canvas
    MAX7219Lib = True
except:
    print("No MAX7219 or Adafruit packages found, using settings to not use 8x8 matrix display")
    MAX7219Lib = False
try:
    import vlc # For Raspberry Pi
    print("VLC package found (this is best for Raspberry Pi's)")
    vlcLib = True
except:
    print("No VLC package found (this is fine if you have Pygame installed)")
    vlcLib = False
try:
    import pygame # For Windows
    pygameLib = True
    print("Pygame package found (this is best for Windows)")
except:
    print("No Pygame package found (this is fine if you have VLC installed)")
    pygameLib = False
try:
    from google.cloud import speech
except:
    print("No Google Cloud package found (offline STT will be used instead)")
    # NOTE(review): this fallback is clobbered by the CustomSettings import
    # below, which re-binds googleSTT unconditionally — confirm intended precedence.
    googleSTT = False
from CustomSettings import assistantSpeechOn, offlineTTS, textInput, keepOnListening, listenTime, sumHistoryTime, messageLimit, startPrompt, summarizePrompt
from CustomSettings import wakeUpWords, STABILITY, SIMILARITY_BOOST, VOICE_ID, animationFPS, googleSTT, swedish, english, maxToolsPerPrompt, openAIdelay
from CustomSettings import googleTTS_name, googleTTS_gender, swedishStartPrompt, wakeWordOn, wakeSpeaker, speakerSleepTime, RaspberryPi, devMode, overrideMemPrompt
from CustomSettings import sweOverrideMemPrompt, GPT4, elevenLabs, wolframAlpha, googleSearch
from apiKeys import openai_api_key, porcupineAccessKey, XI_API_KEY, googleCustomSearchAPI, googleSearchEngineID, GOOGLE_JSON_CREDENTIALS, wolframAlphaAppID
import openai
openai.api_key = openai_api_key
init(autoreset=True) # Initializes Colorama (print colored text)
if wolframAlpha:
    os.environ["WOLFRAM_ALPHA_APPID"] = wolframAlphaAppID
    wolfram = WolframAlphaAPIWrapper()
if RaspberryPi:
    try:
        # Install a no-op ALSA error handler so libasound stops spamming stderr.
        from ctypes import *
        ERROR_HANDLER_FUNC = CFUNCTYPE(None, c_char_p, c_int, c_char_p, c_int, c_char_p) # Define our error handler type
        def py_error_handler(filename, line, function, err, fmt):
            # Deliberately swallow ALSA error messages.
            pass
        c_error_handler = ERROR_HANDLER_FUNC(py_error_handler)
        asound = cdll.LoadLibrary('libasound.so')
        asound.snd_lib_error_set_handler(c_error_handler) # Set error handler
        print("Libasound module found (Libasound error handling will be used)")
    except:
        print("No Libasound module found (No Libasound error handling will be used)")
        RaspberryPi = False
# Porcupine (Wake-word recognizer)
if wakeWordOn:
    try:
        porcupine = pvporcupine.create(
            access_key=porcupineAccessKey,
            keywords=wakeUpWords # Wake-up-words
        )
    except:
        print("No Porcupine Access Key found in apiKeys.py file (wake-word recognizer will not be used)")
        wakeWordOn = False
# Paths are resolved relative to this script, not the working directory.
scriptDir = os.path.dirname(os.path.abspath(__file__))
longTermMemoryPath = os.path.join(scriptDir, "longTermMemory.json")
textToSpeechFilePath = os.path.join(scriptDir, "textToSpeech.mp3")
googleCredentialPath = os.path.join(scriptDir, GOOGLE_JSON_CREDENTIALS)
if os.path.exists(googleCredentialPath):
    os.environ['GOOGLE_APPLICATION_CREDENTIALS'] = googleCredentialPath
else:
    print(Style.BRIGHT+Fore.RED+"WARNING: Google JSON Credentials File was not found. Please provide a valid file name for it in apiKeys.py to use Google Search, STT and TTS.")
    # Without Google credentials, Swedish (which needs Google STT/TTS) is
    # disabled and the assistant falls back to English.
    if swedish:
        print(Style.BRIGHT+Fore.RED+"Assistant requires Google STT and TTS to talk Swedish, which means that only english is used until you have fixed this.")
        swedish = False
        english = True
        googleSTT = False
        googleSearch = False
        print("Using another STT in english instead")
# ElevenLabs
OUTPUT_PATH = "textToSpeech.mp3"
# IP info, location (used by the {LOCATION} tool)
IPinfoURL='http://ipinfo.io/json'
IPinfoResponse=urlopen(IPinfoURL)
IPinfoData=json.load(IPinfoResponse)
city = IPinfoData['city']
postal = IPinfoData['postal']
timezone = IPinfoData['timezone']
country = IPinfoData['country']
if country == "SE": country = "Sweden"
if MAX7219Lib == True:
    from mutagen.mp3 import MP3
    from images import loading_frames, talk_frames, think_frame
    # Define pins for MAX7219
    serial = spi(port=0, device=0, gpio=noop())
    device = max7219(serial, cascaded=1)
    # Define pin for DHT11
    dht11_pin = 4
    sensor = Adafruit_DHT.DHT11
animationSPF = 1/animationFPS  # seconds per animation frame
# Offline text-to-speech
tts = pyttsx3.init()
tts.startLoop(False)
# PyAudio (stream is opened/closed elsewhere)
pa = None
audio_stream = None
# Speech-to-text
stt = sr.Recognizer()
# Initialize pygame (only when it is the chosen audio backend)
if (pygameLib == True) and (vlcLib == False):
    pygame.init()
    pygame.mixer.init()
frameLength = 512
sampleRate = 16000
hasSummarized = True
lastOpenAIresponse = time.time() - openAIdelay
start_time = time.time()
lastSoundTime = 0
history = []                  # short-term conversation memory
messages = []
messageCount = 0
totAnswerTokens = 0
messageTokens = 0
kwargs = {}
totalCost = 0
rstMemStage = 0               # memory-reset confirmation state machine stage
saveMemStage = 0              # memory-save confirmation state machine stage
memRstChoice = ''             # which memory the user chose to reset
# Whitelisted math names exposed to the calculator tool's eval() call;
# values are pulled straight from the stdlib math module.
math_functions = {
    name: getattr(math, name)
    for name in (
        "sin", "cos", "tan", "asin", "acos", "atan",
        "exp", "log", "log10", "sqrt", "ceil", "floor",
        "pi", "e",
    )
}
# Infinite main loop
# Infinite main loop
def main():
    """Run one pass of the assistant loop.

    Polls the wake-word detector (or fakes a detection when wake-word /
    text-input mode says so), then listens and answers until ``listenTime``
    seconds pass without input. After the conversation, checks whether the
    history should be summarized and keeps an auto-sleeping speaker awake.
    Called repeatedly by an outer loop; communicates via module globals.
    """
    # Listen for wake-word
    if wakeWordOn:
        pcm = audio_stream.read(porcupine.frame_length)
        pcm = struct.unpack_from("h" * porcupine.frame_length, pcm)
        keyword_index = porcupine.process(pcm)
    if (wakeWordOn == False) or (textInput == True): keyword_index = 1 # Trigger a virtual "wake-word detected", to go into the loop instantly
    global start_time
    global hasSummarized
    global lastSoundTime
    if keyword_index >= 0: # If wake-word detected
        if wakeWordOn and (textInput == False): print(Style.NORMAL+Fore.WHITE+"Wake-word detected")
        if MAX7219Lib == True: animate(loading_frames, animationSPF) # Visualize assistant wake-up on matrix display
        start_time = time.time()
        while time.time() - start_time < listenTime: # Listen for certain amount of seconds
            if textInput == True:
                userPrompt = input(Style.BRIGHT + Fore.GREEN + 'User: '+ Style.NORMAL)
            else:
                if googleSTT == True:
                    userPrompt = googleSpeechToText()
                else:
                    userPrompt = speechToText() # Listen to speech from user
                # Stop listening (to not trigger speech when assistant is talking)
                closeAudioStream()
            # BUGFIX: original used `is not None or != ''`, which is always
            # true and crashed on `"User: " + None` when STT heard nothing.
            if userPrompt is None or userPrompt == '': # If there was no speech
                break # Break out of loop, and restart
            if textInput == False: print(Style.BRIGHT+Fore.GREEN+"User: "+Style.NORMAL+userPrompt)
            # There was speech: think, pick tools, and answer.
            if MAX7219Lib == True: still(think_frame, 0) # Visualize thinking on matrix display
            toolAnswer = useTool(userPrompt)
            answer(userPrompt, toolAnswer)
            if wakeSpeaker and assistantSpeechOn: lastSoundTime = time.time()
            hasSummarized = False
            start_time = time.time()
            if keepOnListening == False: break # Stop listening, if keepOnListening is set to false by user
            elif (keepOnListening == True) and (textInput == False):
                print("Listening...")
                openAudioStream() # Start listening again
        # Restart: go back to idle / wake-word mode
        if MAX7219Lib == True: animate(reversed(loading_frames), animationSPF) # Visualize assistant sleep on matrix display
        openAudioStream()
        if wakeWordOn and (textInput == False): print(Style.NORMAL+Fore.WHITE+"Waiting for wake-word")
    # NOTE(review): without the commented hasSummarized guard this block
    # re-triggers on every poll once the timeout has passed — confirm intended.
    if time.time() - start_time > sumHistoryTime: # if (time.time() - start_time > sumHistoryTime) and (hasSummarized == False) # TEST
        print(Fore.YELLOW+str(sumHistoryTime)+" seconds has passed since last conversation")
        summarizeHistory()
        if wakeWordOn and (textInput == False): print("Waiting for wake-word")
        elif (keepOnListening == True) and (textInput == False): print("Listening...")
    # Keep auto-sleeping speakers awake with a periodic beep.
    if wakeSpeaker and assistantSpeechOn and (time.time() - lastSoundTime > speakerSleepTime):
        beep()
        lastSoundTime = time.time()
def youSure(memory, sureBool=True):
    """Build a '[PRIO]' system prompt that makes the assistant ask the user
    to confirm a memory action before it is carried out.

    memory   -- the action/target keyword: 'SAVE', 'LONG TERM', 'SHORT TERM'
                or 'BOTH'
    sureBool -- False when this is a repeated confirmation attempt (adds
                'again'/'igen' to the question)
    Returns the prompt string in English or Swedish per the global flag.
    """
    global overrideMemPrompt
    global sweOverrideMemPrompt
    if english == True:
        if 'SAVE' in memory:  # confirm saving short-term memory
            toolAnswer = '[PRIO]You must ask the user, in your next message, for approval if you understood correctly that the short term memory (current conversation) of this device is preferred to be saved to the long term memory.'
        else:  # confirm deletion of the selected memory target
            if 'BOTH' in memory: target = "BOTH the long term memory AND short term memory of this device are"
            elif 'LONG TERM' in memory: target = "the long term memory of this device is"
            elif 'SHORT TERM' in memory: target = "the short term memory of this device is"
            again = " again" if sureBool == False else ""
            toolAnswer = ("[PRIO]" + overrideMemPrompt
                          + f"You must ask the user{again}, in your next message, for approval if you understood correctly that {target} preferred to be deleted. Ask if the user meant to say yes or no.")
    elif english == False:
        if 'SAVE' in memory:  # confirm saving short-term memory (Swedish)
            toolAnswer = '[PRIO]Du har en falsk konversation med en användare. Låtsas att du kan spara minne på den här enheten. Du MÅSTE be användaren, i ditt nästa meddelande, om godkännande om du förstått rätt att korttidsminnet (aktuell konversation) för denna enhet föredras att sparas i långtidsminnet.'
        else:  # confirm deletion (Swedish)
            if 'BOTH' in memory: target = "BÅDE långtidsminnet OCH korttidsminnet för denna enhet"
            elif 'LONG TERM' in memory: target = "den här enhetens långtidsminne"
            elif 'SHORT TERM' in memory: target = "korttidsminnet för denna enhet"
            igen = " igen" if sureBool == False else ""
            toolAnswer = ("[PRIO]" + sweOverrideMemPrompt
                          + f"Du måste be användaren{igen}, i ditt nästa meddelande, om godkännande om du förstått rätt att {target} föredras att bli raderat. Be användaren att svara ja eller nej.")
    return toolAnswer
def userApprove(userPrompt):
    """Classify the user's latest reply as 'YES', 'NO' or 'UNSURE' by making
    a forced function-calling request to the language model."""
    schema = {
        "name": "yes_or_no",
        "description": 'Determine if the user said “yes” or “no” in any way (in Swedish or English). You must be certain, otherwise choose "UNSURE".',
        "parameters": {
            "type": "object",
            "properties": {
                "decision": {
                    "type": "string",
                    "enum": ["YES", "NO", "UNSURE"],
                },
            },
            "required": ["decision"],
        },
    }
    reply = generateResponse(
        [{"role": "user", "content": userPrompt}],
        [schema],
        0.2,
        {"name": "yes_or_no"},
    )
    # Fall back to UNSURE when the model did not call the function.
    if not reply.get("function_call"):
        return 'UNSURE'
    return json.loads(reply["function_call"]["arguments"]).get("decision")
def useTool(userPrompt):
    """Let the model pick and chain tools (Wolfram Alpha, Google Search,
    calculator, time/date, location, temp sensor, memory save/reset) to help
    answer the user's latest message.

    Returns the collected tool outputs joined by blank lines, or '' when no
    tool was used. The memory save/reset tools are multi-turn state machines
    driven by the module globals saveMemStage / rstMemStage / memRstChoice.
    """
    global maxToolsPerPrompt, devMode, wolframAlpha, googleSearch
    useTools = True
    stringToolAnswers = ''
    toolsUsedNum = 0
    toolAnswers = []
    toolMsgs = []
    # Build the tool-selection context: prior summaries + history + user msg.
    appendPrevSummations(toolMsgs)
    toolMsgs.extend(history)
    toolMsgs.append({'role':'user','content':userPrompt})
    # Tool menu shown to the model; unavailable tools are omitted.
    toolStartPrompt = (
        "You can use the following tools to answer the user's latest message:\n"
        +('{WOLFRAM ALPHA} Provides answers to mathematics, science, technology, society and culture, and weather (location-based).\n' if wolframAlpha else "")
        +('{GOOGLE SEARCH} Can search for anything. Best used for up-to-date information.\n' if googleSearch else "")
        +("{TEMP SENSOR} Gives current room temperature.\n" if MAX7219Lib else "")
        +'{SIMPLE CALCULATOR} Answers simple calculations.\n'
        +'{LOCATION} Provides current device location.\n'
        +'{TIME & DATE} Access the current date and time.\n'
        +"{SAVE MEMORY} ONLY if the user CLEARLY conveyed to want to save, remember or summarize current conversation to the device's long-term-memory.\n" # +"{SAVE MEMORY} ONLY if the user CLEARLY conveyed to want to save or summarize current conversation to the device's long-term-memory.\n"
        +"{RESET MEMORY} If the user wants to reset/delete/clear the device's memory/database/messages, or are deciding on what memory to reset/delete.\n\n"
        +"To use a tool, write {} with its name in braces. If you can answer without a tool, write {YES}.\n\n"
        +'Examples:\n'
        +('Weather info: {LOCATION}, {WOLFRAM ALPHA}\n' if wolframAlpha else "")
        +'Greeting user: {YES}\n'
        +("Obama's age to the power of 2: {TIME & DATE}, {WOLFRAM ALPHA}\n" if wolframAlpha else "")
        +("Pewdiepie's height subtracted by Marzia's height: {GOOGLE SEARCH}, {SIMPLE CALCULATOR}\n" if googleSearch else "")
        +("Holidays today: {TIME & DATE}, {GOOGLE SEARCH}\n" if googleSearch else "")
        +"Memory deletion or clear database: {RESET MEMORY}\n"
        +"Recall or remember info from long-term memory: {YES}\n"
        +'Unsure/unnecessary: {YES}\n\n'
        +'Choose a tool now within {}, nothing else.'
    )
    toolMsgs.append({'role':'system','content':toolStartPrompt})
    response1=generateResponse(toolMsgs, temp=0.2, max_tokens=8) # Choose a tool
    if devMode: print("response1:",response1)
    # Extract the text in braces
    match1 = re.search(r'\{(.+?)\}', response1)
    if match1: response1 = match1.group(1)
    toolMsgs.append({'role':'assistant','content':'{'+response1+'}'})
    while useTools:
        # --- SAVE MEMORY: two-stage confirmation state machine --------------
        if 'SAVE MEMORY' in response1:
            print(Fore.YELLOW+"Assistant used memory save tool:")
            global saveMemStage
            if saveMemStage == 1: # Did the user approve?
                decision=userApprove(userPrompt)
                if decision == 'YES':
                    print(Fore.YELLOW+"User accepted: Short-term memory will be saved shortly")
                    summarizeOutput = summarizeHistory(userPrompt)
                    toolAnswer = f'[PRIO]'+summarizeOutput
                    saveMemStage = 0
                elif decision == 'NO':
                    print(Fore.YELLOW+"User canceled: Short-term memory will not be saved")
                    toolAnswer = ''
                    saveMemStage = 0
                else: # UNSURE
                    toolAnswer=youSure('SAVE')
                    print(Fore.YELLOW+"Checking if the target is correct again...")
            elif saveMemStage == 0: # Ask if the user is sure
                print(Fore.YELLOW+"Target: save short-term memory")
                toolAnswer=youSure('SAVE')
                print(Fore.YELLOW+"Checking if the target is correct...")
                saveMemStage = 1
            toolAnswers.append(toolAnswer)
            break # Use no more tools
        else:
            # Any other tool choice aborts a pending save confirmation.
            saveMemStage = 0
        # --- RESET MEMORY: three-stage confirmation state machine -----------
        if 'RESET MEMORY' in response1:
            print(Fore.YELLOW+"Assistant used memory reset tool:")
            global rstMemStage
            global memRstChoice
            global overrideMemPrompt
            global sweOverrideMemPrompt
            if rstMemStage == 2: # Did the user approve?
                decision=userApprove(userPrompt)
                if decision == 'YES':
                    print(Fore.YELLOW+"User accepted deletion: Memory will be removed shortly")
                    if english == True:
                        if 'LONG TERM' in memRstChoice:
                            text = 'long'
                            mem = text
                        elif 'SHORT TERM' in memRstChoice:
                            text = 'short'
                            mem = text
                        elif 'BOTH' in memRstChoice:
                            text = 'both long term and short'
                            mem = 'both'
                        # The [mem] tag tells the caller which memory to wipe.
                        toolAnswer = f'[PRIO][{mem}]{overrideMemPrompt}You MUST inform the user in you next message that you are deleting {text} term memory from this device'
                    elif english == False:
                        if 'LONG TERM' in memRstChoice:
                            text = 'lång'
                            mem = 'long'
                        elif 'SHORT TERM' in memRstChoice:
                            text = 'kort'
                            mem = 'short'
                        elif 'BOTH' in memRstChoice:
                            text = 'både lång- och kort'
                            mem = 'both'
                        toolAnswer = f'[PRIO][{mem}]{sweOverrideMemPrompt}Du måste informera användaren i ditt nästa meddelande att du raderar {text}tidsminnet från denna enheten'
                    rstMemStage = 0
                    memRstChoice = ''
                elif decision == 'NO':
                    print(Fore.YELLOW+"User canceled deletion: Memory will not be removed")
                    toolAnswer = ''
                    rstMemStage = 0
                    memRstChoice = ''
                else: # User didn't specify yes or no, decision == 'UNSURE'
                    toolAnswer=youSure(memRstChoice, False)
                    print(Fore.YELLOW+"Checking if the target is correct, again...")
            elif rstMemStage == 0: # Check what memory should be deleted
                print(Fore.YELLOW+"Checking what memory should be deleted...")
                rstMemMsgs = []
                rstMemMsgs.extend(history)
                rstMemMsgs.append({'role':'user','content':userPrompt})
                memRstFuncDesc = (
                    'Determine what memory the user wants to reset/delete from this device (in Swedish or English). '
                    +'Or is it wanted at all?\n\n'
                    +'LONG TERM: Older conversations before this one\n'
                    +'SHORT TERM: The current conversation\n'
                    +'BOTH: Both long term and short term\n'
                    +'NOTHING: The user does not want to reset/delete anything\n'
                    +'UNSURE: If the user has not specified, or only said to delete “memory” or “minnet”\n\n'
                    +'Choice examples:\n'
                    +'”Långtidsminnet” = LONG TERM\n'
                    +'”Korttidsminnet” = SHORT TERM\n'
                    +'”Vill inte” / ”inget av dem” = NOTHING\n\n'
                    +'You must be certain, otherwise choose “UNSURE”.'
                )
                memRstFunc = [
                    {
                        "name": "memory_reset_choice",
                        "description": memRstFuncDesc,
                        "parameters": {
                            "type": "object",
                            "properties": {
                                "choice": {
                                    "type": "string",
                                    "enum": ["LONG TERM", "SHORT TERM","BOTH","NOTHING","UNSURE"]
                                },
                            },
                            "required": ["choice"]
                        }
                    }
                ]
                memRstResponse=generateResponse(rstMemMsgs, memRstFunc, 0.2, {"name": "memory_reset_choice"})
                if memRstResponse.get("function_call"): # Check if it wanted to use a function
                    functionArgs = json.loads(memRstResponse["function_call"]["arguments"])
                    memRstChoice = functionArgs.get("choice")
                else:
                    memRstChoice = 'UNSURE'
                rstMemMsgs.append({'role':'assistant','content':'{'+memRstChoice+'}'})
                if 'LONG TERM' in memRstChoice or 'SHORT TERM' in memRstChoice or 'BOTH' in memRstChoice: # We know what memory should be deleted
                    rstMemStage = 1 # Go to the next stage
                elif 'NOTHING' in memRstChoice:
                    print(Fore.YELLOW+"Nothing will be deleted.")
                    toolAnswer = ''
                    rstMemStage = 0
                    memRstChoice = ''
                else: # It doesn't know
                    print(Fore.YELLOW+"Assistant doesn't know, but will ask...")
                    toolAnswer = (
                        '[PRIO]'
                        +'You must ask the user, in your next message, to specify what memory on this device that is preferred to be deleted. '
                        +'Long term memory, short term memory, or both?\n\n'
                        +'Long term memory: Older conversations before this one\n'
                        +'Short term memory: The current conversation'
                    )
            # Deliberately a plain `if`, so stage 0 can fall straight through
            # to stage 1 in the same call once the target has been determined.
            if rstMemStage == 1: # We know what memory should be deleted, now ask if the user is sure to do this
                if 'LONG TERM' in memRstChoice:
                    print(Fore.YELLOW+"Targeting: Long-term memory")
                    toolAnswer=youSure('LONG TERM')
                elif 'SHORT TERM' in memRstChoice:
                    print(Fore.YELLOW+"Targeting: Short-term memory")
                    toolAnswer=youSure('SHORT TERM')
                elif 'BOTH' in memRstChoice:
                    print(Fore.YELLOW+"Targeting: Both long-term and short-term memory")
                    toolAnswer=youSure('BOTH')
                print(Fore.YELLOW+"Checking if the target is correct...")
                rstMemStage = 2
            toolAnswers.append(toolAnswer)
            break # Use no more tools
        else:
            # Any other tool choice aborts a pending reset confirmation.
            rstMemStage = 0
            memRstChoice = ''
        # --- Ordinary single-shot tools -------------------------------------
        if wolframAlpha and ('WOLFRAM ALPHA' in response1):
            # NOTE(review): a '\n' appears to be missing after the first
            # example line — confirm whether that is intentional.
            wolframStartPrompt = ("Enter a question for Wolfram Alpha within {}.\n\n"
                +"Examples:\n"
                +"'sqrt(2)+5^3' -> {sqrt(2)+5^3}"
                +"'weather Luleå Sweden' -> {weather Luleå Sweden}\n"
                +"'Obama's age to the power of two' -> {(Obama's age in years)^2}\n\n"
                +"Make sure to only include the question within the curly braces {}.")
            toolMsgs.append({'role':'system','content':wolframStartPrompt})
            response2=generateResponse(toolMsgs)
            # Extract the response in braces
            match2 = re.search(r'\{(.+?)\}', response2)
            if match2: response2 = match2.group(1)
            toolMsgs.append({'role':'assistant','content':'{'+response2+'}'})
            try:
                answer = wolfram.run(response2)
                toolAnswer = 'Assistant used Wolfram Alpha tool:\nQuestion: {}\n{}'.format(response2, answer)
                toolMsgs.append({'role':'system','content':'Wolfram Alpha answer:\n{}'.format(answer)})
            except Exception as e:
                e_str = str(e)
                if "Invalid appid" in e_str:
                    print(Style.BRIGHT+Fore.RED+"WARNING: Invalid Wolfram Alpha App ID. Please provide an App ID in apiKeys.py to use Wolfram Alpha.")
                    toolAnswer = "Assistant tried to use Wolfram Alpha tool, but the user hasn't provided a correct App ID. Wolfram Alpha cannot be used until the user provides a valid App ID."
                    wolframAlpha = False # Disable the tool for the rest of the session
                else:
                    toolAnswer = 'Assistant tried to use Wolfram Alpha tool, but a problem occured'
                toolMsgs.append({'role':'system','content':toolAnswer})
        elif googleSearch and ('GOOGLE SEARCH' in response1):
            googleStartPrompt = "Enter a web search engine query for Google Search within {}. For example, to search for 'how old is Obama' type {Obama age}, and Google will find results for you. Now write the query in curly braces {}." # Make sure to only include the question within the curly braces {}.
            toolMsgs.append({'role':'system','content':googleStartPrompt})
            query=generateResponse(toolMsgs)
            # Extract the response in braces
            match2 = re.search(r'\{(.+?)\}', query)
            if match2: query = match2.group(1)
            toolMsgs.append({'role':'assistant','content':'{'+query+'}'})
            try:
                service = build("customsearch", "v1", developerKey=googleCustomSearchAPI) # Init google search engine
                response = (service.cse().list(q=query,cx=googleSearchEngineID,).execute()) # Call google search api
                results = response['items']
                answer = ""
                for result in results:
                    answer += "Link: " + result['link'] + "\n"
                    answer += "Title: " + result['title'] + "\n"
                    answer += "Content: " + result['snippet'] + "\n\n"
                toolAnswer = 'Assistant used Google Search tool:\n\nQuery: {}\n\nSearch results:\n\n{}'.format(query, answer)
                toolMsgs.append({'role':'system','content':'Google Search results:\n\n{}'.format(answer)})
            except Exception as e:
                e_str = str(e)
                if "API key not valid" in e_str:
                    print(Style.BRIGHT+Fore.RED+"WARNING: Invalid Google Custom Search API Key (developerKey). Please provide an API Key in apiKeys.py to use Google Search.")
                    toolAnswer = "Assistant tried to use Google Search tool, but the user hasn't provided a correct API Key. Google Search cannot be used until the user provides a valid API Key."
                    googleSearch = False # Disable the tool for the rest of the session
                elif "Request contains an invalid argument" in e_str:
                    print(Style.BRIGHT+Fore.RED+"WARNING: Invalid Google Search Engine ID (cx). Please provide a Search Engine ID in apiKeys.py to use Google Search.")
                    toolAnswer = "Assistant tried to use Google Search tool, but the user hasn't provided a correct Search Engine ID. Google Search cannot be used until the user provides a valid ID."
                    googleSearch = False
                else:
                    toolAnswer = 'Assistant tried to use Google Search tool, but a problem occured'
                toolMsgs.append({'role':'system','content':toolAnswer})
        elif 'SIMPLE CALCULATOR' in response1:
            calcStartPrompt = "Enter an equation for me to calculate within {}. For example, to calculate 'sqrt(2)+5^3', type {sqrt(2)+5^3} and I will calculate it for you. Make sure to only include the equation within the curly braces {}."
            toolMsgs.append({'role':'system','content':calcStartPrompt})
            response2=generateResponse(toolMsgs)
            # Extract the response in braces
            match2 = re.search(r'\{(.+?)\}', response2)
            if match2: response2 = match2.group(1)
            toolMsgs.append({'role':'assistant','content':'{'+response2+'}'})
            evalCalc = response2.replace("^", "**")
            try:
                # eval is restricted to the math_functions whitelist as globals.
                answer = eval(evalCalc, math_functions)
                toolAnswer = 'Assistant used calculator tool: {} = {}'.format(response2, answer)
                toolMsgs.append({'role':'system','content':'Calculator answer: {}'.format(answer)})
            except:
                toolAnswer = 'Assistant tried to use calculator tool, but a problem occured'
                toolMsgs.append({'role':'system','content':toolAnswer})
        elif 'TIME & DATE' in response1:
            answer = datetime.datetime.now().strftime('%H:%M:%S %Y-%m-%d')
            toolAnswer = 'Assistant used time & date tool: {}'.format(answer)
            toolMsgs.append({'role':'system','content':'Time & date answer: {}'.format(answer)})
        elif 'LOCATION' in response1:
            # Location globals come from the ipinfo.io lookup at module load.
            answer = f"City: {city}, Country: {country}, Postal: {postal}, Timezone: {timezone}"
            toolAnswer = 'Assistant used location tool:\n{}'.format(answer)
            toolMsgs.append({'role':'system','content':'Location answer:\n{}'.format(answer)})
        elif ('TEMP SENSOR' in response1) and (MAX7219Lib == True):
            answer = getTemp() + " degrees Celsius"
            toolAnswer = 'Assistant used temperature sensor: The temperature in this room is {}'.format(answer)
            toolMsgs.append({'role':'system','content':'Temp sensor results:\nTemp in this room is {}'.format(answer)})
        else:
            toolAnswer = ''
        if toolAnswer != '':
            print(Fore.YELLOW+toolAnswer)
            toolAnswers.append(toolAnswer)
        else: useTools = False # No tool produced output: stop chaining
        toolsUsedNum += 1
        if (toolsUsedNum >= maxToolsPerPrompt): useTools = False
        if useTools:
            # Ask the model whether to chain another tool or finish ({YES}).
            toolEndPrompt = (
                'If you are certain that you can give a good, relevant and accurate answer to the user’s question without using another tool, write {YES}, nothing else. '
                +'To choose a tool, write {} with its name in braces. For example, to use Google Search, write {GOOGLE SEARCH}. '
                +'It can be good to use a tool to gather information, and then use another to calculate or search for anything based on the previous info. '
                +'Now you MUST choose with {} only, nothing else.'
            )
            toolMsgs.append({'role':'system','content':toolEndPrompt})
            response3=generateResponse(toolMsgs) # Choose another tool, or end loop
            # Extract the text in braces
            match3 = re.search(r'\{(.+?)\}', response3)
            if match3: response3 = match3.group(1)
            toolMsgs.append({'role':'assistant','content':'{'+response3+'}'})
            if ('YES' in response3): useTools = False
            else: response1 = response3
    if toolAnswer != '': stringToolAnswers = '\n\n'.join(toolAnswers)
    # Reset locals (redundant since they go out of scope, kept as-is).
    useTools = True
    toolsUsedNum = 0
    toolAnswers = []
    toolMsgs = []
    return stringToolAnswers
def detectLanguage(text):
    """Return the language code for *text*: 'sv' when the model classifies it
    as Swedish, otherwise 'en' (also the fallback on any non-function reply)."""
    print(Fore.YELLOW+"Detecting language...")
    schema = [{
        "name": "english_or_swedish",
        "description": "Based on the user message, determine if it is English or Swedish",
        "parameters": {
            "type": "object",
            "properties": {
                "decision": {
                    "type": "string",
                    "enum": ["ENGLISH", "SWEDISH"],
                },
            },
            "required": ["decision"],
        },
    }]
    reply = generateResponse(
        [{'role': 'user', 'content': text}],
        schema,
        0.2,
        {"name": "english_or_swedish"},
    )
    decision = 'ENGLISH'  # default when the model did not call the function
    if reply.get("function_call"):
        decision = json.loads(reply["function_call"]["arguments"]).get("decision")
    return "sv" if decision == "SWEDISH" else "en"
def beep():
    """Play a short, quiet low-frequency tone through SoX's `play` (ALSA),
    used to keep auto-sleeping speakers awake."""
    cmd = 'play -nq -t alsa synth 0.34 sine 30 vol 0.05'
    os.system(cmd)
def playAudio(language):
    """Play the generated text-to-speech file via VLC (Raspberry Pi) or
    pygame (Windows). *language* is 'sv' or anything else for English;
    it only affects which pygame playback path is used."""
    # Play sound
    if vlcLib == True: # For Raspberry Pi
        if wakeSpeaker:
            beep() # Beep to wake up the speaker, if it turns off automatically
            time.sleep(0.3)
        instance = vlc.Instance() # create instance of VLC
        player = instance.media_player_new() # create new MediaPlayer object
        media = instance.media_new(textToSpeechFilePath) # load the audio file
        player.set_media(media) # set the media for the player
        def on_end_reached(event):
            # Release VLC resources once playback finishes (playback is async).
            player.stop()
            instance.release()
        # register the callback function with the event manager
        event_manager = player.event_manager()
        event_manager.event_attach(vlc.EventType.MediaPlayerEndReached, on_end_reached)
        player.audio_set_volume(100) # Set the volume to 100%
        player.play() # play the media
    elif pygameLib == True: # For Windows
        if language == "sv": # Swedish: blocking Sound playback
            sound = pygame.mixer.Sound(textToSpeechFilePath) # Load the sound file
            duration = sound.get_length() * 1000 # Convert to milliseconds
            channel = sound.play()
            pygame.time.wait(int(duration)) # Waiting until finished talking
            channel.stop()
        else: # English: streaming music playback
            pygame.mixer.music.load(textToSpeechFilePath)
            pygame.mixer.music.play()
            while pygame.mixer.music.get_busy():
                pygame.time.Clock().tick(10)
                # Put stuff here that should run while playing speech
            pygame.mixer.music.stop()
            # NOTE(review): full pygame restart — presumably to release the
            # mp3 file handle so the next TTS write can overwrite it; confirm.
            pygame.quit()
            pygame.init()
            pygame.mixer.init()
def textToSpeech(text, language):
    """Synthesize `text` to an audio file and play it.

    Swedish ("sv") is synthesized with Google Cloud Text-to-Speech; English
    uses ElevenLabs when the global `elevenLabs` flag is set, otherwise the
    offline engine. Any synthesis or playback error falls back to the
    offline engine.

    Args:
        text: the text to speak.
        language: "sv" for Swedish, anything else is treated as English.

    Returns:
        The path to the generated audio file, or None when the offline
        engine spoke the text directly (no audio file produced).
    """
    global elevenLabs
    print("Generating text-to-speech...")
    if language == "sv": # Swedish -> Google Cloud TTS
        client = texttospeech.TextToSpeechClient()
        input_text = texttospeech.SynthesisInput(text=text)
        if googleTTS_gender == "MALE": ssml_gender=texttospeech.SsmlVoiceGender.MALE
        else: ssml_gender=texttospeech.SsmlVoiceGender.FEMALE
        voice = texttospeech.VoiceSelectionParams(
            language_code="sv-SE",
            name=googleTTS_name,
            ssml_gender=ssml_gender,
        )
        audio_config = texttospeech.AudioConfig(
            audio_encoding=texttospeech.AudioEncoding.MP3
        )
        response = client.synthesize_speech(
            request={"input": input_text, "voice": voice, "audio_config": audio_config}
        )
        with open(textToSpeechFilePath, "wb") as out:
            out.write(response.audio_content)
    else: # English
        if elevenLabs:
            headers = {
                "Accept": "audio/mpeg",
                "xi-api-key": XI_API_KEY,
                "Content-Type": "application/json"
            }
            data = {
                "text": text,
                "voice_settings": {
                    "stability": STABILITY,
                    "similarity_boost": SIMILARITY_BOOST
                }
            }
            try:
                if vlcLib == True: # For Raspberry Pi: download the whole file
                    # timeout so a stalled connection can't hang the assistant;
                    # raise_for_status so an HTTP error body is never written
                    # into the audio file and "played"
                    response = requests.post(f"https://api.elevenlabs.io/v1/text-to-speech/{VOICE_ID}", json=data, headers=headers, timeout=60)
                    response.raise_for_status()
                    with open(textToSpeechFilePath, "wb") as f:
                        f.write(response.content)
                elif pygameLib == True: # For Windows: stream chunks to disk
                    response = requests.post(f"https://api.elevenlabs.io/v1/text-to-speech/{VOICE_ID}/stream", json=data, headers=headers, stream=True, timeout=60)
                    response.raise_for_status()
                    with open(textToSpeechFilePath, 'wb') as f:
                        for chunk in response.iter_content(chunk_size=1024):
                            if chunk:
                                f.write(chunk)
            except Exception as e: # There was an error - fall back to offline TTS
                print(Style.BRIGHT+Fore.RED+f"ElevenLabs Error:\n{e}")
                print("Using offline text-to-speech instead...")
                offlineTextToSpeech(text)
                return None
        else: # elevenLabs == False
            offlineTextToSpeech(text)
            return None
    try:
        playAudio(language)
    except Exception as e:
        print(Style.BRIGHT+Fore.RED+f"Error trying to play audio:\n{e}")
        if elevenLabs == True: # If ElevenLabs is used, this error likely occurred because of wrong API Key
            print(Style.BRIGHT+Fore.RED+"WARNING: This error likely occurred because the ElevenLabs API Key is wrong. Please provide an API Key in apiKeys.py to use ElevenLabs.")
            print("Using offline text-to-speech instead...")
            offlineTextToSpeech(text)
            elevenLabs = False # Disable ElevenLabs for the rest of the session
        return None
    return textToSpeechFilePath
def offlineTextToSpeech(text):
    # Speak `text` with the offline engine held in the module-level `tts`
    # object. say() queues the utterance and iterate() pumps the engine's
    # event loop once — presumably the engine was started with an external
    # loop (startLoop(False)-style); TODO confirm against the tts setup code.
    tts.say(text)
    tts.iterate()
def generateResponse(openAImsgs, funcList=None, temp=None, functionCall=None, stream=None, max_tokens=None):
    """Call the OpenAI ChatCompletion API with rate limiting and retries.

    Args:
        openAImsgs: list of chat messages ({"role": ..., "content": ...}).
        funcList: optional function-calling schema list; when given, the
            0613 function-calling model snapshot is selected.
        temp: optional sampling temperature.
        functionCall: optional "function_call" directive for the API.
        stream: optional flag; when truthy the raw streaming response
            object is returned instead of extracted content.
        max_tokens: optional cap on completion length.

    Returns:
        The full message dict (function-calling mode), the raw response
        (streaming mode), the content string (plain mode), or '' when all
        retry attempts fail.
    """
    # NOTE(review): kwargs is deliberately a module-level global here —
    # presumably other code inspects the last request's parameters; confirm
    # before making it a local.
    global kwargs
    kwargs = {}
    if funcList is not None:
        # Function calling requires the 0613 snapshot
        kwargs['model'] = "gpt-3.5-turbo-0613"
        kwargs['functions'] = funcList
    else:
        kwargs['model'] = "gpt-3.5-turbo"
    kwargs['messages'] = openAImsgs
    if temp is not None:
        kwargs['temperature'] = temp
    if functionCall is not None:
        kwargs['function_call'] = functionCall
    if stream is not None:
        kwargs['stream'] = stream
    if max_tokens is not None:
        kwargs['max_tokens'] = max_tokens
    # Switch to the 16k-context variant when the prompt nears the 4k limit
    promptTokens = num_tokens_from_messages(openAImsgs, kwargs['model'])
    if promptTokens > 3500:
        print(Fore.RED+"OpenAI: Using 16k model instead of 4k, which is more expensive (too many tokens for 4k model)")
        if funcList is not None:
            kwargs['model'] = "gpt-3.5-turbo-16k-0613"
        else:
            kwargs['model'] = "gpt-3.5-turbo-16k"
    if devMode:
        # Rough running cost estimate — prompt tokens only, 4k models only
        # ($0.0015 per 1k prompt tokens)
        if "gpt-3.5-turbo" in kwargs['model'] and "16k" not in kwargs['model']:
            global totalCost
            totalCost+=0.0015*(promptTokens/1000)
            print(Fore.LIGHTMAGENTA_EX+"Approx. cost:",totalCost,"$")
    # Wait until we can send another request to OpenAI without exceeding the max request rate
    global lastOpenAIresponse
    if (openAIdelay > 0) and ((openAIdelay - (time.time() - lastOpenAIresponse)) > 0):
        print("Waiting", round(openAIdelay - (time.time() - lastOpenAIresponse)), "seconds... Free OpenAI account settings on.")
        time.sleep(openAIdelay - (time.time() - lastOpenAIresponse))
    openaiError = False
    attempts = 3
    retryTime = 5 # seconds
    for attempt in range(attempts):
        if openaiError:
            # A previous attempt failed — back off before retrying
            print(Fore.RED+f"Retrying in {retryTime} seconds...")
            time.sleep(retryTime)
        try:
            # Call OpenAI's API to generate a response
            response = openai.ChatCompletion.create(
                **kwargs
            )
            if (openAIdelay > 0): lastOpenAIresponse = time.time()
            if funcList: # Functions
                aiResponse = response['choices'][0]['message']
            elif stream: # Streamed assistant answers
                aiResponse = response
            else: # Usual responses (some tools)
                aiResponse = response['choices'][0]['message']['content']
            if devMode:
                print("openAImsgs:\n",openAImsgs)
                print("aiResponse:\n",aiResponse)
        except openai.error.Timeout as e:
            #Handle timeout error, e.g. retry or log
            print(Style.BRIGHT+Fore.RED+f"OpenAI API request timed out: {e}")
            openaiError = True
        except openai.error.APIError as e:
            #Handle API error, e.g. retry or log
            print(Style.BRIGHT+Fore.RED+f"OpenAI API returned an API Error: {e}")
            openaiError = True
        except openai.error.APIConnectionError as e:
            #Handle connection error, e.g. check network or log
            print(Style.BRIGHT+Fore.RED+f"OpenAI API request failed to connect: {e}")
            openaiError = True
        except openai.error.InvalidRequestError as e:
            #Handle invalid request error, e.g. validate parameters or log
            print(Style.BRIGHT+Fore.RED+f"OpenAI API request was invalid: {e}")
            print(Style.BRIGHT+Fore.RED+"Messages sent to OpenAI:\n",openAImsgs)
            openaiError = True
        except openai.error.AuthenticationError as e:
            #Handle authentication error, e.g. check credentials or log
            print(Style.BRIGHT+Fore.RED+f"OpenAI API request was not authorized: {e}")
            openaiError = True
        except openai.error.PermissionError as e:
            #Handle permission error, e.g. check scope or log
            print(Style.BRIGHT+Fore.RED+f"OpenAI API request was not permitted: {e}")
            openaiError = True
        except openai.error.RateLimitError as e:
            #Handle rate limit error, e.g. pace requests or log
            print(Style.BRIGHT+Fore.RED+f"OpenAI API request hit rate limit: {e}")
            openaiError = True
        except openai.error.ServiceUnavailableError as e:
            print(Style.BRIGHT+Fore.RED+f"OpenAI API service is unavailable: {e}")
            openaiError = True
        except Exception as e:
            print(Style.BRIGHT+Fore.RED+f"OpenAI Error:\n{e}")
            openaiError = True
        else: # The try block ran without raising an exception
            openaiError = False
            break
    else: # We failed all the retry attempts - deal with the consequences
        print(Style.BRIGHT+Fore.RED+f"Could not generate a response from OpenAI even after {attempts} retry attempts. Please try again later or consider looking in to it.\nShort-term memory was deleted, to start from scratch.")
        chatReset()
        openaiError = True
    if openaiError: aiResponse = ''
    return aiResponse
def num_tokens_from_string(string: str, encoding_name: str) -> int:
    """Return the number of tokens `string` encodes to.

    NOTE(review): despite the parameter name, `encoding_name` is passed to
    tiktoken.encoding_for_model(), which expects a *model* name (e.g.
    "gpt-3.5-turbo"), not an encoding name like "cl100k_base" — confirm
    what callers pass before renaming or switching to get_encoding().
    """
    tokenizer = tiktoken.encoding_for_model(encoding_name)
    return len(tokenizer.encode(string))
def num_tokens_from_messages(messages, model="gpt-3.5-turbo"):
    """Return the number of tokens used by a list of messages."""
    try:
        encoding = tiktoken.encoding_for_model(model)
    except KeyError:
        print("Warning: model not found. Using cl100k_base encoding.")
        encoding = tiktoken.get_encoding("cl100k_base")
    # Per-message / per-name token overheads depend on the model snapshot
    known_snapshots = {
        "gpt-3.5-turbo-0613",
        "gpt-3.5-turbo-16k-0613",
        "gpt-3.5-turbo-16k",
        "gpt-4-0314",
        "gpt-4-32k-0314",
        "gpt-4-0613",
        "gpt-4-32k-0613",
    }
    if model in known_snapshots:
        tokens_per_message, tokens_per_name = 3, 1
    elif model == "gpt-3.5-turbo-0301":
        # every message follows <|start|>{role/name}\n{content}<|end|>\n;
        # if there's a name, the role is omitted
        tokens_per_message, tokens_per_name = 4, -1
    elif "gpt-3.5-turbo" in model:
        return num_tokens_from_messages(messages, model="gpt-3.5-turbo-0613")
    elif "gpt-4" in model:
        print("Warning: gpt-4 may update over time. Returning num tokens assuming gpt-4-0613.")
        return num_tokens_from_messages(messages, model="gpt-4-0613")
    else:
        raise NotImplementedError(
            f"""num_tokens_from_messages() is not implemented for model {model}. See https://github.com/openai/openai-python/blob/main/chatml.md for information on how messages are converted to tokens."""
        )
    total = 3  # every reply is primed with <|start|>assistant<|message|>
    for message in messages:
        total += tokens_per_message
        for key, value in message.items():
            total += len(encoding.encode(value))
            if key == "name":
                total += tokens_per_name
    return total
def chatReset():
    """Clear the short-term conversation state (messages, history, counters).

    Mutates the module-level `messages` and `history` containers in place
    and zeroes the message counters.
    """
    global messages
    global history
    global messageCount
    # Fix: without this declaration the assignment below only created a
    # dead local, so the module-level token counter was never reset.
    global messageTokens
    messages.clear()
    history.clear()
    messageCount = 0
    messageTokens = 0
def longTermMemoryReset():
    """Wipe the long-term memory JSON file if it holds any summations."""
    with open(longTermMemoryPath, "r") as memory_file:
        stored = json.load(memory_file)
    if "summations" in stored:
        # Overwrite the file with an empty object, discarding all memory
        with open(longTermMemoryPath, "w") as memory_file:
            json.dump({}, memory_file)
def appendPrevSummations(memoryList):
    """Append stored conversation summations to `memoryList` in place.

    Reads the long-term memory JSON file and adds one system message per
    stored summation; does nothing when no summations are stored.
    """
    # Load the JSON file
    with open(longTermMemoryPath, "r") as memory_file:
        stored = json.load(memory_file)
    # Extract previous summations from the JSON file and append them to memoryList
    if "summations" in stored: # If there are any summations
        memoryList.extend(
            {"role": "system", "content": "Device memory from earlier conversations: " + summation}
            for summation in stored["summations"]
        )
def shortenSummation(summation):
    """Condense a summation to just its bullet lines.

    Extracts the text after each '•' up to the following newline and joins
    the results with newlines; a bullet not terminated by '\\n' is dropped.
    """
    bullet_texts = re.findall(r'•(.*?)\n', summation)
    return '\n'.join(bullet_texts)
def summationStringToList(summationString, newOrOld):
namePattern = re.compile(r"User's name: (.*?)\n")
nameList = namePattern.findall(summationString)
if newOrOld == "OLD":
infoNtimePattern = re.compile(r"Info timestamps:\n(.+?)\n\n", re.DOTALL)
infoNtimeList = infoNtimePattern.findall(summationString)
for name in range(len(nameList)):
infoNtimeList[name] += "\n"
fullInfoList = []
if newOrOld == "NEW": summationString += "\n\n"
for name in range(len(nameList)):
if newOrOld == "OLD":
infoPattern = re.compile(r"\n-(.+?)(?=\n)", re.DOTALL)