"""Voice-activated face greeter for the AIY Voice Kit + Raspberry Pi camera.

Waits for a button press, captures one camera frame, listens for the trigger
phrase, and — if a known face is found in the frame — plays that person's
greeting WAV.
"""
import aiy.audio
import aiy.cloudspeech
import aiy.voicehat
import face_recognition
import numpy as np
import picamera

# Trigger phrase; compared case-insensitively against the recognized text.
# (The original compared 'hi Alex' against raw recognizer output while
# expect_phrase registered 'Hi Alex' — the service's capitalization is not
# guaranteed, so normalize before matching.)
TRIGGER_PHRASE = 'hi alex'

# Get a reference to the Raspberry Pi camera.
# If this fails, make sure you have a camera connected to the RPi and that you
# enabled your camera in raspi-config and rebooted first.
camera = picamera.PiCamera()
camera.resolution = (320, 240)
# Pre-allocated RGB frame buffer matching the camera resolution (rows, cols, 3).
output = np.empty((240, 320, 3), dtype=np.uint8)

# Initialize some variables (kept at module level for compatibility; main()
# shadows them with locals).
face_locations = []
face_encodings = []

# Known people: encodings, display names, and greeting WAV paths are parallel
# lists indexed together.
alex_image = face_recognition.load_image_file("/home/pi/Desktop/known_people/Gobeler_Alex.jpg")
alex_face_encoding = face_recognition.face_encodings(alex_image)[0]

dustin_image = face_recognition.load_image_file("/home/pi/Desktop/known_people/Stephan_Dustin.jpg")
dustin_face_encoding = face_recognition.face_encodings(dustin_image)[0]

known_face_encodings = [alex_face_encoding, dustin_face_encoding]
known_face_names = ["Alex", "Dustin"]
known_name_sayings = [
    "/home/pi/Desktop/known_name_sayings/hi alex.wav",
    "/home/pi/Desktop/known_name_sayings/hi dustin.wav",
]

# Cloud speech recognizer; hinting the expected phrase improves accuracy.
recognizer = aiy.cloudspeech.get_recognizer()
recognizer.expect_phrase('Hi Alex')

button = aiy.voicehat.get_button()
led = aiy.voicehat.get_led()
aiy.audio.get_recorder().start()


def _greet_known_faces(frame):
    """Find faces in *frame* and play the greeting WAV for the first known one.

    Args:
        frame: RGB image as a numpy uint8 array (rows, cols, 3).
    """
    face_locations = face_recognition.face_locations(frame)
    print("Found {} faces in image.".format(len(face_locations)))
    face_encodings = face_recognition.face_encodings(frame, face_locations)

    # Loop over each face found in the frame to see if it's someone we know.
    for face_encoding in face_encodings:
        # See if the face is a match for the known face(s).
        match = face_recognition.compare_faces(known_face_encodings, face_encoding)
        if True in match:
            first_match_index = match.index(True)
            phrase = known_name_sayings[first_match_index]
            print("match!")
            aiy.audio.play_wave(phrase)
            break


def main():
    """Run one press → capture → listen → greet cycle."""
    led.set_state(aiy.voicehat.LED.ON)
    print('press the button')
    button.wait_for_press()
    led.set_state(aiy.voicehat.LED.OFF)

    print("Capturing image.")
    # Grab a single frame of video from the RPi camera as a numpy array.
    camera.capture(output, format="rgb")

    print('Listening...')
    text = recognizer.recognize()
    if not text:
        print('Sorry could you repeat that')
    else:
        print('you said "', text, '"')
        # Case-insensitive trigger check (fixes the 'hi Alex' vs. 'Hi Alex'
        # mismatch in the original).
        if TRIGGER_PHRASE in text.lower():
            _greet_known_faces(output)


if __name__ == '__main__':
    main()