Advertisement
Not a member of Pastebin yet?
Sign Up,
it unlocks many cool features!
- from PIL import Image
- import numpy as np
- import time
- import cv2 # for the camera
- from gpiozero import LED
- import io
- from picamera import PiCamera
def GetWhitesInImage(image, lineNumber, threshold=None):
    """Return the x coordinates on scanline *lineNumber* considered "white".

    A pixel counts as white when its summed RGB channels exceed *threshold*.

    image      -- a PIL-style image; converted to RGB mode here.
    lineNumber -- y coordinate of the horizontal line to scan.
    threshold  -- channel-sum cutoff; defaults to the module-level
                  whiteThreshold so existing callers are unaffected.
    """
    if threshold is None:
        threshold = whiteThreshold
    # BUG FIX: PIL images have no .RGB() method; convert() is the correct call.
    rgb_im = image.convert('RGB')
    # BUG FIX: scan the image's own width instead of the module-global `width`,
    # which may not match the captured frame and would crash getpixel().
    scan_width = rgb_im.size[0]
    whites = []
    for x in range(scan_width):
        r, g, b = rgb_im.getpixel((x, lineNumber))
        if int(r) + int(g) + int(b) > threshold:
            whites.append(x)
    return whites
def _TrimCluster(points):
    """Shrink *points* (a non-empty list of ints) until its span is <= 30 px or
    its fill density exceeds 0.8, repeatedly dropping whichever extreme lies
    furthest from the cluster average.  Returns (lo, hi, density)."""
    lo, hi = np.min(points), np.max(points)
    density = np.size(points) / (hi - lo + 1)
    while hi - lo > 30:
        # Dense enough to be a painted line; stop trimming.
        # NOTE: the old right-line loop still used `1 > density > 0.8` even
        # though the upper bound had deliberately been removed for the left
        # line (on the real track density is never exactly 1, but it can be
        # on a plain floor) — both sides now use the same condition.
        if density > 0.8:
            break
        avg = np.average(points)
        # Remove whichever extreme is furthest from the mean.
        if avg - lo > hi - avg:
            points.remove(lo)
            lo = np.min(points)
        else:
            points.remove(hi)
            hi = np.max(points)
        density = np.size(points) / (hi - lo + 1)
    return lo, hi, density

def DetermineLines(whiteXCoords, splitPoint=None):
    """Split the white-pixel x coordinates into a left and a right cluster and
    return the centre x of each as (leftLocation, rightLocation).

    whiteXCoords -- x coordinates of white pixels on the scanned row.
    splitPoint   -- x value separating the two clusters; defaults to the
                    module-level `middle` (half the frame width).

    A returned location of 0 signals that side's detection failed (the
    cluster's fill density stayed below 0.8).
    """
    if splitPoint is None:
        splitPoint = middle
    left, right = [], []
    for x in whiteXCoords:
        # Comparison is flipped on purpose: the camera mirrors the image.
        if x > splitPoint:
            left.append(x)
        else:
            right.append(x)
    # Guarantee both clusters are non-empty so min/max below cannot fail.
    if not left:
        left.append(0)
    if not right:
        right.append(0)
    minLeft, maxLeft, percentLeft = _TrimCluster(left)
    minRight, maxRight, percentRight = _TrimCluster(right)
    leftLocation = minLeft + (maxLeft - minLeft) / 2
    rightLocation = minRight + (maxRight - minRight) / 2
    # Too sparse to be a line: report 0 so callers know detection went wrong.
    if percentLeft < 0.8:
        leftLocation = 0
    if percentRight < 0.8:
        rightLocation = 0
    return leftLocation, rightLocation
def TakePicture():
    """Grab one frame from the PiCamera and return it as a PIL Image.

    Reads the module-level `cam`, `camera` and `stream` globals.  The cv2
    `cam.read()` result is deliberately discarded: everything after the
    original early return (the cv2 window / imwrite path) was unreachable
    dead code and has been removed.
    """
    cam.read()  # discard; kept only for its side effect on the webcam
    # BUG FIX: rewind AND truncate before capturing, otherwise each capture
    # appends after the previous JPEG in the shared stream and Image.open
    # keeps decoding the very first frame forever.
    stream.seek(0)
    stream.truncate()
    camera.capture(stream, format='jpeg')
    stream.seek(0)
    return Image.open(stream)
# ---- test-image replay data -------------------------------------------------
# Position9.png is on the left line, Position26.png the middle,
# Position44.png the right line.
imagePaths = ["Position{}.png".format(i) for i in range(53)] + ["White.png"]
# Sweep order used when replaying the stored test images instead of the camera.
imageOrder = [26, 25, 24, 23, 22, 21, 20, 19, 18, 17,
              16, 15, 14, 13, 12, 11, 10, 11, 12, 13,
              14, 15, 16, 17, 18, 19, 20, 21, 22, 23,
              24, 25, 26, 27, 28, 29, 30, 31, 32, 33,
              34, 35, 36, 37, 38, 39, 40, 41, 42, 43]

# ---- camera / detection setup ----------------------------------------------
cam = cv2.VideoCapture(0)  # 0 -> index of the first camera
# Channel-sum threshold above which a pixel counts as white.
# 550 suits black/white captures; use ~400 on the track: track red sums to
# ~200-300 and track white to ~400-500, while the grey metaforum floor
# already sums to ~500, so white must be far above that to stand out.
whiteThreshold = 550

# BUG FIX: the PiCamera and its stream must exist BEFORE the first
# TakePicture() call below, which reads both globals; they were previously
# created only after that call (guaranteed NameError at startup).
stream = io.BytesIO()
# BUG FIX: `picamera.PiCamera()` was a NameError — the import at the top of
# the file is `from picamera import PiCamera`.
camera = PiCamera()
camera.start_preview()
time.sleep(2)  # let the sensor settle before the first capture

# One throw-away capture so the frame geometry below matches reality.
myImage = TakePicture()
width = int(cam.get(3))  # cv2 property 3 is the frame width, 4 the height
middle = width / 2
# Split the half-frame into warning regions for the buzzing; dividing by a
# larger number makes the regions smaller and the buzzing start later.
deviation = width / 20
regions = [middle - 4 * deviation, middle - 3 * deviation,
           middle - 2 * deviation, middle - 1 * deviation]
lineToCheck = int(cam.get(4) / 2)  # scan the vertical middle of the frame

# Rolling 10-sample histories of each line's position and velocity.
leftPositions = [0 for _ in range(10)]
leftVelocity = [0 for _ in range(10)]
rightPositions = [0 for _ in range(10)]
rightVelocity = [0 for _ in range(10)]

# Vibration levels ({0, 1, 2, 3}, 0 = off) and the motors they drive.
leftBuzzingLevel = 0
rightBuzzingLevel = 0
leftVibrationMotor = LED(18)
rightVibrationMotor = LED(19)

outputString = ""
print("Starting program")
# ---- main detection loop ----------------------------------------------------
# BUG FIX: these are printed at the top of the loop BEFORE the first frame is
# processed, so they must exist before the first iteration (was a NameError).
leftLine, rightLine = 0, 0
predictedLeft, predictedRight = 0, 0
j = 0
try:
    while True:
        j += 1
        # Report the previous iteration's detections and predictions.
        print("Locations: ", leftLine, " | ", rightLine)
        print("predict: ", predictedLeft, " | ", predictedRight)
        time.sleep(1)
        # Grab a frame and locate both lines on the scanned row.
        myImage = TakePicture()
        whiteXCoordsInImage = GetWhitesInImage(myImage, lineToCheck)
        leftLine, rightLine = DetermineLines(whiteXCoordsInImage)
        # Rolling 10-sample history of the left line's position.
        leftPositions.append(leftLine)
        leftPositions.pop(0)
        if rightLine == 0:
            rightLine = width
        # Mirror the right position onto the left-hand scale so that, for both
        # lines, "further right" means "worse" (both measured from the left).
        rightPositions.append(width - rightLine)
        rightPositions.pop(0)
        # Velocity = displacement between the last two valid (non-zero)
        # positions; a 0 entry marks "no measurement".
        if leftPositions[-1] != 0 and leftPositions[-2] != 0:
            leftVelocity.append(leftPositions[-1] - leftPositions[-2])
        else:
            leftVelocity.append(0)
        leftVelocity.pop(0)
        if rightPositions[-1] != 0 and rightPositions[-2] != 0:
            rightVelocity.append(rightPositions[-1] - rightPositions[-2])
        else:
            rightVelocity.append(0)
        rightVelocity.pop(0)
        # Predict the next position from the average non-zero velocity.
        leftVelocityAsArray = np.asarray(leftVelocity)
        predictedLeft = leftPositions[-1] + np.average(leftVelocityAsArray[leftVelocityAsArray != 0])
        rightVelocityAsArray = np.asarray(rightVelocity)
        predictedRight = rightPositions[-1] + np.average(rightVelocityAsArray[rightVelocityAsArray != 0])
        # np.average over an empty selection is nan (every velocity was 0):
        # detection failed, so buzz both motors and skip the region check.
        if np.isnan(predictedLeft) or np.isnan(predictedRight):
            rightBuzzingLevel = 5
            rightVibrationMotor.on()
            leftBuzzingLevel = 5
            leftVibrationMotor.on()
            continue
        # Buzz a side when its predicted line drifts past the warning region.
        if predictedLeft > regions[1]:
            leftBuzzingLevel = 1
            leftVibrationMotor.on()
        else:
            leftBuzzingLevel = 0
            leftVibrationMotor.off()
        if predictedRight > regions[1]:
            rightBuzzingLevel = 1
            rightVibrationMotor.on()
        else:
            rightBuzzingLevel = 0
            rightVibrationMotor.off()
        now = time.time()
except KeyboardInterrupt:
    # BUG FIX: the loop is infinite, so without this handler the teardown
    # below was unreachable and Data.txt was never written.
    pass
with open('Data.txt', 'w') as f:
    # The `with` block closes the file; the old explicit f.close() was
    # redundant.
    f.write(outputString)
camera.stop_preview()
print("Klaaar")
Advertisement
Add Comment
Please, Sign In to add comment
Advertisement