Flow Chart for OCR Detection

[Figure: flow chart of the OCR detection process]


Code:
from picamera.array import PiRGBArray
from picamera import PiCamera
import socket
import sys
import cv2
import numpy as np
import time
import pickle
from io import StringIO
from multiprocessing.connection import Client
import serial

front_left = 0
front_center = 0
front_right = 0
back_left = 0
back_center = 0
back_right = 0
compass = 0
speed = 0
i = 0
speed_limit = "50"  # default speed limit until a sign is read

# Initialize Camera
#camera = cv2.VideoCapture(0)
camera = PiCamera()
camera.resolution = (320, 240)
camera.framerate = 90
rawCapture = PiRGBArray(camera, size=(320, 240))

# Create a TCP/IP socket
#sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)

# Connect to the PC where the receiver is listening
server_address = ('192.168.0.101', 10000)
print('connecting to {} port {}'.format(*server_address))
#sock.connect(server_address)
client = Client(server_address)   # multiprocessing.connection link to the PC

# Serial link to the Arduino (speed limit out, sensor readings in)
ser = serial.Serial("/dev/ttyAMA0", baudrate=115200)
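# The Arduino replies with one comma-separated line per request; the field order
# below is inferred from the sendDict built further down in this script:
#   0: front_left    1: front_right   2: back_left    3: back_right
#   4: front_center  5: back_center   6: speed        7: angle
#   8: compass       9-11: x/y/z      12: echoed speed limit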

# kNN digit classifier: load the samples and labels produced during OCR training
samples = np.loadtxt('generalsamples_sign.data',np.float32)
responses = np.loadtxt('generalresponses_sign.data',np.float32)
responses = responses.reshape((responses.size,1))
offset = 2   # padding (in pixels) added around each detected digit's bounding box

model = cv2.ml.KNearest_create()
model.train(samples,cv2.ml.ROW_SAMPLE,responses)
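# Note: each row of generalsamples_sign.data is assumed to be a flattened 10x10
# thresholded digit image (100 float32 values), with the matching digit label in
# generalresponses_sign.data -- the same shape the ROIs are reduced to below
# before calling findNearest().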

for cap in camera.capture_continuous(rawCapture, format="bgr", use_video_port=True):
#while True:
    # Capture frame-by-frame
    #ret, frame = camera.read()
    frame = cap.array
    #frame = cv2.resize(frame, (640, 480))
    [depth, width, height] = frame.shape[::-1]  # shape is (240, 320, 3) -> 3, 320, 240
    frame_def = frame.copy()                    # copy used only for drawing debug rectangles
    
    ########### Sign Detection
    # 2.21*R - G - B emphasises strongly red pixels (the sign's border);
    # anything scoring 60 or less is suppressed before contour extraction.
    sign_finding = 2.21*frame[:,:,2] - frame[:,:,1] - frame[:,:,0]
    sign_finding = np.where(sign_finding > 60, sign_finding, 0)
    sign_finding = np.uint8(np.abs(sign_finding))

    sign_t,sign_contours,sign_hierarchy = cv2.findContours(sign_finding,cv2.RETR_LIST,cv2.CHAIN_APPROX_SIMPLE)
    
    # Among red contours larger than 300 px^2, keep the smallest one as the sign candidate
    max_x = 0
    max_y = 0
    min_x = width
    min_y = height
    select_area = 999999  # area of the contour currently selected
    for cnt in sign_contours:
        if cv2.contourArea(cnt)>300:
            [x,y,w,h] = cv2.boundingRect(cnt)
            if cv2.contourArea(cnt) < select_area:
                select_area = cv2.contourArea(cnt)
                min_x = x
                min_y = y
                max_x = x + w
                max_y = y + h
            cv2.rectangle(frame_def,(x,y),(x+w,y+h),(0,255,0),2)

    cv2.rectangle(frame_def,(min_x,min_y),(max_x,max_y),(255,0,0),2)
    
    cv2.imshow('Sign Finding', sign_finding)
    cv2.imshow('Sign Crop', frame_def)
    
    if min_x < max_x or min_y < max_y:  # a sign candidate was found
        img_sign = frame[min_y:max_y, min_x:max_x, :]          # crop the candidate region
        img_sign = cv2.cvtColor(img_sign, cv2.COLOR_BGR2GRAY)
        img_sign = cv2.GaussianBlur(img_sign,(5,5),0)
        thresh = cv2.adaptiveThreshold(img_sign,255,1,1,11,2)  # binarise for digit contours
        img_sign2, contours,hierarchy = cv2.findContours(thresh,cv2.RETR_LIST,cv2.CHAIN_APPROX_SIMPLE)
        [width, height] = thresh.shape[::-1]
        new_speed_limit = []
        new_speed_limit_point = []
        speed_read = ""
        for cnt in contours:
            
            if cv2.contourArea(cnt)>85:
                [x,y,w,h] = cv2.boundingRect(cnt)
                # keep only digit-shaped contours: 10 < w < 22, 20 < h < 35, h/w < 1.76
                if  w>10 and w<22 and h>20 and h<35 and h/w < 1.76: #28
                    print(h/w)
                    #max w = 95 min w = 76 max h = 135 min h = 132
                    y_start = y-offset
                    y_stop = y+h+offset
                    x_start = x-offset
                    x_stop = x+w+offset
                    if y_start < 0:
                        y_start = 0
                    if y_stop > height:
                        y_stop = height
                    if x_start < 0:
                        x_start = 0
                    if x_stop > width:
                        x_stop = width
                    cv2.rectangle(img_sign,(x_start,y_start),(x_stop,y_stop),(0,255,0),2)
                    roi = thresh[y_start:y_stop,x_start:x_stop]
                    roismall = cv2.resize(roi,(10,10))      # normalise to the 10x10 size used in training
                    roismall = roismall.reshape((1,100))    # flatten to a 100-element feature vector
                    roismall = np.float32(roismall)
                    retval, results, neigh_resp, dists = model.findNearest(roismall, k = 5)  # 5-nearest-neighbour vote
                    new_speed_limit.append(int(results[0][0]))
                    new_speed_limit_point.append(x)
                    print(results)
                    cv2.imshow("Img_sign",img_sign)
                    cv2.imshow("Thresh", thresh)
                    
                    
        #print(new_speed_limit)
        # Assemble the reading left to right: repeatedly take the detection with the smallest x
        length_sign = len(new_speed_limit_point)
        for i in range(length_sign):
            position = new_speed_limit_point.index(min(new_speed_limit_point))
            new_speed_limit_point.pop(position)
            speed_read += str(new_speed_limit[position])
            new_speed_limit.pop(position)
        print(speed_read)
        if speed_read != "":
            speed_limit = speed_read  # only update the speed limit when at least one digit was read
    
    # Prepare data to send to the PC: JPEG-compress the frame at quality 50
    img_str = cv2.imencode('.jpg', frame, [cv2.IMWRITE_JPEG_QUALITY,50])[1].tobytes()
    ser.write(bytes(speed_limit + ",", 'utf-8'))    # report the current speed limit to the Arduino
    car_data = str(ser.readline()).split(",")       # str() of the bytes line gives "b'...'", hence the slicing below
    #print(car_data)
    
    
    #print(type(img_str))
    try:
        sendDict = {'image': img_str,
                    'front_left': car_data[0][2:], 'front_center': car_data[4], 'front_right': car_data[1],
                    'back_left': car_data[2], 'back_center': car_data[5], 'back_right': car_data[3],
                    'compass': car_data[8], 'speed': car_data[6], 'angle': car_data[7],
                    'x_value': car_data[9], 'y_value': car_data[10], 'z_value': car_data[11],
                    'speed_limit': car_data[12][:-5]}
        #sendDict = {'image': img_str}
        sendData = pickle.dumps(sendDict)
        #print(type(sendDict))
        #print(sendData)
        #sock.sendto(sendData, server_address)
        client.send(sendData)
        #print('Send')
        #cv2.imshow('frame',frame)
        rawCapture.truncate(0)   # clear the stream buffer before the next capture
        #i+=1
        #if i == 10:
        #    front_left+=1
        #    front_center+=1
        #    front_right+=1
        #    back_left+=1
        #    back_center+=1
        #    back_right+=1
        #    i=0
        #compass+=0.01
        #speed+=0.01
        #time.sleep(0.025)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
    except IndexError as e:
        # incomplete serial line from the Arduino: skip this frame and continue
        rawCapture.truncate(0)
        print("Error:", e)

#sock.close()
client.close()
ser.close()
print('Closing connection')

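The PC-side receiver is not shown in this post. The sketch below is a minimal, assumed counterpart to the Client above: it listens on the same address with multiprocessing.connection.Listener, unpickles each message, decodes the JPEG, and displays the frame. The window name and the fields printed are illustrative choices only, not part of the original project.

Code:
from multiprocessing.connection import Listener
import pickle
import numpy as np
import cv2

# Listen on the address the Pi connects to (adjust to the PC's IP; no authkey, matching Client() above)
listener = Listener(('192.168.0.101', 10000))
conn = listener.accept()
print('connection accepted from', listener.last_accepted)

while True:
    payload = conn.recv()                 # the pickled bytes produced by pickle.dumps() on the Pi
    data = pickle.loads(payload)          # dict with 'image', sensor readings and 'speed_limit'
    jpg = np.frombuffer(data['image'], dtype=np.uint8)
    frame = cv2.imdecode(jpg, cv2.IMREAD_COLOR)   # decode the quality-50 JPEG back to BGR
    cv2.imshow('From Pi', frame)
    print(data['speed_limit'], data['speed'], data['compass'])
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break

conn.close()
listener.close()
cv2.destroyAllWindows()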
