diff --git a/demo_images/notecard.png b/demo_images/notecard.png
new file mode 100644
index 0000000..66b02a4
Binary files /dev/null and b/demo_images/notecard.png differ
diff --git a/demo_images/shapes.png b/demo_images/shapes.png
new file mode 100644
index 0000000..c0fc0f2
Binary files /dev/null and b/demo_images/shapes.png differ
diff --git a/docs/images/auto_canny.png b/docs/images/auto_canny.png
new file mode 100644
index 0000000..26d040b
Binary files /dev/null and b/docs/images/auto_canny.png differ
diff --git a/docs/images/perspective_transform.png b/docs/images/perspective_transform.png
new file mode 100644
index 0000000..2ab0d87
Binary files /dev/null and b/docs/images/perspective_transform.png differ
diff --git a/docs/images/sorting_contours.png b/docs/images/sorting_contours.png
new file mode 100644
index 0000000..14d7928
Binary files /dev/null and b/docs/images/sorting_contours.png differ
diff --git a/docs/images/url_to_image.png b/docs/images/url_to_image.png
new file mode 100644
index 0000000..0c434e5
Binary files /dev/null and b/docs/images/url_to_image.png differ
diff --git a/imutils/contours.py b/imutils/contours.py
new file mode 100644
index 0000000..eaa36ab
--- /dev/null
+++ b/imutils/contours.py
@@ -0,0 +1,43 @@
+# author: Adrian Rosebrock
+# website: http://www.pyimagesearch.com
+
+# import the necessary packages
+import cv2
+
+def sort_contours(cnts, method="left-to-right"):
+    # initialize the reverse flag and sort index
+    reverse = False
+    i = 0
+
+    # handle if we need to sort in reverse
+    if method == "right-to-left" or method == "bottom-to-top":
+        reverse = True
+
+    # handle if we are sorting against the y-coordinate rather than
+    # the x-coordinate of the bounding box
+    if method == "top-to-bottom" or method == "bottom-to-top":
+        i = 1
+
+    # construct the list of bounding boxes and sort them from top to
+    # bottom
+    boundingBoxes = [cv2.boundingRect(c) for c in cnts]
+    (cnts, boundingBoxes) = zip(*sorted(zip(cnts, boundingBoxes),
+        key=lambda b:b[1][i], reverse=reverse))
+
+    # return the list of sorted contours and bounding boxes
+    return (cnts, boundingBoxes)
+
+def label_contour(image, c, i, color=(0, 255, 0), thickness=2):
+    # compute the center of the contour area and draw a circle
+    # representing the center
+    M = cv2.moments(c)
+    cX = int(M["m10"] / M["m00"])
+    cY = int(M["m01"] / M["m00"])
+
+    # draw the contour and label number on the image
+    cv2.drawContours(image, [c], -1, color, thickness)
+    cv2.putText(image, "#{}".format(i + 1), (cX - 20, cY), cv2.FONT_HERSHEY_SIMPLEX,
+        1.0, (255, 255, 255), 2)
+
+    # return the image with the contour number drawn on it
+    return image
\ No newline at end of file
diff --git a/imutils/contours.pyc b/imutils/contours.pyc
new file mode 100644
index 0000000..00a1357
Binary files /dev/null and b/imutils/contours.pyc differ
diff --git a/imutils/convenience.py b/imutils/convenience.py
new file mode 100644
index 0000000..0c677a2
--- /dev/null
+++ b/imutils/convenience.py
@@ -0,0 +1,120 @@
+# author: Adrian Rosebrock
+# website: http://www.pyimagesearch.com
+
+# import the necessary packages
+import numpy as np
+import urllib
+import cv2
+
+def translate(image, x, y):
+    # define the translation matrix and perform the translation
+    M = np.float32([[1, 0, x], [0, 1, y]])
+    shifted = cv2.warpAffine(image, M, (image.shape[1], image.shape[0]))
+
+    # return the translated image
+    return shifted
+
+def rotate(image, angle, center=None, scale=1.0):
+    # grab the dimensions of the image
+    (h, w) = image.shape[:2]
+
+    # if the center is None, initialize it as the center of
+    # the image
+    if center is None:
+        center = (w / 2, h / 2)
+
+    # perform the rotation
+    M = cv2.getRotationMatrix2D(center, angle, scale)
+    rotated = cv2.warpAffine(image, M, (w, h))
+
+    # return the rotated image
+    return rotated
+
+def resize(image, width=None, height=None, inter=cv2.INTER_AREA):
+    # initialize the dimensions of the image to be resized and
+    # grab the image size
+    dim = None
+    (h, w) = image.shape[:2]
+
+    # if both the width and height are None, then return the
+    # original image
+    if width is None and height is None:
+        return image
+
+    # check to see if the width is None
+    if width is None:
+        # calculate the ratio of the height and construct the
+        # dimensions
+        r = height / float(h)
+        dim = (int(w * r), height)
+
+    # otherwise, the height is None
+    else:
+        # calculate the ratio of the width and construct the
+        # dimensions
+        r = width / float(w)
+        dim = (width, int(h * r))
+
+    # resize the image
+    resized = cv2.resize(image, dim, interpolation=inter)
+
+    # return the resized image
+    return resized
+
+def skeletonize(image, size, structuring=cv2.MORPH_RECT):
+    # determine the area (i.e. total number of pixels in the image),
+    # initialize the output skeletonized image, and construct the
+    # morphological structuring element
+    area = image.shape[0] * image.shape[1]
+    skeleton = np.zeros(image.shape, dtype="uint8")
+    elem = cv2.getStructuringElement(structuring, size)
+
+    # keep looping until the erosions remove all pixels from the
+    # image
+    while True:
+        # erode and dilate the image using the structuring element
+        eroded = cv2.erode(image, elem)
+        temp = cv2.dilate(eroded, elem)
+
+        # subtract the temporary image from the original, eroded
+        # image, then take the bitwise 'or' between the skeleton
+        # and the temporary image
+        temp = cv2.subtract(image, temp)
+        skeleton = cv2.bitwise_or(skeleton, temp)
+        image = eroded.copy()
+
+        # if there are no more 'white' pixels in the image, then
+        # break from the loop
+        if area == area - cv2.countNonZero(image):
+            break
+
+    # return the skeletonized image
+    return skeleton
+
+def opencv2matplotlib(image):
+    # OpenCV represents images in BGR order; however, Matplotlib
+    # expects the image in RGB order, so simply convert from BGR
+    # to RGB and return
+    return cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
+
+def url_to_image(url, readFlag=cv2.IMREAD_COLOR):
+    # download the image, convert it to a NumPy array, and then read
+    # it into OpenCV format
+    resp = urllib.urlopen(url)
+    image = np.asarray(bytearray(resp.read()), dtype="uint8")
+    image = cv2.imdecode(image, readFlag)
+
+    # return the image
+    return image
+
+def auto_canny(image, sigma=0.33):
+    # compute the median of the single channel pixel intensities
+    v = np.median(image)
+
+    # apply automatic Canny edge detection using the computed median
+    lower = int(max(0, (1.0 - sigma) * v))
+    upper = int(min(255, (1.0 + sigma) * v))
+    edged = cv2.Canny(image, lower, upper)
+
+    # return the edged image
+    return edged
\ No newline at end of file
diff --git a/imutils/convenience.pyc b/imutils/convenience.pyc
new file mode 100644
index 0000000..5e6376f
Binary files /dev/null and b/imutils/convenience.pyc differ
diff --git a/imutils/perspective.py b/imutils/perspective.py
new file mode 100644
index 0000000..faacc29
--- /dev/null
+++ b/imutils/perspective.py
@@ -0,0 +1,67 @@
+# author: Adrian Rosebrock
+# website: http://www.pyimagesearch.com
+
+# import the necessary packages
+import numpy as np
+import cv2
+
+def order_points(pts):
+    # initialize a list of coordinates that will be ordered
+    # such that the first entry in the list is the top-left,
+    # the second entry is the top-right, the third is the
+    # bottom-right, and the fourth is the bottom-left
+    rect = np.zeros((4, 2), dtype="float32")
+
+    # the top-left point will have the smallest sum, whereas
+    # the bottom-right point will have the largest sum
+    s = pts.sum(axis=1)
+    rect[0] = pts[np.argmin(s)]
+    rect[2] = pts[np.argmax(s)]
+
+    # now, compute the difference between the points, the
+    # top-right point will have the smallest difference,
+    # whereas the bottom-left will have the largest difference
+    diff = np.diff(pts, axis=1)
+    rect[1] = pts[np.argmin(diff)]
+    rect[3] = pts[np.argmax(diff)]
+
+    # return the ordered coordinates
+    return rect
+
+def four_point_transform(image, pts):
+    # obtain a consistent order of the points and unpack them
+    # individually
+    rect = order_points(pts)
+    (tl, tr, br, bl) = rect
+
+    # compute the width of the new image, which will be the
+    # maximum distance between bottom-right and bottom-left
+    # x-coordiates or the top-right and top-left x-coordinates
+    widthA = np.sqrt(((br[0] - bl[0]) ** 2) + ((br[1] - bl[1]) ** 2))
+    widthB = np.sqrt(((tr[0] - tl[0]) ** 2) + ((tr[1] - tl[1]) ** 2))
+    maxWidth = max(int(widthA), int(widthB))
+
+    # compute the height of the new image, which will be the
+    # maximum distance between the top-right and bottom-right
+    # y-coordinates or the top-left and bottom-left y-coordinates
+    heightA = np.sqrt(((tr[0] - br[0]) ** 2) + ((tr[1] - br[1]) ** 2))
+    heightB = np.sqrt(((tl[0] - bl[0]) ** 2) + ((tl[1] - bl[1]) ** 2))
+    maxHeight = max(int(heightA), int(heightB))
+
+    # now that we have the dimensions of the new image, construct
+    # the set of destination points to obtain a "birds eye view",
+    # (i.e. top-down view) of the image, again specifying points
+    # in the top-left, top-right, bottom-right, and bottom-left
+    # order
+    dst = np.array([
+        [0, 0],
+        [maxWidth - 1, 0],
+        [maxWidth - 1, maxHeight - 1],
+        [0, maxHeight - 1]], dtype="float32")
+
+    # compute the perspective transform matrix and then apply it
+    M = cv2.getPerspectiveTransform(rect, dst)
+    warped = cv2.warpPerspective(image, M, (maxWidth, maxHeight))
+
+    # return the warped image
+    return warped
\ No newline at end of file
diff --git a/imutils/perspective.pyc b/imutils/perspective.pyc
new file mode 100644
index 0000000..636ffa6
Binary files /dev/null and b/imutils/perspective.pyc differ
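The sketches below are not part of the patch; they only illustrate how the newly added modules are intended to be called. First, the helpers in imutils/contours.py, which operate on the output of cv2.findContours: sort_contours orders contours by their bounding boxes and label_contour draws a numbered label at each contour's centroid. The demo image, the Otsu thresholding step, and the output filename are assumptions for illustration, and the snippet assumes the imutils package is importable (its __init__.py is not shown in this diff).

# usage sketch (not part of the patch): sort and label contours
# the image path, thresholding, and output name are illustrative assumptions
import cv2
from imutils import contours

# load the demo image, convert it to grayscale, and threshold it;
# depending on the image, THRESH_BINARY_INV may be needed so the
# shapes appear as white blobs on a black background
image = cv2.imread("demo_images/shapes.png")
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
thresh = cv2.threshold(gray, 0, 255, cv2.THRESH_BINARY | cv2.THRESH_OTSU)[1]

# find external contours, handling the tuple layout that differs
# between OpenCV versions
found = cv2.findContours(thresh.copy(), cv2.RETR_EXTERNAL,
    cv2.CHAIN_APPROX_SIMPLE)
cnts = found[0] if len(found) == 2 else found[1]

# sort the contours left-to-right, then draw a numbered label at
# the centroid of each one
(cnts, boundingBoxes) = contours.sort_contours(cnts, method="left-to-right")
for (i, c) in enumerate(cnts):
    image = contours.label_contour(image, c, i)

cv2.imwrite("shapes_labeled.png", image)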
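Next, a sketch of the convenience helpers in imutils/convenience.py: resize preserves the aspect ratio from a single target dimension, rotate turns the image about its center by default, and auto_canny derives the Canny thresholds from the median pixel intensity. Note that url_to_image calls urllib.urlopen, which is the Python 2 spelling, so this sketch assumes a Python 2 environment consistent with the rest of the patch; the image path and output name are again placeholders.

# usage sketch (not part of the patch): convenience helpers
# the image path and output name are illustrative assumptions
import cv2
from imutils import convenience

# load a demo image and shrink it to a 300 pixel width while
# preserving the aspect ratio
image = cv2.imread("demo_images/notecard.png")
small = convenience.resize(image, width=300)

# rotate the resized image 45 degrees about its center
rotated = convenience.rotate(small, angle=45)

# blur a grayscale copy and run parameter-free edge detection;
# auto_canny picks the lower/upper Canny thresholds from the
# median intensity of the blurred image
gray = cv2.cvtColor(small, cv2.COLOR_BGR2GRAY)
blurred = cv2.GaussianBlur(gray, (3, 3), 0)
edged = convenience.auto_canny(blurred)

cv2.imwrite("notecard_auto_canny.png", edged)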
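Finally, a sketch of the new four_point_transform helper in imutils/perspective.py. The four corner coordinates below are made-up values standing in for points that would normally come from a detected quadrilateral (for example, a four-point contour approximation); they can be supplied in any order because order_points arranges them top-left, top-right, bottom-right, bottom-left before computing the warp.

# usage sketch (not part of the patch): top-down perspective warp
# the four corner coordinates are made-up values for illustration
import numpy as np
import cv2
from imutils import perspective

# load a demo image and supply the four corners of the region to
# straighten; in practice these usually come from approximating a
# four-point contour
image = cv2.imread("demo_images/notecard.png")
pts = np.array([(73, 239), (356, 117), (475, 265), (187, 443)],
    dtype="float32")

# the corners can be given in any order: order_points arranges them
# top-left, top-right, bottom-right, bottom-left before the warp
warped = perspective.four_point_transform(image, pts)
cv2.imwrite("notecard_birds_eye.png", warped)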