Worked on feature extraction

Lukas Wölfer
2020-06-17 20:12:54 +02:00
parent fc2e3aca4c
commit 11919bb13c


@@ -7,19 +7,42 @@ import numpy as np
 from typing import Any, Tuple


+def border_image(image, size=1, color=0):
+    for ring in range(size):
+        for x in range(ring, image.shape[0] - ring):
+            image[x][ring] = color
+            image[x][image.shape[1] - 1 - ring] = color
+        for y in range(ring, image.shape[1] - ring):
+            image[ring][y] = color
+            image[image.shape[0] - 1 - ring][y] = color
+
+
 def prepare_image(image):
+    cnt = get_contour(image)
+    mask = np.zeros(image.shape[:2], dtype=image.dtype)
+    contim = cv2.drawContours(mask, [cnt], 0, 255, cv2.FILLED)
+    # crop = np.multiply(edge_image, contim)
+    return contim
+
+
+def get_contour(image):
     gray_image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
     # edge_image = cv2.Canny(gray_image, 120, 160)
     ret, edge_image = cv2.threshold(gray_image, 127, 255, cv2.THRESH_BINARY_INV)
-    contours2, hierarchy = cv2.findContours(
+    border_image(edge_image, size=0)
+    contours, hierarchy = cv2.findContours(
         edge_image, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE
     )
-    cnt2 = max(contours2, key=cv2.contourArea)
-    mask = np.zeros(edge_image.shape, dtype=edge_image.dtype)
-    contim = cv2.drawContours(mask, [cnt2], 0, 1, cv2.FILLED)
-    crop = np.multiply(edge_image, contim)
-    return crop
+    cnt = max(contours, key=cv2.contourArea)
+    return cnt
+
+
+def matchScaleInvShape(cont1, cont2):
+    m1 = cv2.moments(cont1)
+    m2 = cv2.moments(cont2)
+    moments = [
+        (m1[moment], m2[moment]) for moment in m1 if str(moment).startswith("nu")
+    ]
+    return sum([abs((nu1) - (nu2)) for nu1, nu2 in moments])


 def match_template(image, template):
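The matchScaleInvShape distance added above sums the absolute differences of the normalized central moments of two contours (the "nu" entries of cv2.moments), which are invariant to translation and scale but not to rotation. A minimal standalone sketch of the same idea, using illustrative helper names (contour_of, nu_distance) and synthetic square images that are not part of this repository:

import cv2
import numpy as np

# Two synthetic shapes: a filled square and the same square scaled up 3x.
small = np.zeros((100, 100), dtype=np.uint8)
cv2.rectangle(small, (30, 30), (70, 70), 255, cv2.FILLED)
large = np.zeros((300, 300), dtype=np.uint8)
cv2.rectangle(large, (90, 90), (210, 210), 255, cv2.FILLED)

def contour_of(binary):
    # Largest contour of a binary image (same selection rule as get_contour).
    contours, _ = cv2.findContours(binary, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)
    return max(contours, key=cv2.contourArea)

def nu_distance(cont1, cont2):
    # Same idea as matchScaleInvShape: compare only the scale-invariant
    # normalized central moments ("nu" keys) of the two contours.
    m1, m2 = cv2.moments(cont1), cv2.moments(cont2)
    return sum(abs(m1[k] - m2[k]) for k in m1 if k.startswith("nu"))

print(nu_distance(contour_of(small), contour_of(large)))  # near 0.0 despite the size difference

Because only the "nu" moments enter the sum, shapes that differ only in size should score near zero, while shapes that differ in orientation generally will not.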
@@ -31,14 +54,7 @@ def match_template(image, template):
         template, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE
     )
     temcont = max(template_cont, key=cv2.contourArea)
-    return [
-        cv2.matchShapes(imcont, temcont, mode, 0.0)
-        for mode in (
-            cv2.CONTOURS_MATCH_I1,
-            cv2.CONTOURS_MATCH_I2,
-            cv2.CONTOURS_MATCH_I3,
-        )
-    ]
+    return [matchScaleInvShape(imcont, temcont)]


 def type_fine(one, other) -> bool:
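For comparison, the removed cv2.matchShapes scores are computed from log-scaled Hu moments, which are additionally rotation-invariant, whereas matchScaleInvShape compares the raw "nu" moments directly. A quick side-by-side check, continuing the sketch above (same illustrative small/large images and helpers):

c1, c2 = contour_of(small), contour_of(large)

# New metric in this commit: raw normalized central moments.
print(nu_distance(c1, c2))

# Previous metric: Hu-moment based matchShapes (only mode I1 shown here,
# of the three modes the old match_template returned).
print(cv2.matchShapes(c1, c2, cv2.CONTOURS_MATCH_I1, 0.0))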
@@ -49,33 +65,36 @@ def type_fine(one, other) -> bool:
         return False
     return one.number == other.number


-def debug_match(image, image_type, catalogue):
-    img1 = prepare_image(image)
-    i1_matches = []
-    for index, (template_image, template_type) in enumerate(catalogue):
-        img2 = prepare_image(template_image)
-        i1_matches.append((template_type, match_template(img1, img2)[0], index))
-    i1_matches = sorted(i1_matches, key=lambda x: x[1])
-    if not type_fine(i1_matches[0][0], image_type):
+def check_type(matches, should_type):
+    if not type_fine(matches[0][0], should_type):
         correct_index = 0
-        for list_type, list_value, _ in i1_matches:
-            if type_fine(list_type, image_type):
+        for list_type, list_value, _ in matches:
+            if type_fine(list_type, should_type):
                 correct_value = list_value
                 break
             correct_index += 1
         print(
-            f"{str(image_type):>20} matched as {str(i1_matches[0][0]):>20} {i1_matches[0][1]:.05f}, "
+            f"{str(should_type):>20} matched as {str(matches[0][0]):>20} {matches[0][1]:.05f}, "
             f"correct in pos {correct_index:02d} val {correct_value:.05f}"
         )
-        cv2.imshow("one", prepare_image(catalogue[i1_matches[0][2]][0]))
+        cv2.imshow("one", prepare_image(catalogue[matches[0][2]][0]))
         cv2.imshow("two", img1)
-        cv2.imshow("three", prepare_image(catalogue[i1_matches[correct_index][2]][0]))
+        cv2.imshow("three", prepare_image(catalogue[matches[correct_index][2]][0]))
         cv2.waitKey(0)
-        return
+        return True
+    return False
+
+
+def debug_match(image, image_type, catalogue):
+    cnt1 = prepare_image(image)
+    i1_matches = []
+    for index, (template_image, template_type) in enumerate(catalogue):
+        cnt2 = prepare_image(template_image)
+        i1_matches.append((template_type, matchScaleInvShape(cnt1, cnt2), index))
+    i1_matches = sorted(i1_matches, key=lambda x: x[1])
     for list_type, list_value, list_index in i1_matches:
         if not type_fine(list_type, i1_matches[0][0]):
-            if list_value * 0.6 < i1_matches[0][1]:
+            if list_value * 0.4 < i1_matches[0][1]:
                 print(
                     f"{str(image_type):>20} {i1_matches[0][1]:.05f} very close"
                     f" match with {str(list_type):>20} {list_value:.05f}"