diff --git a/.vscode/settings.json b/.vscode/settings.json
index 1730c2b..a6c1772 100644
--- a/.vscode/settings.json
+++ b/.vscode/settings.json
@@ -9,5 +9,12 @@
     "python.testing.pytestEnabled": false,
     "python.testing.nosetestsEnabled": false,
     "python.testing.unittestEnabled": true,
-    "python.pythonPath": "/home/lukas/.local/share/virtualenvs/shenzhen-solitaire-nsu5dgrx/bin/python"
+    "python.linting.mypyArgs": [
+        "--ignore-missing-imports",
+        "--follow-imports=silent",
+        "--show-column-numbers",
+        "--strict"
+    ],
+    "python.linting.mypyEnabled": true,
+    "python.formatting.provider": "black"
 }
\ No newline at end of file
diff --git a/tools/feature_extraction.py b/tools/feature_extraction.py
index a43f794..224dcf0 100644
--- a/tools/feature_extraction.py
+++ b/tools/feature_extraction.py
@@ -4,10 +4,10 @@
 from shenzhen_solitaire.board import NumberCard, SpecialCard
 import cv2
 import numpy as np
-from typing import Any, Tuple
+from typing import Any, Tuple, List, Union, Dict, Optional
 
 
-def border_image(image, size=1, color=0):
+def border_image(image: np.array, size: int = 1, color: int = 0) -> None:
     for ring in range(size):
         for x in range(ring, image.shape[0] - ring):
             image[x][ring] = color
@@ -17,47 +17,39 @@ def border_image(image, size=1, color=0):
             image[image.shape[0] - 1 - ring][y] = color
 
 
-def prepare_image(image):
+def prepare_image(image: np.array) -> np.array:
     cnt = get_contour(image)
     mask = np.zeros(image.shape[:2], dtype=image.dtype)
     contim = cv2.drawContours(mask, [cnt], 0, 255, cv2.FILLED)
-    # crop = np.multiply(edge_image, contim)
     return contim
 
 
-def get_contour(image):
+def get_contour(image: np.array) -> np.array:
     gray_image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
     ret, edge_image = cv2.threshold(gray_image, 127, 255, cv2.THRESH_BINARY_INV)
-    border_image(edge_image, size=0)
+    kernel = cv2.getStructuringElement(cv2.MORPH_CROSS, (3, 3))
+    edge_image = cv2.morphologyEx(edge_image, cv2.MORPH_CLOSE, kernel)
+    border_image(edge_image, size=1)
     contours, hierarchy = cv2.findContours(
         edge_image, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE
     )
     cnt = max(contours, key=cv2.contourArea)
+    assert isinstance(cnt, np.ndarray)
     return cnt
 
 
-def matchScaleInvShape(cont1, cont2):
+def matchScaleInvShape(cont1: np.array, cont2: np.array) -> float:
     m1 = cv2.moments(cont1)
     m2 = cv2.moments(cont2)
     moments = [
         (m1[moment], m2[moment]) for moment in m1 if str(moment).startswith("nu")
     ]
-    return sum([abs((nu1) - (nu2)) for nu1, nu2 in moments])
+    return sum([abs((nu1) - (nu2)) * 1000 for nu1, nu2 in moments])
 
 
-def match_template(image, template):
-    image_cont, hierarchy = cv2.findContours(
-        image, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE
-    )
-    imcont = max(image_cont, key=cv2.contourArea)
-    template_cont, hierarchy = cv2.findContours(
-        template, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE
-    )
-    temcont = max(template_cont, key=cv2.contourArea)
-    return [matchScaleInvShape(imcont, temcont)]
-
-
-def type_fine(one, other) -> bool:
+def type_fine(
+    one: Union[SpecialCard, NumberCard], other: Union[SpecialCard, NumberCard]
+) -> bool:
     if isinstance(one, SpecialCard):
         return one == other
     assert isinstance(one, NumberCard)
@@ -65,7 +57,10 @@ def type_fine(one, other) -> bool:
         return False
     return one.number == other.number
 
-def check_type(matches, should_type):
+
+def check_type(
+    matches: List[Any], should_type: Union[SpecialCard, NumberCard]
+) -> Optional[int]:
     if not type_fine(matches[0][0], should_type):
         correct_index = 0
         for list_type, list_value, _ in matches:
@@ -77,24 +72,43 @@ def check_type(matches, should_type):
             f"{str(should_type):>20} matched as {str(matches[0][0]):>20} {matches[0][1]:.05f}, "
             f"correct in pos {correct_index:02d} val {correct_value:.05f}"
         )
-        cv2.imshow("one", prepare_image(catalogue[matches[0][2]][0]))
-        cv2.imshow("two", img1)
-        cv2.imshow("three", prepare_image(catalogue[matches[correct_index][2]][0]))
-        cv2.waitKey(0)
-        return True
-    return False
+        catalogue_index = matches[correct_index][2]
+        assert isinstance(catalogue_index, int)
+        return catalogue_index
+    return None
 
-def debug_match(image, image_type, catalogue):
+
+def show_wrong_images(
+    current: np.ndarray, correct: np.ndarray, wrong: np.ndarray
+) -> None:
+    cv2.imshow("Current", current)
+    cv2.imshow("Correct", correct)
+    cv2.imshow("Wrong", wrong)
+    cv2.waitKey(0)
+
+
+def debug_match(
+    image: np.array,
+    image_type: Union[NumberCard, SpecialCard],
+    catalogue: List[Tuple[Any, Union[SpecialCard, NumberCard]]],
+) -> None:
     cnt1 = prepare_image(image)
     i1_matches = []
     for index, (template_image, template_type) in enumerate(catalogue):
         cnt2 = prepare_image(template_image)
         i1_matches.append((template_type, matchScaleInvShape(cnt1, cnt2), index))
     i1_matches = sorted(i1_matches, key=lambda x: x[1])
-
+    correct_type_index = check_type(i1_matches, image_type)
+    if correct_type_index is not None:
+        show_wrong_images(
+            cnt1,
+            prepare_image(catalogue[correct_type_index][0]),
+            prepare_image(catalogue[i1_matches[0][2]][0]),
+        )
+        return
     for list_type, list_value, list_index in i1_matches:
         if not type_fine(list_type, i1_matches[0][0]):
-            if list_value * 0.4 < i1_matches[0][1]:
+            if list_value * 0.8 < i1_matches[0][1]:
                 print(
                     f"{str(image_type):>20} {i1_matches[0][1]:.05f} very close"
                     f" match with {str(list_type):>20} {list_value:.05f}"