diff --git a/combine_main.py b/combine_main.py
index 0ed859a..6796763 100644
--- a/combine_main.py
+++ b/combine_main.py
@@ -83,11 +83,11 @@ def run():
             #cv.waitKey(150)
             #continue
 
-        spawn_1 = vision_stun.find(screenshot, cv.imread("magic/spawn_1.jpg", cv.IMREAD_UNCHANGED), 0.5, 1, True, cv.imread("magic/spawn_1-mask.png", cv.IMREAD_UNCHANGED))
+        spawn_1 = vision_stun.find(screenshot, cv.imread("magic/spawn_1.jpg", cv.IMREAD_UNCHANGED), 0.25, 1)
         if len(spawn_1) == 1:
             spawn_button_active = True
             points = vision_stun.get_click_points(spawn_1)
-            for i in range(0, 200, 1):
+            for i in range(0, 100, 1):
                 pydirectinput.moveTo(points[0][0], points[0][1])
                 pydirectinput.mouseDown()
                 w = random.randint(1, 50)
@@ -121,7 +121,7 @@ def run():
             cv.imshow("output_needle", processed_needle)
             cv.waitKey(150)
 
-        rectangles = vision_stun.find(processed_screenshot, processed_needle, 0.90, 2, True, None)
+        rectangles = vision_stun.find(processed_screenshot, processed_needle, 0.7, 2)
         # draw the detection results onto the original image
         #output_image = vision_stun.draw_rectangles(screenshot, rectangles)
         #cv.imshow("output_image", output_image)
diff --git a/magic/spawn_1-mask.png b/magic/spawn_1-mask.png
new file mode 100644
index 0000000..d7cba89
Binary files /dev/null and b/magic/spawn_1-mask.png differ
diff --git a/magic/spawn_1.png b/magic/spawn_1.png
new file mode 100644
index 0000000..1edeede
Binary files /dev/null and b/magic/spawn_1.png differ
diff --git a/vision.py b/vision.py
index cdaca19..865958b 100644
--- a/vision.py
+++ b/vision.py
@@ -86,6 +86,70 @@ class Vision:
 
         return keep_rects
 
+    def find_comb(self, haystack_img, needle_img, threshold=0.5, max_results=10, normalize=False, mask=None):
+        # run the OpenCV algorithm
+        needle_w = needle_img.shape[1]
+        needle_h = needle_img.shape[0]
+
+        if mask is not None:
+            result = cv.matchTemplate(haystack_img, needle_img, cv.TM_CCORR_NORMED, None, mask)
+            _minVal, _maxVal, minLoc, maxLoc = cv.minMaxLoc(result, None)
+        else:
+            result = cv.matchTemplate(haystack_img, needle_img, self.method)
+
+        if normalize:
+            cv.normalize(result, result, 0, 1, cv.NORM_MINMAX, -1)
+        # Get the all the positions from the match result that exceed our threshold
+        _minVal, _maxVal, minLoc, maxLoc = cv.minMaxLoc(result, None)
+        if max_results == 1 and _maxVal >= threshold:
+            locations = []
+            locations.append(maxLoc)
+        else:
+            locations = np.where(result >= threshold)
+            locations = list(zip(*locations[::-1]))
+        # print(locations)
+
+
+        #_minVal, _maxVal, minLoc, maxLoc = cv.minMaxLoc(result, None)
+
+        # if we found no results, return now. this reshape of the empty array allows us to
+        # concatenate together results without causing an error
+        if not locations:
+            return np.array([], dtype=np.int32).reshape(0, 4)
+        #while len(locations) > 1000:
+        #    threshold = threshold + 0.01
+        #    locations = np.where(result >= threshold)
+        #    locations = list(zip(*locations[::-1]))
+        #    print("modified treshhold to:" + str(threshold))
+        #    print("actual locations:" + str(len(locations)))
+
+        if len(locations) > 5000:
+            return np.array([], dtype=np.int32).reshape(0, 4)
+
+        # You'll notice a lot of overlapping rectangles get drawn. We can eliminate those redundant
+        # locations by using groupRectangles().
+        # First we need to create the list of [x, y, w, h] rectangles
+        rectangles = []
+        for loc in locations:
+            rect = [int(loc[0]), int(loc[1]), needle_w, needle_h]
+            # Add every box to the list twice in order to retain single (non-overlapping) boxes
+            rectangles.append(rect)
+            rectangles.append(rect)
+        # Apply group rectangles.
+        # The groupThreshold parameter should usually be 1. If you put it at 0 then no grouping is
+        # done. If you put it at 2 then an object needs at least 3 overlapping rectangles to appear
+        # in the result. I've set eps to 0.5, which is:
+        # "Relative difference between sides of the rectangles to merge them into a group."
+        rectangles, weights = cv.groupRectangles(rectangles, groupThreshold=1, eps=0.5)
+        # print(rectangles)
+
+        # for performance reasons, return a limited number of results.
+        # these aren't necessarily the best results.
+        if len(rectangles) > max_results:
+            #print('Warning: too many results, raise the threshold.')
+            rectangles = rectangles[:max_results]
+
+        return rectangles
 
     def find(self, haystack_img, needle_img, threshold=0.5, max_results=10, normalize=False, mask=None):
         # run the OpenCV algorithm
@@ -94,13 +158,15 @@ class Vision:
 
         if mask is not None:
             result = cv.matchTemplate(haystack_img, needle_img, cv.TM_CCORR_NORMED, None, mask)
-            #_minVal, _maxVal, minLoc, maxLoc = cv.minMaxLoc(result, None)
+            _minVal, _maxVal, minLoc, maxLoc = cv.minMaxLoc(result, None)
         else:
             result = cv.matchTemplate(haystack_img, needle_img, self.method)
 
         if normalize:
             cv.normalize(result, result, 0, 1, cv.NORM_MINMAX, -1)
         # Get the all the positions from the match result that exceed our threshold
+# _minVal, _maxVal, minLoc, maxLoc = cv.minMaxLoc(result, None)
+
         locations = np.where(result >= threshold)
         locations = list(zip(*locations[::-1]))
         # print(locations)
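The comment block in find_comb() above explains the groupRectangles() trick (append every box twice, groupThreshold=1, eps=0.5). Below is a minimal standalone sketch of that de-duplication step, kept outside the patch itself; the detection boxes are made up purely for illustration.

import cv2 as cv

# Three heavily overlapping hits on the same 50x30 needle, plus one isolated hit.
detections = [
    [100, 100, 50, 30],
    [102, 101, 50, 30],
    [ 98,  99, 50, 30],
    [400, 250, 50, 30],
]

rectangles = []
for rect in detections:
    # Append each box twice so a single, non-overlapping detection still survives
    # groupRectangles(), which drops any group containing groupThreshold or fewer boxes.
    rectangles.append(rect)
    rectangles.append(rect)

# eps=0.5 allows up to a 50% relative difference in position/size when merging boxes into a group.
grouped, weights = cv.groupRectangles(rectangles, groupThreshold=1, eps=0.5)
print(grouped)  # one averaged box near (100, 100) plus the isolated box at (400, 250)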