diff --git a/1.jpg b/1.jpg new file mode 100644 index 0000000..4b9f2e0 Binary files /dev/null and b/1.jpg differ diff --git a/2.jpg b/2.jpg new file mode 100644 index 0000000..606538a Binary files /dev/null and b/2.jpg differ diff --git a/3.jpg b/3.jpg new file mode 100644 index 0000000..dcc8791 Binary files /dev/null and b/3.jpg differ diff --git a/4.jpg b/4.jpg new file mode 100644 index 0000000..a765952 Binary files /dev/null and b/4.jpg differ diff --git a/Brown0.jpg b/Brown0.jpg new file mode 100644 index 0000000..acdde21 Binary files /dev/null and b/Brown0.jpg differ diff --git a/D1.jpg b/D1.jpg new file mode 100644 index 0000000..b7e7bdb Binary files /dev/null and b/D1.jpg differ diff --git a/D2.jpg b/D2.jpg new file mode 100644 index 0000000..5faf0b8 Binary files /dev/null and b/D2.jpg differ diff --git a/D3.jpg b/D3.jpg new file mode 100644 index 0000000..195b20e Binary files /dev/null and b/D3.jpg differ diff --git a/D4.jpg b/D4.jpg new file mode 100644 index 0000000..c96b42f Binary files /dev/null and b/D4.jpg differ diff --git a/D5.jpg b/D5.jpg new file mode 100644 index 0000000..ebe36fc Binary files /dev/null and b/D5.jpg differ diff --git a/D6.jpg b/D6.jpg new file mode 100644 index 0000000..deb99ba Binary files /dev/null and b/D6.jpg differ diff --git a/D7.jpg b/D7.jpg new file mode 100644 index 0000000..cdfb526 Binary files /dev/null and b/D7.jpg differ diff --git a/H1.jpg b/H1.jpg new file mode 100644 index 0000000..2c4fe44 Binary files /dev/null and b/H1.jpg differ diff --git a/H2.jpg b/H2.jpg new file mode 100644 index 0000000..89ac528 Binary files /dev/null and b/H2.jpg differ diff --git a/H3.jpg b/H3.jpg new file mode 100644 index 0000000..9c090c5 Binary files /dev/null and b/H3.jpg differ diff --git a/H4.jpg b/H4.jpg new file mode 100644 index 0000000..6cc4922 Binary files /dev/null and b/H4.jpg differ diff --git a/candy-c/grid.jpg b/candy-c/grid.jpg new file mode 100644 index 0000000..9ac1662 Binary files /dev/null and b/candy-c/grid.jpg 
def run():
    """Main automation loop for the 'magic' mini-game.

    Attaches to the game window, then repeatedly: handles the spawn buttons,
    template-matches tiered needle images (HSV-filtered) and drags matched
    pairs together, then clicks white-tier matches and the collect button.
    'o' resumes the loop, 'p' pauses it.

    NOTE(review): depends on project classes WindowCapture/StunVision and on
    needle images under magic/ existing on disk -- not runnable standalone.
    """
    config = UserConfigs()

    # Attach to the game window; if that fails fall back to "video mode",
    # which currently just exits the main loop immediately.
    try:
        capture_window = WindowCapture(None, "stun", config)
        video_mode = False
    except Exception:  # narrowed from bare except so Ctrl+C still propagates
        video_mode = True

    vision_stun = StunVision()

    # Per-tier HSV filters (hMin, sMin, vMin, hMax, sMax, vMax, adds/subs).
    hsv_filter_orange = HsvFilter(10, 156, 0, 17, 255, 255, 0, 0, 0, 0)
    hsv_filter_p = HsvFilter(130, 156, 0, 179, 255, 255, 0, 0, 0, 0)
    hsv_filter_b = HsvFilter(88, 156, 0, 128, 255, 255, 0, 0, 0, 0)
    hsv_filter_g = HsvFilter(34, 156, 0, 74, 255, 255, 0, 0, 0, 0)
    hsv_filter_y = HsvFilter(24, 156, 0, 33, 255, 255, 0, 0, 0, 0)
    hsv_filter_0 = HsvFilter(0, 0, 0, 179, 255, 255, 255, 0, 0, 0)
    hsv_filter_w = HsvFilter(69, 25, 0, 94, 255, 255, 0, 0, 0, 0)

    loop_time = time()
    pause = True

    magic_list = {"1": "body", "2": "finding", "3": "mind", "4": "perceiving",
                  "5": "physical", "6": "seeing", "7": "spiritual"}
    tier_list = {"1": "0", "2": "orange", "3": "y", "4": "g", "5": "b", "6": "p"}
    hsv_filter_list = {"1": hsv_filter_0, "2": hsv_filter_orange, "3": hsv_filter_y,
                       "4": hsv_filter_g, "5": hsv_filter_b, "6": hsv_filter_p}

    # Load every tier/magic needle image once up front (magic/<name>_<tier>.jpg)
    # together with the matching HSV filter for that tier.
    needle_list = []
    hsv_list = []
    for tier_key in tier_list:
        for magic_key in magic_list:
            needle_list.append(cv.imread(
                "magic/" + magic_list[magic_key] + "_" + tier_list[tier_key] + ".jpg",
                cv.IMREAD_UNCHANGED))
            hsv_list.append(hsv_filter_list[tier_key])

    # FIX: the white-tier needles and the spawn/collect button templates were
    # re-read from disk on every single loop iteration; load them once here.
    needles_white = [cv.imread("magic/" + name + "_w.jpg", cv.IMREAD_UNCHANGED)
                     for name in ("body", "finding", "mind", "perceiving",
                                  "physical", "seeing", "spiritual")]
    spawn_1_needle = cv.imread("magic/spawn_1.jpg", cv.IMREAD_UNCHANGED)
    spawn_0_needle = cv.imread("magic/spawn_0.jpg", cv.IMREAD_UNCHANGED)
    collect_needle = cv.imread("magic/collect.jpg", cv.IMREAD_UNCHANGED)

    while True:
        # Global pause toggle.
        if keyboard.is_pressed('p'):
            pause = True
            print('p pressed')  # FIX: message previously said 'q pressed'
        elif keyboard.is_pressed('o'):
            pause = False
            print('o pressed')
        if pause:
            print("pausing")
            continue

        if video_mode:
            break
        try:
            screenshot = capture_window.get_screenshot()
        except Exception:
            capture_window.release()
            print("Game window not available - shutting down application")
            break

        # Spawn handling: hammer the active spawn button 200 times; otherwise
        # click the inactive button once per match and retry next iteration.
        spawn_1 = vision_stun.find(screenshot, spawn_1_needle, 0.5, 1)
        if len(spawn_1) == 1:
            points = vision_stun.get_click_points(spawn_1)
            for _ in range(200):
                pydirectinput.moveTo(points[0][0], points[0][1])
                pydirectinput.mouseDown()
                cv.waitKey(30 + random.randint(1, 50))  # humanised delay
                pydirectinput.mouseUp()
        else:
            spawn_0 = vision_stun.find(screenshot, spawn_0_needle, 0.7, 1)
            for point in vision_stun.get_click_points(spawn_0):
                pydirectinput.moveTo(point[0], point[1])
                pydirectinput.mouseDown()
                cv.waitKey(500)
                pydirectinput.mouseUp()
            continue

        # For every coloured needle: keep dragging matched pairs together
        # until fewer than two matches remain on screen.
        for idx in range(len(needle_list)):
            while True:
                screenshot = capture_window.get_screenshot()
                processed_screenshot = vision_stun.apply_hsv_filter(screenshot, hsv_list[idx])
                processed_needle = vision_stun.apply_hsv_filter(needle_list[idx], hsv_list[idx])

                cv.imshow("output_needle", processed_needle)
                cv.waitKey(150)

                rectangles = vision_stun.find(processed_screenshot, processed_needle, 0.70, 2)
                points = vision_stun.get_click_points(rectangles)
                if len(points) == 2:
                    # Drag the first match onto the second.
                    pydirectinput.moveTo(points[0][0], points[0][1])
                    pydirectinput.mouseDown()
                    cv.waitKey(250 + random.randint(1, 100))
                    pydirectinput.moveTo(points[1][0], points[1][1])
                    pydirectinput.mouseUp()
                    if keyboard.is_pressed('p') or pause:
                        pause = True
                        break
                else:
                    break
            if keyboard.is_pressed('p') or pause:
                pause = True
                break
        if keyboard.is_pressed('p') or pause:
            pause = True
            break

        # White-tier needles: click the match, then click the collect button.
        for needle_w in needles_white:
            screenshot = capture_window.get_screenshot()
            processed_screenshot = vision_stun.apply_hsv_filter(screenshot, hsv_filter_w)
            processed_needle = vision_stun.apply_hsv_filter(needle_w, hsv_filter_w)
            rectangles = vision_stun.find(processed_screenshot, processed_needle, 0.7, 1)
            points = vision_stun.get_click_points(rectangles)
            if len(points) >= 1:
                pydirectinput.moveTo(points[0][0], points[0][1])
                pydirectinput.mouseDown()
                cv.waitKey(100 + random.randint(1, 100))
                pydirectinput.mouseUp()
                # Re-grab the screen and collect the reward if offered.
                screenshot = capture_window.get_screenshot()
                rectangles = vision_stun.find(screenshot, collect_needle, 0.8, 1)
                points = vision_stun.get_click_points(rectangles)
                if len(points) >= 1:
                    pydirectinput.moveTo(points[0][0], points[0][1])
                    pydirectinput.mouseDown()
                    cv.waitKey(100 + random.randint(1, 100))
                    pydirectinput.mouseUp()
            if keyboard.is_pressed('p') or pause:
                pause = True
                break
        if keyboard.is_pressed('p') or pause:
            pause = True
            break

        # Debug the loop rate.
        print('FPS {}'.format(1 / (time() - loop_time)))
        loop_time = time()
        cv.waitKey(150)
# daoc chatwindow string:
# ChatWindow2=Wtf,1615,964,780,476,100,100,16,1,0,1
class UserConfigs:
    """Per-user window-capture geometry, overlay geometry, and network config.

    The active user is taken from the PYTHON_USER environment variable at
    construction time; any value other than Thaloria or Eddie falls back to
    Adwa.  Lookup methods return None for an unrecognised user.
    """

    THALOUSER = "Thaloria"
    ADWAUSER = "Adwa"
    EDDIEUSER = "Eddie"

    user: str

    def __init__(self):
        """Resolve the active user from the environment (default: Adwa)."""
        candidate = os.environ.get('PYTHON_USER')
        recognised = (self.THALOUSER, self.EDDIEUSER)
        self.user = candidate if candidate in recognised else self.ADWAUSER

    def returnStunWindowPos(self):
        # Capture geometry for the "stun" window.
        # NOTE(review): callers read index [2]/[3] as x/y offsets, so the
        # layout is presumably [width, height, left, top] -- TODO confirm.
        table = {
            self.THALOUSER: [1800, 1150, 0, 0],
            self.ADWAUSER: [740, 450, 1625, 985],
            self.EDDIEUSER: [740, 450, 1625, 985],
        }
        return table.get(self.user)

    def returnPoisonWindowPos(self):
        # Capture geometry for the "poison" window (identical for all users,
        # kept as a per-user table for symmetry with the other lookups).
        table = {
            self.THALOUSER: [740, 442, 1625, 985],
            self.ADWAUSER: [740, 442, 1625, 985],
            self.EDDIEUSER: [740, 442, 1625, 985],
        }
        return table.get(self.user)

    def returnPoisonOverlayPos(self):
        # Tk-style geometry string "WxH+X+Y" for the poison overlay.
        table = {
            self.THALOUSER: '160x160+-900+760',
            self.ADWAUSER: '160x160+-900+760',
            self.EDDIEUSER: '160x160+-900+760',
        }
        return table.get(self.user)

    def returnStunOverlayPos(self):
        # Tk-style geometry string for the stun overlay.
        table = {
            self.THALOUSER: '160x160+-1360+350',
            self.ADWAUSER: '160x160+-900+600',
            self.EDDIEUSER: '160x160+-1360+350',
        }
        return table.get(self.user)

    def returnEnemyPlayerOverlayPos(self):
        # Tk-style geometry string for the enemy-player overlay.
        table = {
            self.THALOUSER: '330x45+-900+920',
            self.ADWAUSER: '160x160+-900+600',
            self.EDDIEUSER: '330x45+-900+920',
        }
        return table.get(self.user)

    def return_database_ip(self):
        # Thaloria runs against the LAN address; everyone else uses DDNS.
        if self.user == self.THALOUSER:
            return "192.168.178.201"
        return "thalo.ddns.net"
def run():
    """Automation loop for the digging mini-game.

    Finds one of the known tile needles on screen, infers the grid geometry
    from the match, clicks every cell of a 30x24 grid, then dismisses the OK
    dialog if it appears.  'o' resumes the loop, 'p' pauses it.

    NOTE(review): depends on project classes WindowCapture/StunVision and on
    the needle jpgs in the working directory -- not runnable standalone.
    """
    config = UserConfigs()

    # Attach to the game window; on failure fall back to "video mode", which
    # currently just exits the main loop immediately.
    try:
        capture_window = WindowCapture(None, "stun", config)
        video_mode = False
    except Exception:  # narrowed from bare except so Ctrl+C still propagates
        video_mode = True

    vision_stun = StunVision()

    loop_time = time()
    pause = True

    # FIX: the needle images were re-read from disk on every loop iteration,
    # and "D3.jpg" was appended twice; load each needle exactly once up front.
    needle_files = ["wtf.jpg", "Brown0.jpg", "1.jpg", "2.jpg", "3.jpg", "4.jpg",
                    "H1.jpg", "H2.jpg", "H3.jpg", "H4.jpg",
                    "D1.jpg", "D2.jpg", "D3.jpg", "D4.jpg", "D5.jpg",
                    "D6.jpg", "D7.jpg"]
    needles = [cv.imread(name, cv.IMREAD_UNCHANGED) for name in needle_files]
    ok_needle = cv.imread("ok_button.jpg", cv.IMREAD_UNCHANGED)

    while True:
        # Global pause toggle.
        if keyboard.is_pressed('p'):
            pause = True
            print('p pressed')  # FIX: message previously said 'q pressed'
        elif keyboard.is_pressed('o'):
            pause = False
            print('o pressed')
        if pause:
            print("pausing")
            continue

        if video_mode:
            break
        try:
            screenshot = capture_window.get_screenshot()
        except Exception:
            capture_window.release()
            print("Game window not available - shutting down application")
            break

        for needle in needles:
            screenshot = capture_window.get_screenshot()
            rectangles = vision_stun.find(screenshot, needle, 0.7, 1)
            if len(rectangles) == 0:
                continue

            points = vision_stun.get_click_points(rectangles)
            for point in points:
                # Derive grid geometry from the first match: cell size plus
                # how many cells the match sits from the grid's top-left.
                size = rectangles[0][2] + 1
                left = int(round(rectangles[0][0] / size, 0))
                down = int(round(rectangles[0][1] / size, 0))
                # Window offsets -- indices [2]/[3] of the stun window pos.
                offset_left = config.returnStunWindowPos()[2]
                offset_down = config.returnStunWindowPos()[3]

                # Click every cell of the 30-wide x 24-tall grid, starting at
                # the inferred top-left corner.
                start_left = point[0] - (size * left)
                start_up = point[1] - (size * down)
                for y in range(start_up, start_up + (size * 24), size):
                    for x in range(start_left, start_left + (size * 30), size):
                        pydirectinput.moveTo(x + offset_left, y + offset_down)
                        pydirectinput.mouseDown()
                        cv.waitKey(150 + random.randint(1, 100))  # humanised
                        pydirectinput.mouseUp()
                        if keyboard.is_pressed('p') or pause:
                            pause = True
                            break

                # Dismiss the confirmation dialog if it appeared.
                screenshot = capture_window.get_screenshot()
                rectangles = vision_stun.find(screenshot, ok_needle, 0.8, 1)
                # Result unused, but kept in case draw_rectangles mutates the
                # screenshot in place -- TODO confirm and drop if pure.
                output_image = vision_stun.draw_rectangles(screenshot, rectangles)
                if len(rectangles) == 1:
                    for ok_point in vision_stun.get_click_points(rectangles):
                        pydirectinput.moveTo(ok_point[0] + offset_left,
                                             ok_point[1] + offset_down)
                        pydirectinput.mouseDown()
                        cv.waitKey(150 + random.randint(1, 100))
                        pydirectinput.mouseUp()

                if keyboard.is_pressed('p') or pause:
                    pause = True
                    break
            if keyboard.is_pressed('p') or pause:
                pause = True
                break
        if keyboard.is_pressed('p') or pause:
            pause = True
            break

        # Debug the loop rate.
        print('FPS {}'.format(1 / (time() - loop_time)))
        loop_time = time()
        cv.waitKey(150)
Binary files /dev/null and b/equip/book_1_32.jpg differ diff --git a/equip/book_2_32.jpg b/equip/book_2_32.jpg new file mode 100644 index 0000000..ff9c107 Binary files /dev/null and b/equip/book_2_32.jpg differ diff --git a/equip/book_3_32.jpg b/equip/book_3_32.jpg new file mode 100644 index 0000000..aa3a230 Binary files /dev/null and b/equip/book_3_32.jpg differ diff --git a/equip/book_4_32.jpg b/equip/book_4_32.jpg new file mode 100644 index 0000000..60e57dc Binary files /dev/null and b/equip/book_4_32.jpg differ diff --git a/equip/book_5_32.jpg b/equip/book_5_32.jpg new file mode 100644 index 0000000..1fb9a10 Binary files /dev/null and b/equip/book_5_32.jpg differ diff --git a/equip/book_6_32.jpg b/equip/book_6_32.jpg new file mode 100644 index 0000000..5d55bfd Binary files /dev/null and b/equip/book_6_32.jpg differ diff --git a/equip/book_7_32.jpg b/equip/book_7_32.jpg new file mode 100644 index 0000000..613e70c Binary files /dev/null and b/equip/book_7_32.jpg differ diff --git a/equip/chest_1_32.jpg b/equip/chest_1_32.jpg new file mode 100644 index 0000000..3a4391a Binary files /dev/null and b/equip/chest_1_32.jpg differ diff --git a/equip/chest_2_32.jpg b/equip/chest_2_32.jpg new file mode 100644 index 0000000..17c547b Binary files /dev/null and b/equip/chest_2_32.jpg differ diff --git a/equip/chest_3_32.jpg b/equip/chest_3_32.jpg new file mode 100644 index 0000000..6ce5e08 Binary files /dev/null and b/equip/chest_3_32.jpg differ diff --git a/equip/chest_4_32.jpg b/equip/chest_4_32.jpg new file mode 100644 index 0000000..9f482a7 Binary files /dev/null and b/equip/chest_4_32.jpg differ diff --git a/equip/key_1_32.jpg b/equip/key_1_32.jpg new file mode 100644 index 0000000..d222d1e Binary files /dev/null and b/equip/key_1_32.jpg differ diff --git a/equip/key_2_32.jpg b/equip/key_2_32.jpg new file mode 100644 index 0000000..434ba0e Binary files /dev/null and b/equip/key_2_32.jpg differ diff --git a/equip/key_3_32.jpg b/equip/key_3_32.jpg new file mode 100644 
index 0000000..bda5ccb Binary files /dev/null and b/equip/key_3_32.jpg differ diff --git a/equip/key_4_32.jpg b/equip/key_4_32.jpg new file mode 100644 index 0000000..3611eb4 Binary files /dev/null and b/equip/key_4_32.jpg differ diff --git a/equip/main_screen.jpg b/equip/main_screen.jpg new file mode 100644 index 0000000..90ca14b Binary files /dev/null and b/equip/main_screen.jpg differ diff --git a/equip/mush_1_32.jpg b/equip/mush_1_32.jpg new file mode 100644 index 0000000..caee857 Binary files /dev/null and b/equip/mush_1_32.jpg differ diff --git a/equip/mush_2_32.jpg b/equip/mush_2_32.jpg new file mode 100644 index 0000000..6e2e828 Binary files /dev/null and b/equip/mush_2_32.jpg differ diff --git a/equip/mush_3_32.jpg b/equip/mush_3_32.jpg new file mode 100644 index 0000000..54e3bb1 Binary files /dev/null and b/equip/mush_3_32.jpg differ diff --git a/equip/mush_4_32.jpg b/equip/mush_4_32.jpg new file mode 100644 index 0000000..f40d10c Binary files /dev/null and b/equip/mush_4_32.jpg differ diff --git a/equip/mush_5_32.jpg b/equip/mush_5_32.jpg new file mode 100644 index 0000000..c8cb9d6 Binary files /dev/null and b/equip/mush_5_32.jpg differ diff --git a/equip/mush_6_32.jpg b/equip/mush_6_32.jpg new file mode 100644 index 0000000..3dbb47c Binary files /dev/null and b/equip/mush_6_32.jpg differ diff --git a/equip/mush_7_32.jpg b/equip/mush_7_32.jpg new file mode 100644 index 0000000..3b4d6bc Binary files /dev/null and b/equip/mush_7_32.jpg differ diff --git a/equip/mush_8_32.jpg b/equip/mush_8_32.jpg new file mode 100644 index 0000000..dd72e69 Binary files /dev/null and b/equip/mush_8_32.jpg differ diff --git a/equip/mush_e2_32.jpg b/equip/mush_e2_32.jpg new file mode 100644 index 0000000..4b081eb Binary files /dev/null and b/equip/mush_e2_32.jpg differ diff --git a/equip/mush_e_32.jpg b/equip/mush_e_32.jpg new file mode 100644 index 0000000..7b1b06a Binary files /dev/null and b/equip/mush_e_32.jpg differ diff --git a/equip/pot_1_32.jpg b/equip/pot_1_32.jpg 
def run():
    """Automation loop for the equipment/inventory merging mini-game.

    Handles the mushroom spawn buttons, then for each item needle keeps
    merging groups of five identical items: matches 2-4 are dragged next to
    the first match (70 px grid) via check_move(), and the fifth is dropped
    onto the first via move().  'o' resumes the loop, 'p' pauses it.

    NOTE(review): depends on project classes WindowCapture/StunVision and on
    the equip/ needle jpgs -- not runnable standalone.
    """
    config = UserConfigs()

    # Attach to the game window; on failure fall back to "video mode", which
    # currently just exits the main loop immediately.
    try:
        capture_window = WindowCapture(None, "stun", config)
        video_mode = False
    except Exception:  # narrowed from bare except so Ctrl+C still propagates
        video_mode = True

    vision_stun = StunVision()

    loop_time = time()
    pause = True

    # FIX: needle images and HSV filters were rebuilt (with disk reads) on
    # every loop iteration; build both lists once, up front.  Order matters:
    # the first four entries get specific HSV filters below.
    needle_files = [
        "chest_2_32.jpg", "book_1_32.jpg", "book_2_32.jpg", "book_5_32.jpg",
        "mush_1_32.jpg", "mush_2_32.jpg", "mush_3_32.jpg", "mush_4_32.jpg",
        "mush_5_32.jpg", "mush_6_32.jpg", "mush_7_32.jpg", "mush_8_32.jpg",
        "pot_1_32.jpg", "pot_2_32.jpg", "pot_3_32.jpg", "pot_4_32.jpg",
        "pot_5_32.jpg", "pot_6_32.jpg",
        "bag_1_32.jpg", "bag_2_32.jpg", "bag_3_32.jpg", "bag_4_32.jpg",
        "bag_5_32.jpg", "bag_6_32.jpg",
        "book_3_32.jpg", "book_4_32.jpg", "book_6_32.jpg", "book_7_32.jpg",
    ]
    needles = [cv.imread("equip/" + name, cv.IMREAD_UNCHANGED)
               for name in needle_files]

    # First four needles get specific filters; the remainder use a wide-open
    # pass-through filter (padded well past len(needles), as before).
    hsv = [HsvFilter(14, 0, 0, 179, 255, 255, 0, 0, 0, 0),
           HsvFilter(0, 128, 0, 179, 255, 255, 0, 0, 0, 0),
           HsvFilter(49, 0, 0, 179, 255, 255, 0, 0, 0, 0),
           HsvFilter(0, 95, 137, 179, 255, 255, 0, 0, 0, 0)]
    hsv += [HsvFilter(0, 0, 0, 179, 255, 255, 0, 0, 0, 0) for _ in range(49)]

    spawn_1_needle = cv.imread("equip/mush_e2_32.jpg", cv.IMREAD_UNCHANGED)
    spawn_0_needle = cv.imread("equip/mush_e_32.jpg", cv.IMREAD_UNCHANGED)

    while True:
        # Global pause toggle.
        if keyboard.is_pressed('p'):
            pause = True
            print('p pressed')  # FIX: message previously said 'q pressed'
        elif keyboard.is_pressed('o'):
            pause = False
            print('o pressed')
        if pause:
            print("pausing")
            continue

        if video_mode:
            break
        try:
            screenshot = capture_window.get_screenshot()
        except Exception:
            capture_window.release()
            print("Game window not available - shutting down application")
            break

        # Spawn handling: hammer the active spawn button 15 times; otherwise
        # click the inactive one per match and retry next iteration.
        spawn_1 = vision_stun.find(screenshot, spawn_1_needle, 0.4, 1)
        if len(spawn_1) == 1:
            points = vision_stun.get_click_points(spawn_1)
            for _ in range(15):
                pydirectinput.moveTo(points[0][0], points[0][1])
                pydirectinput.mouseDown()
                cv.waitKey(30 + random.randint(1, 50))  # humanised delay
                pydirectinput.mouseUp()
                if keyboard.is_pressed('p'):
                    pause = True
                    print('p pressed')
                    break
        else:
            spawn_0 = vision_stun.find(screenshot, spawn_0_needle, 0.7, 1)
            for point in vision_stun.get_click_points(spawn_0):
                pydirectinput.moveTo(point[0], point[1])
                pydirectinput.mouseDown()
                cv.waitKey(500)
                pydirectinput.mouseUp()
                if keyboard.is_pressed('p'):
                    pause = True
                    print('p pressed')
                    break
            continue

        for idx in range(len(needles)):
            # Keep merging while exactly five matches are on screen.
            while True:
                screenshot = capture_window.get_screenshot()
                processed_screenshot = vision_stun.apply_hsv_filter(screenshot, hsv[idx])
                processed_needle = vision_stun.apply_hsv_filter(needles[idx], hsv[idx])
                rectangles = vision_stun.find(processed_screenshot, processed_needle, 0.8, 5)

                # FIX: was `len(rectangles) is not 5` -- identity comparison
                # against an int literal (works only by CPython small-int
                # caching accident); use value equality.
                if len(rectangles) != 5:
                    break

                if keyboard.is_pressed('p'):
                    pause = True
                    print('p pressed')
                    break

                points = vision_stun.get_click_points(rectangles)
                # Pack matches 2-4 around the first match on a 70 px grid,
                # then drop the fifth onto the first.
                check_move(capture_window, vision_stun,
                           [70, 70, rectangles[0][0] + 70, rectangles[0][1]],
                           needles[idx], hsv[idx],
                           points[1], [points[0][0] + 70, points[0][1]])
                check_move(capture_window, vision_stun,
                           [70, 70, rectangles[0][0], rectangles[0][1] + 70],
                           needles[idx], hsv[idx],
                           points[2], [points[0][0], points[0][1] + 70])
                check_move(capture_window, vision_stun,
                           [70, 70, rectangles[0][0] + 70, rectangles[0][1] + 70],
                           needles[idx], hsv[idx],
                           points[3], [points[0][0] + 70, points[0][1] + 70])
                move(points[4], [points[0][0], points[0][1]])
                if keyboard.is_pressed('p'):
                    pause = True
                    print('p pressed')
                    break

        # Debug the loop rate.
        print('FPS {}'.format(1 / (time() - loop_time)))
        loop_time = time()
        cv.waitKey(150)
def move(point_source, point_dest):
    """Drag the mouse from point_source to point_dest with pydirectinput.

    Uses one random delay (250-350 ms) for both the hold and the settle wait
    so the input looks less mechanical.
    """
    pydirectinput.moveTo(point_source[0], point_source[1])
    pydirectinput.mouseDown()
    delay = 250 + random.randint(1, 100)
    cv.waitKey(delay)
    pydirectinput.moveTo(point_dest[0], point_dest[1])
    pydirectinput.mouseUp()
    cv.waitKey(delay)
    if keyboard.is_pressed('p'):
        # FIX: message previously said 'q pressed'.  The original also set a
        # local `pause = True` that was never read -- the pause state lives in
        # run(), which re-checks the key itself, so the dead assignment is
        # dropped and only the log message kept.
        print('p pressed')


def check_move(capture_win, visio, rect, needl, hsv, point_source, point_dest):
    """Drag needl's match from point_source to point_dest unless the target
    area already holds a match of the same needle.

    rect is passed straight to capture_win.get_screenshot_by_area();
    presumably [width, height, x, y] -- TODO confirm against WindowCapture.
    """
    screenshot_pos = capture_win.get_screenshot_by_area(rect)
    processed_screenshot = visio.apply_hsv_filter(screenshot_pos, hsv)
    processed_needle = visio.apply_hsv_filter(needl, hsv)
    rectangles2 = visio.find(processed_screenshot, processed_needle, 0.7, 1)
    if len(rectangles2) == 1:
        # Destination slot already filled -- nothing to do.
        return
    # Destination vacant: perform the drag.
    move(point_source, point_dest)
    if keyboard.is_pressed('p'):
        # FIX: as in move() -- corrected message, dead local `pause` dropped.
        print('p pressed')
# initialize the WindowCapture class
# NOTE(review): called here with a single window-title argument, unlike the
# WindowCapture(None, "stun", config) form used by the other scripts in this
# patch -- verify the constructor signature before running.
wincap = WindowCapture('Albion Online Client')
# initialize the Vision class
# NOTE(review): `Vision` is never defined or imported in this file (only
# StunVision is imported above), so this line raises NameError at runtime --
# confirm which class was intended.
vision_limestone = Vision('albion_limestone_processed.jpg')
# initialize the trackbar window
vision_limestone.init_control_gui()

# limestone HSV filter
hsv_filter = HsvFilter(0, 180, 129, 15, 229, 243, 143, 0, 67, 0)

loop_time = time()
while(True):

    # get an updated image of the game
    screenshot = wincap.get_screenshot()

    # pre-process the image
    processed_image = vision_limestone.apply_hsv_filter(screenshot, hsv_filter)

    # do object detection
    # NOTE(review): two-argument find(image, threshold) -- the StunVision.find
    # used elsewhere in this patch takes (image, needle, threshold, count);
    # presumably this script targets a different Vision class -- confirm.
    rectangles = vision_limestone.find(processed_image, 0.46)

    # draw the detection results onto the original image
    output_image = vision_limestone.draw_rectangles(screenshot, rectangles)

    # display the processed image
    cv.imshow('Processed', processed_image)
    cv.imshow('Matches', output_image)

    # debug the loop rate
    print('FPS {}'.format(1 / (time() - loop_time)))
    loop_time = time()
+ # waits 1 ms every loop to process key presses + if cv.waitKey(1) == ord('q'): + cv.destroyAllWindows() + break + +print('Done.') \ No newline at end of file diff --git a/hsvfilter.py b/hsvfilter.py new file mode 100644 index 0000000..d537bd2 --- /dev/null +++ b/hsvfilter.py @@ -0,0 +1,16 @@ + +# custom data structure to hold the state of an HSV filter +class HsvFilter: + + def __init__(self, hMin=None, sMin=None, vMin=None, hMax=None, sMax=None, vMax=None, + sAdd=None, sSub=None, vAdd=None, vSub=None): + self.hMin = hMin + self.sMin = sMin + self.vMin = vMin + self.hMax = hMax + self.sMax = sMax + self.vMax = vMax + self.sAdd = sAdd + self.sSub = sSub + self.vAdd = vAdd + self.vSub = vSub diff --git a/magic/body_0.jpg b/magic/body_0.jpg new file mode 100644 index 0000000..18447ea Binary files /dev/null and b/magic/body_0.jpg differ diff --git a/magic/body_b.jpg b/magic/body_b.jpg new file mode 100644 index 0000000..742bef4 Binary files /dev/null and b/magic/body_b.jpg differ diff --git a/magic/body_g.jpg b/magic/body_g.jpg new file mode 100644 index 0000000..5741eec Binary files /dev/null and b/magic/body_g.jpg differ diff --git a/magic/body_orange.jpg b/magic/body_orange.jpg new file mode 100644 index 0000000..d8c4f58 Binary files /dev/null and b/magic/body_orange.jpg differ diff --git a/magic/body_p.jpg b/magic/body_p.jpg new file mode 100644 index 0000000..11c6731 Binary files /dev/null and b/magic/body_p.jpg differ diff --git a/magic/body_w.jpg b/magic/body_w.jpg new file mode 100644 index 0000000..0994ba8 Binary files /dev/null and b/magic/body_w.jpg differ diff --git a/magic/body_y.jpg b/magic/body_y.jpg new file mode 100644 index 0000000..9bcd0fb Binary files /dev/null and b/magic/body_y.jpg differ diff --git a/magic/collect.jpg b/magic/collect.jpg new file mode 100644 index 0000000..f709071 Binary files /dev/null and b/magic/collect.jpg differ diff --git a/magic/finding_0.jpg b/magic/finding_0.jpg new file mode 100644 index 0000000..afa8273 Binary files 
/dev/null and b/magic/finding_0.jpg differ diff --git a/magic/finding_b.jpg b/magic/finding_b.jpg new file mode 100644 index 0000000..6317681 Binary files /dev/null and b/magic/finding_b.jpg differ diff --git a/magic/finding_g.jpg b/magic/finding_g.jpg new file mode 100644 index 0000000..4760705 Binary files /dev/null and b/magic/finding_g.jpg differ diff --git a/magic/finding_orange.jpg b/magic/finding_orange.jpg new file mode 100644 index 0000000..e6204ea Binary files /dev/null and b/magic/finding_orange.jpg differ diff --git a/magic/finding_p.jpg b/magic/finding_p.jpg new file mode 100644 index 0000000..e559338 Binary files /dev/null and b/magic/finding_p.jpg differ diff --git a/magic/finding_w.jpg b/magic/finding_w.jpg new file mode 100644 index 0000000..dbecdbd Binary files /dev/null and b/magic/finding_w.jpg differ diff --git a/magic/finding_y.jpg b/magic/finding_y.jpg new file mode 100644 index 0000000..54165df Binary files /dev/null and b/magic/finding_y.jpg differ diff --git a/magic/mind_0.jpg b/magic/mind_0.jpg new file mode 100644 index 0000000..196aa99 Binary files /dev/null and b/magic/mind_0.jpg differ diff --git a/magic/mind_b.jpg b/magic/mind_b.jpg new file mode 100644 index 0000000..ecf0f93 Binary files /dev/null and b/magic/mind_b.jpg differ diff --git a/magic/mind_g.jpg b/magic/mind_g.jpg new file mode 100644 index 0000000..dd2a539 Binary files /dev/null and b/magic/mind_g.jpg differ diff --git a/magic/mind_orange.jpg b/magic/mind_orange.jpg new file mode 100644 index 0000000..d3d542a Binary files /dev/null and b/magic/mind_orange.jpg differ diff --git a/magic/mind_p.jpg b/magic/mind_p.jpg new file mode 100644 index 0000000..4855131 Binary files /dev/null and b/magic/mind_p.jpg differ diff --git a/magic/mind_w.jpg b/magic/mind_w.jpg new file mode 100644 index 0000000..cd2fc19 Binary files /dev/null and b/magic/mind_w.jpg differ diff --git a/magic/mind_y.jpg b/magic/mind_y.jpg new file mode 100644 index 0000000..d9e990b Binary files /dev/null and 
b/magic/mind_y.jpg differ diff --git a/magic/perceiving_0.jpg b/magic/perceiving_0.jpg new file mode 100644 index 0000000..13f67ef Binary files /dev/null and b/magic/perceiving_0.jpg differ diff --git a/magic/perceiving_b.jpg b/magic/perceiving_b.jpg new file mode 100644 index 0000000..5fd1fe7 Binary files /dev/null and b/magic/perceiving_b.jpg differ diff --git a/magic/perceiving_g.jpg b/magic/perceiving_g.jpg new file mode 100644 index 0000000..5726dfd Binary files /dev/null and b/magic/perceiving_g.jpg differ diff --git a/magic/perceiving_orange.jpg b/magic/perceiving_orange.jpg new file mode 100644 index 0000000..9ba6ba1 Binary files /dev/null and b/magic/perceiving_orange.jpg differ diff --git a/magic/perceiving_p.jpg b/magic/perceiving_p.jpg new file mode 100644 index 0000000..6358372 Binary files /dev/null and b/magic/perceiving_p.jpg differ diff --git a/magic/perceiving_w.jpg b/magic/perceiving_w.jpg new file mode 100644 index 0000000..1909ce8 Binary files /dev/null and b/magic/perceiving_w.jpg differ diff --git a/magic/perceiving_y.jpg b/magic/perceiving_y.jpg new file mode 100644 index 0000000..d205a91 Binary files /dev/null and b/magic/perceiving_y.jpg differ diff --git a/magic/physical_0.jpg b/magic/physical_0.jpg new file mode 100644 index 0000000..c6a150f Binary files /dev/null and b/magic/physical_0.jpg differ diff --git a/magic/physical_b.jpg b/magic/physical_b.jpg new file mode 100644 index 0000000..df85356 Binary files /dev/null and b/magic/physical_b.jpg differ diff --git a/magic/physical_g.jpg b/magic/physical_g.jpg new file mode 100644 index 0000000..bc59432 Binary files /dev/null and b/magic/physical_g.jpg differ diff --git a/magic/physical_orange.jpg b/magic/physical_orange.jpg new file mode 100644 index 0000000..0c44ddb Binary files /dev/null and b/magic/physical_orange.jpg differ diff --git a/magic/physical_p.jpg b/magic/physical_p.jpg new file mode 100644 index 0000000..fd28337 Binary files /dev/null and b/magic/physical_p.jpg differ diff 
--git a/magic/physical_w.jpg b/magic/physical_w.jpg new file mode 100644 index 0000000..52d10d7 Binary files /dev/null and b/magic/physical_w.jpg differ diff --git a/magic/physical_y.jpg b/magic/physical_y.jpg new file mode 100644 index 0000000..77896e9 Binary files /dev/null and b/magic/physical_y.jpg differ diff --git a/magic/seeing_0.jpg b/magic/seeing_0.jpg new file mode 100644 index 0000000..9bae061 Binary files /dev/null and b/magic/seeing_0.jpg differ diff --git a/magic/seeing_b.jpg b/magic/seeing_b.jpg new file mode 100644 index 0000000..ef1c6a8 Binary files /dev/null and b/magic/seeing_b.jpg differ diff --git a/magic/seeing_g.jpg b/magic/seeing_g.jpg new file mode 100644 index 0000000..03011ca Binary files /dev/null and b/magic/seeing_g.jpg differ diff --git a/magic/seeing_orange.jpg b/magic/seeing_orange.jpg new file mode 100644 index 0000000..3f35d99 Binary files /dev/null and b/magic/seeing_orange.jpg differ diff --git a/magic/seeing_p.jpg b/magic/seeing_p.jpg new file mode 100644 index 0000000..202fff1 Binary files /dev/null and b/magic/seeing_p.jpg differ diff --git a/magic/seeing_w.jpg b/magic/seeing_w.jpg new file mode 100644 index 0000000..d63da97 Binary files /dev/null and b/magic/seeing_w.jpg differ diff --git a/magic/seeing_y.jpg b/magic/seeing_y.jpg new file mode 100644 index 0000000..289e4f1 Binary files /dev/null and b/magic/seeing_y.jpg differ diff --git a/magic/spawn_0.jpg b/magic/spawn_0.jpg new file mode 100644 index 0000000..31fc3ae Binary files /dev/null and b/magic/spawn_0.jpg differ diff --git a/magic/spawn_1.jpg b/magic/spawn_1.jpg new file mode 100644 index 0000000..dd7bdc2 Binary files /dev/null and b/magic/spawn_1.jpg differ diff --git a/magic/spiritual_0.jpg b/magic/spiritual_0.jpg new file mode 100644 index 0000000..3d9eaa9 Binary files /dev/null and b/magic/spiritual_0.jpg differ diff --git a/magic/spiritual_b.jpg b/magic/spiritual_b.jpg new file mode 100644 index 0000000..45ba048 Binary files /dev/null and 
b/magic/spiritual_b.jpg differ diff --git a/magic/spiritual_g.jpg b/magic/spiritual_g.jpg new file mode 100644 index 0000000..a6fea40 Binary files /dev/null and b/magic/spiritual_g.jpg differ diff --git a/magic/spiritual_orange.jpg b/magic/spiritual_orange.jpg new file mode 100644 index 0000000..7ef777c Binary files /dev/null and b/magic/spiritual_orange.jpg differ diff --git a/magic/spiritual_p.jpg b/magic/spiritual_p.jpg new file mode 100644 index 0000000..f004dd0 Binary files /dev/null and b/magic/spiritual_p.jpg differ diff --git a/magic/spiritual_w.jpg b/magic/spiritual_w.jpg new file mode 100644 index 0000000..022e70e Binary files /dev/null and b/magic/spiritual_w.jpg differ diff --git a/magic/spiritual_y.jpg b/magic/spiritual_y.jpg new file mode 100644 index 0000000..153eef1 Binary files /dev/null and b/magic/spiritual_y.jpg differ diff --git a/ok_button.jpg b/ok_button.jpg new file mode 100644 index 0000000..d78f586 Binary files /dev/null and b/ok_button.jpg differ diff --git a/requirements.txt b/requirements.txt new file mode 100644 index 0000000..2ae109b --- /dev/null +++ b/requirements.txt @@ -0,0 +1,11 @@ +opencv-python~=4.3.0.36 +pytesseract~=0.3.4 +numpy~=1.19.1 +scikit-image~=0.18.1 +mariadb~=1.0.0 +requests~=2.24.0 +pyquery~=1.4.1 +pywin32 +Pillow~=7.2.0 +pydirectinput +keyboard \ No newline at end of file diff --git a/sharpening.py b/sharpening.py new file mode 100644 index 0000000..61ac94e --- /dev/null +++ b/sharpening.py @@ -0,0 +1,67 @@ +import cv2 +import numpy as np + + + +image = cv2.imread("digging.jpg") + +cv2.imshow("Image", image) + +gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY) +cv2.imshow("gray", gray) + +blur = cv2.GaussianBlur(gray, (5,5), 0) +cv2.imshow("blur", blur) + +thresh = cv2.adaptiveThreshold(blur, 255, 1, 1, 11, 2) + +#thresh = cv2.bitwise_not(thresh) + +cv2.imshow("thresh", thresh) +#cv2.waitKey() +contours, _ = cv2.findContours(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE) + +max_area = 0 +c = 0 +for i in contours: + 
area = cv2.contourArea(i) + if area > 1000: + if area > max_area: + max_area = area + best_cnt = i + image = cv2.drawContours(image, contours, c, (0, 255, 0), 1) + c+=1 + +mask = np.zeros((gray.shape),np.uint8) +cv2.drawContours(mask,[best_cnt],0,255,-1) +cv2.drawContours(mask,[best_cnt],0,0,1) +cv2.imshow("mask", mask) + +out = np.zeros_like(gray) +out[mask == 255] = gray[mask == 255] +cv2.imshow("New image", out) + +blur = cv2.GaussianBlur(out, (5,5), 0) +cv2.imshow("blur1", blur) + +thresh = cv2.adaptiveThreshold(blur, 255, 1, 1, 11, 2) +thresh = cv2.bitwise_not(thresh) +cv2.imshow("thresh1", thresh) + +contours, _ = cv2.findContours(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE) + +squares = [] +c = 0 +for i in contours: + area = cv2.contourArea(i) + cnt_len = cv2.arcLength(i, True) + cnt = cv2.approxPolyDP(i, 0.01 * cnt_len, True) + if area > 1000/2: + squares.append(cnt) + #cv2.drawContours(image, contours, c, (0, 255, 0), 1) + c+=1 +cv2.drawContours(image, squares, -1, (0, 255, 0), 1) + +cv2.imshow("Final Image", image) +cv2.waitKey(0) +cv2.destroyAllWindows() \ No newline at end of file diff --git a/squares.py b/squares.py new file mode 100644 index 0000000..c9021dc --- /dev/null +++ b/squares.py @@ -0,0 +1,44 @@ +import cv2 +import numpy as np + +def angle_cos(p0, p1, p2): + d1, d2 = (p0-p1).astype('float'), (p2-p1).astype('float') + return abs( np.dot(d1, d2) / np.sqrt( np.dot(d1, d1)*np.dot(d2, d2) ) ) + +def find_squares(img): + squares = [] + gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY) + # cv2.imshow("gray", gray) + + gaussian = cv2.GaussianBlur(gray, (5, 5), 0) + + thresh = cv2.adaptiveThreshold(gaussian, 255, 1, 1, 11, 2) + #temp,bin = cv2.threshold(gaussian, 80, 255, cv2.THRESH_BINARY) + cv2.imshow("thresh", thresh) + + contours, hierarchy = cv2.findContours(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE) + + cv2.drawContours( gray, contours, -1, (0, 255, 0), 3 ) + + #cv2.imshow('contours', gray) + for cnt in contours: + cnt_len = 
cv2.arcLength(cnt, True) + cnt = cv2.approxPolyDP(cnt, 0.02*cnt_len, True) + if cv2.contourArea(cnt) > 1000 : + #cnt = cnt.reshape(-1, 2) + #max_cos = np.max([angle_cos( cnt[i], cnt[(i+1) % 4], cnt[(i+2) % 4] ) for i in range(4)]) + #if max_cos < 0.1: + squares.append(cnt) + return squares + +if __name__ == '__main__': + img = cv2.imread('equip/main_screen.jpg') + + #cv2.imshow("origin", img) + + squares = find_squares(img) + print("Find %d squres" % len(squares)) + cv2.drawContours( img, squares, -1, (0, 255, 0), 3 ) + cv2.imshow('squares', img) + + cv2.waitKey() \ No newline at end of file diff --git a/stun_main.py b/stun_main.py new file mode 100644 index 0000000..31790ea --- /dev/null +++ b/stun_main.py @@ -0,0 +1,157 @@ +import random +from time import time +from window_capture import WindowCapture +from stun_vision import StunVision +import cv2 as cv +import pytesseract +from hsvfilter import HsvFilter +from config_file import UserConfigs +#import pyautogui +import pydirectinput +import keyboard +from tresh_util import super_tresh_main, super_tresh_needle + + +def run(): + # initialize the user-class + config = UserConfigs() + + # initialize the StunWindowCapture class + try: + capture_window = WindowCapture( + None, "stun", config) + video_mode = False + except: + # StunWindowCapture.list_window_names() + #print("Game not running, switching to video mode") + #capture_window = cv.VideoCapture("snip_slam.mp4") + video_mode = True + + # initialize the StunVision class + vision_stun = StunVision() + # initialize the StunOverlay class + hsv_filter = HsvFilter(0, 0, 124, 15, 255, 168, 0, 255, 0, 0) + + loop_time = time() + event_time = 0.0 + pointstore = [] + max_results = 0 + pause = True + while True: + if keyboard.is_pressed('p') == True: + pause = True + print('q pressed') + elif keyboard.is_pressed('o') == True: + pause = False + print('o pressed') + if pause: + #cv.waitKey(500) + print("pausing") + continue + + ''' + # 30 units = 46 + 1 * 30 + for f in 
range(0, 30, 3): + for i in range(430, 1705, 47 - f): + for e in range(45, 470, 47 - f): + + pydirectinput.moveTo(i, e) + pydirectinput.mouseDown() + w = random.randint(1, 100) + cv.waitKey(1000 + w) + pydirectinput.mouseUp() + if keyboard.is_pressed('p') == True or pause == True: + pause = True + break + if keyboard.is_pressed('p') == True or pause == True: + pause = True + break + ''' + + + if video_mode: + break + else: + try: + # get an updated image of the game + screenshot = capture_window.get_screenshot() + # screenshot = cv.imread("buffbar.jpg") + except: + capture_window.release() + print("Game window not available - shutting down application") + break + #cv.imshow("screenshot", screenshot) + #cv.waitKey(150) + #continue + + needles = [] + #needles.append(cv.imread("wtf.jpg", cv.IMREAD_UNCHANGED)) + #needles.append(cv.imread("Brown0.jpg", cv.IMREAD_UNCHANGED)) + #needles.append(cv.imread("1.jpg", cv.IMREAD_UNCHANGED)) + #needles.append(cv.imread("2.jpg", cv.IMREAD_UNCHANGED)) + #needles.append(cv.imread("3.jpg", cv.IMREAD_UNCHANGED)) + #needles.append(cv.imread("4.jpg", cv.IMREAD_UNCHANGED)) + needles.append(cv.imread("H1.jpg", cv.IMREAD_UNCHANGED)) + needles.append(cv.imread("H2.jpg", cv.IMREAD_UNCHANGED)) + needles.append(cv.imread("H3.jpg", cv.IMREAD_UNCHANGED)) + needles.append(cv.imread("H4.jpg", cv.IMREAD_UNCHANGED)) + needles.append(cv.imread("D1.jpg", cv.IMREAD_UNCHANGED)) + #needles.append(cv.imread("D2.jpg", cv.IMREAD_UNCHANGED)) + #needles.append(cv.imread("D3.jpg", cv.IMREAD_UNCHANGED)) + #needles.append(cv.imread("D3.jpg", cv.IMREAD_UNCHANGED)) + #needles.append(cv.imread("D4.jpg", cv.IMREAD_UNCHANGED)) + #needles.append(cv.imread("D5.jpg", cv.IMREAD_UNCHANGED)) + #needles.append(cv.imread("D6.jpg", cv.IMREAD_UNCHANGED)) + #needles.append(cv.imread("D7.jpg", cv.IMREAD_UNCHANGED)) + + for needle in needles: + # do object detection + #processed_image = vision_stun.apply_hsv_filter(screenshot, hsv_filter) + processed_image = 
super_tresh_needle(screenshot) + processed_needle = super_tresh_needle(needle) + rectangles = vision_stun.find(processed_image, processed_needle, 0.5, 1) + # draw the detection results onto the original image + output_image = vision_stun.draw_rectangles(screenshot, rectangles) + cv.imshow("output_image", output_image) + cv.waitKey(150) + + # only trigger ocr reading if a stun is detected + points = vision_stun.get_click_points(rectangles) + for point in points: + if point not in pointstore: + pointstore.append(point) + pydirectinput.moveTo(point[0], point[1]) + pydirectinput.mouseDown() + w = random.randint(1, 100) + cv.waitKey(400 + w) + pydirectinput.mouseUp() + else: + max_results = max_results + 1 + # apply missing buff + #print("buff missing" + str(i)) + + #pydirectinput.click(500,500) + #pydirectinput.press(key) + #cv.waitKey(10) + #pydirectinput.rightClick() + #pydirectinput.press("C") + #cv.waitKey(150) + #pyautogui.rightClick() + #cv.waitKey(10) + #pydirectinput.press("c") + #overlay_stun.update_stun_label("") + if keyboard.is_pressed('p') == True or pause == True: + pause = True + break + if keyboard.is_pressed('p') == True or pause == True: + pause = True + break + + # debug the loop rate + print('FPS {}'.format(1 / (time() - loop_time))) + loop_time = time() + cv.waitKey(150) + + +if __name__ == "__main__": + run() diff --git a/stun_vision.py b/stun_vision.py new file mode 100644 index 0000000..c31dc41 --- /dev/null +++ b/stun_vision.py @@ -0,0 +1,222 @@ +import cv2 as cv +import numpy as np +from hsvfilter import HsvFilter + + +class StunVision: + # constants + TRACKBAR_WINDOW = "Trackbars" + + # properties + needle_img = None + needle_w = 0 + needle_h = 0 + method = None + + # constructor + def __init__(self, method=cv.TM_CCOEFF_NORMED): + # load the image we're trying to match + # https://docs.opencv.org/4.2.0/d4/da8/group__imgcodecs.html + self.needle_img = cv.imread("wtf.jpg", cv.IMREAD_UNCHANGED) + + # Save the dimensions of the needle image + 
#self.needle_w = self.needle_img.shape[1] + #self.needle_h = self.needle_img.shape[0] + + # There are 6 methods to choose from: + # TM_CCOEFF, TM_CCOEFF_NORMED, TM_CCORR, TM_CCORR_NORMED, TM_SQDIFF, TM_SQDIFF_NORMED + self.method = method + + def find(self, haystack_img, needle_img, threshold=0.5, max_results=10): + # run the OpenCV algorithm + needle_w = needle_img.shape[1] + needle_h = needle_img.shape[0] + result = cv.matchTemplate(haystack_img, needle_img, self.method) + + # Get the all the positions from the match result that exceed our threshold + locations = np.where(result >= threshold) + locations = list(zip(*locations[::-1])) + # print(locations) + + # if we found no results, return now. this reshape of the empty array allows us to + # concatenate together results without causing an error + if not locations: + return np.array([], dtype=np.int32).reshape(0, 4) + + # You'll notice a lot of overlapping rectangles get drawn. We can eliminate those redundant + # locations by using groupRectangles(). + # First we need to create the list of [x, y, w, h] rectangles + rectangles = [] + for loc in locations: + rect = [int(loc[0]), int(loc[1]), needle_w, needle_h] + # Add every box to the list twice in order to retain single (non-overlapping) boxes + rectangles.append(rect) + rectangles.append(rect) + # Apply group rectangles. + # The groupThreshold parameter should usually be 1. If you put it at 0 then no grouping is + # done. If you put it at 2 then an object needs at least 3 overlapping rectangles to appear + # in the result. I've set eps to 0.5, which is: + # "Relative difference between sides of the rectangles to merge them into a group." + rectangles, weights = cv.groupRectangles(rectangles, groupThreshold=1, eps=0.5) + # print(rectangles) + + # for performance reasons, return a limited number of results. + # these aren't necessarily the best results. 
+ if len(rectangles) > max_results: + #print('Warning: too many results, raise the threshold.') + rectangles = rectangles[:max_results] + + return rectangles + + # given a list of [x, y, w, h] rectangles returned by find(), convert those into a list of + # [x, y] positions in the center of those rectangles where we can click on those found items + def get_click_points(self, rectangles): + points = [] + + # Loop over all the rectangles + for (x, y, w, h) in rectangles: + # Determine the center position + center_x = x + int(w / 2) + center_y = y + int(h / 2) + # Save the points + points.append((center_x, center_y)) + + return points + + # given a list of [x, y, w, h] rectangles and a canvas image to draw on, return an image with + # all of those rectangles drawn + def draw_rectangles(self, haystack_img, rectangles): + # these colors are actually BGR + line_color = (0, 255, 0) + line_type = cv.LINE_4 + pic = None + for (x, y, w, h) in rectangles: + # determine the box positions + top_left = (x, y) + bottom_right = (x + w, y + h) + # draw the box + cv.rectangle(haystack_img, top_left, bottom_right, line_color, lineType=line_type) + + #pic = haystack_img[y:y + h, x:x + w] + + return haystack_img + + def draw_display_picture(self, haystack_img, rectangles): + + pic = None + for (x, y, w, h) in rectangles: + pic = haystack_img[y:y + h, x:x + w] + + # scale_percent = 500 # percent of original size + # width = int(pic.shape[1] * scale_percent / 100) + # height = int(pic.shape[0] * scale_percent / 100) + # dim = (width, height) + # resize image + + # resized_pic = cv.resize(pic, dim, interpolation=cv.INTER_AREA) + pil_image = np.array(pic) + # pil_image = Image.fromarray(cv.cvtColor(pic, cv.COLOR_BGR2RGB)) + # pil_image = Image.) 
+ return pil_image + + # given a list of [x, y] positions and a canvas image to draw on, return an image with all + # of those click points drawn on as crosshairs + def draw_crosshairs(self, haystack_img, points): + # these colors are actually BGR + marker_color = (255, 0, 255) + marker_type = cv.MARKER_CROSS + + for (center_x, center_y) in points: + # draw the center point + cv.drawMarker(haystack_img, (center_x, center_y), marker_color, marker_type) + + return haystack_img + + # create gui window with controls for adjusting arguments in real-time + def init_control_gui(self): + cv.namedWindow(self.TRACKBAR_WINDOW, cv.WINDOW_NORMAL) + cv.resizeWindow(self.TRACKBAR_WINDOW, 350, 700) + + # required callback. we'll be using getTrackbarPos() to do lookups + # instead of using the callback. + def nothing(position): + pass + + # create trackbars for bracketing. + # OpenCV scale for HSV is H: 0-179, S: 0-255, V: 0-255 + cv.createTrackbar('HMin', self.TRACKBAR_WINDOW, 0, 179, nothing) + cv.createTrackbar('SMin', self.TRACKBAR_WINDOW, 0, 255, nothing) + cv.createTrackbar('VMin', self.TRACKBAR_WINDOW, 0, 255, nothing) + cv.createTrackbar('HMax', self.TRACKBAR_WINDOW, 0, 179, nothing) + cv.createTrackbar('SMax', self.TRACKBAR_WINDOW, 0, 255, nothing) + cv.createTrackbar('VMax', self.TRACKBAR_WINDOW, 0, 255, nothing) + # Set default value for Max HSV trackbars + cv.setTrackbarPos('HMax', self.TRACKBAR_WINDOW, 179) + cv.setTrackbarPos('SMax', self.TRACKBAR_WINDOW, 255) + cv.setTrackbarPos('VMax', self.TRACKBAR_WINDOW, 255) + + # trackbars for increasing/decreasing saturation and value + cv.createTrackbar('SAdd', self.TRACKBAR_WINDOW, 0, 255, nothing) + cv.createTrackbar('SSub', self.TRACKBAR_WINDOW, 0, 255, nothing) + cv.createTrackbar('VAdd', self.TRACKBAR_WINDOW, 0, 255, nothing) + cv.createTrackbar('VSub', self.TRACKBAR_WINDOW, 0, 255, nothing) + + # returns an HSV filter object based on the control GUI values + def get_hsv_filter_from_controls(self): + # Get current 
positions of all trackbars + hsv_filter = HsvFilter() + hsv_filter.hMin = cv.getTrackbarPos('HMin', self.TRACKBAR_WINDOW) + hsv_filter.sMin = cv.getTrackbarPos('SMin', self.TRACKBAR_WINDOW) + hsv_filter.vMin = cv.getTrackbarPos('VMin', self.TRACKBAR_WINDOW) + hsv_filter.hMax = cv.getTrackbarPos('HMax', self.TRACKBAR_WINDOW) + hsv_filter.sMax = cv.getTrackbarPos('SMax', self.TRACKBAR_WINDOW) + hsv_filter.vMax = cv.getTrackbarPos('VMax', self.TRACKBAR_WINDOW) + hsv_filter.sAdd = cv.getTrackbarPos('SAdd', self.TRACKBAR_WINDOW) + hsv_filter.sSub = cv.getTrackbarPos('SSub', self.TRACKBAR_WINDOW) + hsv_filter.vAdd = cv.getTrackbarPos('VAdd', self.TRACKBAR_WINDOW) + hsv_filter.vSub = cv.getTrackbarPos('VSub', self.TRACKBAR_WINDOW) + return hsv_filter + + # given an image and an HSV filter, apply the filter and return the resulting image. + # if a filter is not supplied, the control GUI trackbars will be used + def apply_hsv_filter(self, original_image, hsv_filter=None): + # convert image to HSV + hsv = cv.cvtColor(original_image, cv.COLOR_BGR2HSV) + + # if we haven't been given a defined filter, use the filter values from the GUI + if not hsv_filter: + hsv_filter = self.get_hsv_filter_from_controls() + + # add/subtract saturation and value + h, s, v = cv.split(hsv) + s = self.shift_channel(s, hsv_filter.sAdd) + s = self.shift_channel(s, -hsv_filter.sSub) + v = self.shift_channel(v, hsv_filter.vAdd) + v = self.shift_channel(v, -hsv_filter.vSub) + hsv = cv.merge([h, s, v]) + + # Set minimum and maximum HSV values to display + lower = np.array([hsv_filter.hMin, hsv_filter.sMin, hsv_filter.vMin]) + upper = np.array([hsv_filter.hMax, hsv_filter.sMax, hsv_filter.vMax]) + # Apply the thresholds + mask = cv.inRange(hsv, lower, upper) + result = cv.bitwise_and(hsv, hsv, mask=mask) + + # convert back to BGR for imshow() to display it properly + img = cv.cvtColor(result, cv.COLOR_HSV2BGR) + + return img + + # apply adjustments to an HSV channel + # 
https://stackoverflow.com/questions/49697363/shifting-hsv-pixel-values-in-python-using-numpy + def shift_channel(self, c, amount): + if amount > 0: + lim = 255 - amount + c[c >= lim] = 255 + c[c < lim] += amount + elif amount < 0: + amount = -amount + lim = amount + c[c <= lim] = 0 + c[c > lim] -= amount + return c diff --git a/tresh_util.py b/tresh_util.py new file mode 100644 index 0000000..13b97e2 --- /dev/null +++ b/tresh_util.py @@ -0,0 +1,94 @@ +import cv2 +import numpy as np + + +def super_tresh_main(img): + image = img + + # cv2.imshow("Image", image) + + gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY) + # cv2.imshow("gray", gray) + + blur = cv2.GaussianBlur(gray, (5, 5), 0) + # cv2.imshow("blur", blur) + + thresh = cv2.adaptiveThreshold(blur, 255, 1, 1, 11, 2) + + # thresh = cv2.bitwise_not(thresh) + + # cv2.imshow("thresh", thresh) + # cv2.waitKey() + contours, _ = cv2.findContours(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE) + + max_area = 0 + c = 0 + for i in contours: + area = cv2.contourArea(i) + if area > 1000: + if area > max_area: + max_area = area + best_cnt = i + image = cv2.drawContours(image, contours, c, (0, 255, 0), 1) + c += 1 + + mask = np.zeros((gray.shape), np.uint8) + cv2.drawContours(mask, [best_cnt], 0, 255, -1) + cv2.drawContours(mask, [best_cnt], 0, 0, 1) + # cv2.imshow("mask", mask) + + out = np.zeros_like(gray) + out[mask == 255] = gray[mask == 255] + # cv2.imshow("New image", out) + + blur = cv2.GaussianBlur(out, (5, 5), 0) + # cv2.imshow("blur1", blur) + + thresh = cv2.adaptiveThreshold(blur, 255, 1, 1, 11, 2) + return cv2.bitwise_not(thresh) + +def super_tresh_needle(img): + image = img + + # cv2.imshow("Image", image) + + gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY) + # cv2.imshow("gray", gray) + + blur = cv2.GaussianBlur(gray, (5, 5), 0) + # cv2.imshow("blur", blur) + + thresh = cv2.adaptiveThreshold(blur, 255, 1, 1, 11, 2) + + return cv2.bitwise_not(thresh) +''' + # cv2.imshow("thresh", thresh) + # cv2.waitKey() + 
contours, _ = cv2.findContours(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE) + + max_area = 0 + c = 0 + for i in contours: + area = cv2.contourArea(i) + if area > 1000: + if area > max_area: + max_area = area + best_cnt = i + image = cv2.drawContours(image, contours, c, (0, 255, 0), 1) + c += 1 + + mask = np.zeros((gray.shape), np.uint8) + cv2.drawContours(mask, [best_cnt], 0, 255, -1) + cv2.drawContours(mask, [best_cnt], 0, 0, 1) + # cv2.imshow("mask", mask) + + out = np.zeros_like(gray) + out[mask == 255] = gray[mask == 255] + # cv2.imshow("New image", out) + + blur = cv2.GaussianBlur(out, (5, 5), 0) + # cv2.imshow("blur1", blur) + + thresh = cv2.adaptiveThreshold(blur, 255, 1, 1, 11, 2) + return cv2.bitwise_not(thresh) +''' \ No newline at end of file diff --git a/utils.py b/utils.py new file mode 100644 index 0000000..6d690d0 --- /dev/null +++ b/utils.py @@ -0,0 +1,146 @@ +import numpy as np +import os +import cv2 as cv +from PIL import Image + + +def mse(imageA, imageB): + # the 'Mean Squared Error' between the two images is the + # sum of the squared difference between the two images; + # NOTE: the two images must have the same dimension + err = np.sum((imageA.astype("float") - imageB.astype("float")) ** 2) + err /= float(imageA.shape[0] * imageA.shape[1]) + + # return the MSE, the lower the error, the more "similar" + # the two images are + return err + + +def mse_with_reshape(imageA, imageB): + # the 'Mean Squared Error' between the two images is the + # sum of the squared difference between the two images; + # NOTE: the two images must have the same dimension + + if len(imageA) == 0 or len(imageB) == 0: + return 999 + try: + imga = np.reshape(imageA, (len(imageA) * 17, 740, 3), 'C') + imgb = np.reshape(imageB, (len(imageB) * 17, 740, 3), 'C') + + err = np.sum((imga.astype("float") - imgb.astype("float")) ** 2) + err /= float(imga.shape[0] * imga.shape[1]) + + # return the MSE, the lower the error, the more "similar" + # the two images are + return err + 
except: + return 999 + + +def load_bl_line(): + list_store = [] + for path in os.listdir('bl-lines'): + if path.endswith(".npy"): + list_store.append(np.load("bl-lines\\" + path)) + return list_store + + +def check_for_bl_line(line_item, black_list): + for bl_item in black_list: + if mse(line_item, bl_item) < 500: + return True + return False + + +def scale_screenshot(screenshot): + scale_percent = 200 # percent of original size + width = int(screenshot.shape[1] * scale_percent / 100) + height = int(screenshot.shape[0] * scale_percent / 100) + dim = (width, height) + resized_img = cv.resize(screenshot, dim, interpolation=4) + + gray = cv.cvtColor(resized_img, cv.COLOR_BGR2GRAY) + thresh = cv.threshold(gray, 0, 255, cv.THRESH_BINARY_INV + cv.THRESH_OTSU)[1] + # cv.imshow("Tresh", thresh) + # cv.waitKey(1000) + return thresh + + +def save_line_item_npy_jpg(short_pic_list): + ifr = 1 + for x in short_pic_list: + np.save("test{}".format(ifr), x) + Image.fromarray(x).save("test{}.jpg".format(ifr)) + ifr = ifr + 1 + + +def shorten_picture_input(new_pic_lst, old_pic_lst): + len_old = len(old_pic_lst) + len_new = len(new_pic_lst) + offset = 0 + offset_diff = len_new - len_old + retval = [] + + while mse_with_reshape(old_pic_lst[offset: len_old], new_pic_lst[0: len_new - offset - offset_diff]) > 500.0: + offset = offset + 1 + # print(offset) + if offset == len(old_pic_lst): + break + + if offset > 0: + retval = new_pic_lst[len_new - offset: len_new] + return retval + + +def shorten_input(new_lst, old_lst): + len_old = len(old_lst) + len_new = len(new_lst) + offset = 0 + offset_diff = len_new - len_old + retval = [] + + while old_lst[offset: len_old] != new_lst[0: len_new - offset - offset_diff]: + offset = offset + 1 + # print(offset) + + if offset > 0: + retval = new_lst[len_new - offset: len_new] + + return retval + + +def format_output(data): + new_list = data.split("\n") + + try: + while True: + # new_list.remove() ("",) + new_list.remove('\x0c') + except ValueError: + 
pass + return new_list + + +def update_screenshot_with_short_pic_list(short_pic_lst, lst_bl_items): + screenshot = [] + # check if shorten line pictures list has any entries + if len(short_pic_lst) > 0: + index_lst = [] + # check if any of the line item pictures is on the blacklist and save index + for x in range(len(short_pic_lst)): + if check_for_bl_line(short_pic_lst[x], lst_bl_items): + index_lst.append(x) + + # remove blacklisted items from shorten list + r_short_pic_lst = np.delete(short_pic_lst, index_lst, axis=0) + # check if further shortened line pictures list has any entries + if len(r_short_pic_lst) == 0: + return screenshot + + # combine shorten line picture list to one picture + screenshot = np.reshape(r_short_pic_lst, (len(r_short_pic_lst) * 17, 740, 3), 'C') + + # saves all line item pictures to disk for blacklist evaluation + # save_line_item_npy_jpg(r_short_pic_lst) + + return screenshot diff --git a/window_capture.py b/window_capture.py new file mode 100644 index 0000000..90f0547 --- /dev/null +++ b/window_capture.py @@ -0,0 +1,136 @@ +import numpy as np +import win32con +import win32gui +import win32ui +from config_file import UserConfigs + + +class WindowCapture: + + # properties + w = 0 + h = 0 + hwnd = None + cropped_x = 0 + cropped_y = 0 + offset_x = 0 + offset_y = 0 + + # constructor + def __init__(self, window_name, area, config): + # find the handle for the window we want to capture. 
+ # if no window name is given, capture the entire screen + if window_name is None: + self.hwnd = win32gui.GetDesktopWindow() + else: + self.hwnd = win32gui.FindWindow(None, window_name) + if not self.hwnd: + raise Exception('Window not found: {}'.format(window_name)) + + if area == "poison": + val = config.returnPoisonWindowPos() + else: + val = config.returnStunWindowPos() + + self.w = val[0] + self.h = val[1] + self.cropped_x = val[2] + self.cropped_y = val[3] + + + + def get_screenshot(self): + # get the window image data + wDC = win32gui.GetWindowDC(self.hwnd) + dcObj = win32ui.CreateDCFromHandle(wDC) + cDC = dcObj.CreateCompatibleDC() + dataBitMap = win32ui.CreateBitmap() + dataBitMap.CreateCompatibleBitmap(dcObj, self.w, self.h) + cDC.SelectObject(dataBitMap) + cDC.BitBlt((0, 0), (self.w, self.h), dcObj, (self.cropped_x, self.cropped_y), win32con.SRCCOPY) + + # convert the raw data into a format opencv can read + #dataBitMap.SaveBitmapFile(cDC, 'debug.bmp') + signedIntsArray = dataBitMap.GetBitmapBits(True) + img = np.fromstring(signedIntsArray, dtype='uint8') + img.shape = (self.h, self.w, 4) + + # free resources + dcObj.DeleteDC() + cDC.DeleteDC() + win32gui.ReleaseDC(self.hwnd, wDC) + win32gui.DeleteObject(dataBitMap.GetHandle()) + + # drop the alpha channel, or cv.matchTemplate() will throw an error like: + # error: (-215:Assertion failed) (depth == CV_8U || depth == CV_32F) && type == _templ.type() + # && _img.dims() <= 2 in function 'cv::matchTemplate' + img = img[...,:3] + + # make image C_CONTIGUOUS to avoid errors that look like: + # File ... 
in draw_rectangles + # TypeError: an integer is required (got type tuple) + # see the discussion here: + # https://github.com/opencv/opencv/issues/14866#issuecomment-580207109 + img = np.ascontiguousarray(img) + + return img + + def get_screenshot_by_area(self, area): + + w_local = area[0] + h_local = area[1] + cropped_x_local = area[2] + cropped_y_local = area[3] + + # get the window image data + wDC = win32gui.GetWindowDC(self.hwnd) + dcObj = win32ui.CreateDCFromHandle(wDC) + cDC = dcObj.CreateCompatibleDC() + dataBitMap = win32ui.CreateBitmap() + dataBitMap.CreateCompatibleBitmap(dcObj, w_local, h_local) + cDC.SelectObject(dataBitMap) + cDC.BitBlt((0, 0), (w_local, h_local), dcObj, (cropped_x_local, cropped_y_local), win32con.SRCCOPY) + + # convert the raw data into a format opencv can read + #dataBitMap.SaveBitmapFile(cDC, 'debug.bmp') + signedIntsArray = dataBitMap.GetBitmapBits(True) + img = np.fromstring(signedIntsArray, dtype='uint8') + img.shape = (h_local, w_local, 4) + + # free resources + dcObj.DeleteDC() + cDC.DeleteDC() + win32gui.ReleaseDC(self.hwnd, wDC) + win32gui.DeleteObject(dataBitMap.GetHandle()) + + # drop the alpha channel, or cv.matchTemplate() will throw an error like: + # error: (-215:Assertion failed) (depth == CV_8U || depth == CV_32F) && type == _templ.type() + # && _img.dims() <= 2 in function 'cv::matchTemplate' + img = img[...,:3] + + # make image C_CONTIGUOUS to avoid errors that look like: + # File ... in draw_rectangles + # TypeError: an integer is required (got type tuple) + # see the discussion here: + # https://github.com/opencv/opencv/issues/14866#issuecomment-580207109 + img = np.ascontiguousarray(img) + + return img + + # find the name of the window you're interested in. 
+ # once you have it, update window_capture() + # https://stackoverflow.com/questions/55547940/how-to-get-a-list-of-the-name-of-every-open-window + @staticmethod + def list_window_names(): + def winEnumHandler(hwnd, ctx): + if win32gui.IsWindowVisible(hwnd): + print(hex(hwnd), win32gui.GetWindowText(hwnd)) + win32gui.EnumWindows(winEnumHandler, None) + + # translate a pixel position on a screenshot image to a pixel position on the screen. + # pos = (x, y) + # WARNING: if you move the window being captured after execution is started, this will + # return incorrect coordinates, because the window position is only calculated in + # the __init__ constructor. + def get_screen_position(self, pos): + return (pos[0] + self.offset_x, pos[1] + self.offset_y) diff --git a/wtf.jpg b/wtf.jpg new file mode 100644 index 0000000..2b3242f Binary files /dev/null and b/wtf.jpg differ