import random
from time import time

import cv2 as cv
import keyboard
import pydirectinput
import pytesseract

from window_capture import WindowCapture
from vision import Vision
from hsvfilter import HsvFilter
from config_file import UserConfigs
from tresh_util import super_tresh_main, super_tresh_needle
# import pyautogui

# global pause flag, toggled with the 'p' (pause) and 'o' (resume) keys
pause = True


def run():
    global pause

    # initialize the user config
    config = UserConfigs()

    # initialize the WindowCapture class
    try:
        capture_window = WindowCapture(None, "stun", config)
        video_mode = False
    except Exception:
        # WindowCapture.list_window_names()
        # print("Game not running, switching to video mode")
        # capture_window = cv.VideoCapture("snip_slam.mp4")
        video_mode = True

    # initialize the Vision class
    vision_stun = Vision()

    # HSV filter used for the overlay preprocessing
    hsv_filter = HsvFilter(0, 0, 124, 15, 255, 168, 0, 255, 0, 0)

    loop_time = time()
    event_time = 0.0
    pointstore = []
    max_results = 0
    pause = True

    while True:
        # toggle the pause flag from the keyboard
        if keyboard.is_pressed('p'):
            pause = True
            print('p pressed')
        elif keyboard.is_pressed('o'):
            pause = False
            print('o pressed')

        if pause:
            # cv.waitKey(500)
            print("pausing")
            continue

        if video_mode:
            break

        try:
            # get an updated image of the game
            screenshot = capture_window.get_screenshot()
            # screenshot = cv.imread("buffbar.jpg")
        except Exception:
            capture_window.release()
            print("Game window not available - shutting down application")
            break

        # cv.imshow("screenshot", screenshot)
        # cv.waitKey(150)
        # continue

        # look for the active spawn button
        spawn_1 = vision_stun.find(screenshot, cv.imread("equip/amu_e2_32.jpg", cv.IMREAD_UNCHANGED), 0.4, 1)
        if len(spawn_1) == 1:
            spawn_button_active = True
            points = vision_stun.get_click_points(spawn_1)
            # click the active spawn button repeatedly with a small random delay
            for _ in range(15):
                pydirectinput.moveTo(points[0][0], points[0][1])
                pydirectinput.mouseDown()
                w = random.randint(1, 50)
                cv.waitKey(30 + w)
                pydirectinput.mouseUp()
                if keyboard.is_pressed('p'):
                    pause = True
                    print('p pressed')
                    break
        else:
            # spawn button not active yet - click the inactive button and retry
            spawn_0 = vision_stun.find(screenshot, cv.imread("equip/amu_e_32.jpg", cv.IMREAD_UNCHANGED), 0.7, 1)
            points = vision_stun.get_click_points(spawn_0)
            for point in points:
                pydirectinput.moveTo(point[0], point[1])
                pydirectinput.mouseDown()
                cv.waitKey(500)
                pydirectinput.mouseUp()
                if keyboard.is_pressed('p'):
                    pause = True
                    print('p pressed')
                    break
            continue

        # needle images for the items that should be stacked
        needles = []
        needles.append(cv.imread("equip/book_1_32.jpg", cv.IMREAD_UNCHANGED))
        needles.append(cv.imread("equip/book_2_32.jpg", cv.IMREAD_UNCHANGED))
        needles.append(cv.imread("equip/book_3_32.jpg", cv.IMREAD_UNCHANGED))
        needles.append(cv.imread("equip/book_4_32.jpg", cv.IMREAD_UNCHANGED))
        needles.append(cv.imread("equip/book_5_32.jpg", cv.IMREAD_UNCHANGED))
        needles.append(cv.imread("equip/amu_1_32.jpg", cv.IMREAD_UNCHANGED))
        needles.append(cv.imread("equip/amu_2_32.jpg", cv.IMREAD_UNCHANGED))
        needles.append(cv.imread("equip/amu_3_32.jpg", cv.IMREAD_UNCHANGED))
        needles.append(cv.imread("equip/amu_4_32.jpg", cv.IMREAD_UNCHANGED))
        needles.append(cv.imread("equip/amu_5_32.jpg", cv.IMREAD_UNCHANGED))
        needles.append(cv.imread("equip/amu_6_32.jpg", cv.IMREAD_UNCHANGED))
        needles.append(cv.imread("equip/amu_7_32.jpg", cv.IMREAD_UNCHANGED))
        needles.append(cv.imread("equip/bag_1_32.jpg", cv.IMREAD_UNCHANGED))
        needles.append(cv.imread("equip/bag_2_32.jpg", cv.IMREAD_UNCHANGED))
        needles.append(cv.imread("equip/bag_3_32.jpg", cv.IMREAD_UNCHANGED))
        needles.append(cv.imread("equip/bag_4_32.jpg", cv.IMREAD_UNCHANGED))
        needles.append(cv.imread("equip/bag_5_32.jpg", cv.IMREAD_UNCHANGED))
        needles.append(cv.imread("equip/bag_6_32.jpg", cv.IMREAD_UNCHANGED))
        # needles.append(cv.imread("equip/bag_7_32.jpg", cv.IMREAD_UNCHANGED))
        # needles.append(cv.imread("equip/book_6_32.jpg", cv.IMREAD_UNCHANGED))
        # needles.append(cv.imread("equip/book_7_32.jpg", cv.IMREAD_UNCHANGED))

        # HSV filters matched to the first five needles; the rest use a neutral filter
        hsv = []
        hsv.append(HsvFilter(0, 128, 0, 179, 255, 255, 0, 0, 0, 0))
        hsv.append(HsvFilter(49, 0, 0, 179, 255, 255, 0, 0, 0, 0))
        hsv.append(HsvFilter(0, 0, 0, 179, 255, 196, 0, 0, 0, 0))
        hsv.append(HsvFilter(0, 156, 0, 179, 255, 255, 0, 0, 0, 0))
        hsv.append(HsvFilter(0, 95, 137, 179, 255, 255, 0, 0, 0, 0))
        for _ in range(1, 50):
            hsv.append(HsvFilter(0, 0, 0, 179, 255, 255, 0, 0, 0, 0))

        for rer in range(len(needles)):
            while True:
                # do object detection
                screenshot = capture_window.get_screenshot()
                processed_screenshot = vision_stun.apply_hsv_filter(screenshot, hsv[rer])
                processed_needle = vision_stun.apply_hsv_filter(needles[rer], hsv[rer])
                rectangles = vision_stun.find(processed_screenshot, processed_needle, 0.8, 5)

                # draw the detection results onto the original image
                # output_image = vision_stun.draw_rectangles(processed_screenshot, rectangles)
                # cv.imshow("output_image", output_image)
                # cv.waitKey(150)

                # keep stacking while all five matches are present
                if len(rectangles) != 5:
                    break
                if keyboard.is_pressed('p'):
                    pause = True
                    print('p pressed')
                    break

                points = vision_stun.get_click_points(rectangles)
                # move matches 1-3 onto the first match if their target slot is still vacant
                check_move(capture_window, vision_stun,
                           [70, 70, rectangles[0][0] + 70, rectangles[0][1]],
                           needles[rer], hsv[rer], points[1],
                           [points[0][0] + 70, points[0][1]])
                check_move(capture_window, vision_stun,
                           [70, 70, rectangles[0][0], rectangles[0][1] + 70],
                           needles[rer], hsv[rer], points[2],
                           [points[0][0], points[0][1] + 70])
                check_move(capture_window, vision_stun,
                           [70, 70, rectangles[0][0] + 70, rectangles[0][1] + 70],
                           needles[rer], hsv[rer], points[3],
                           [points[0][0] + 70, points[0][1] + 70])
                move(points[4], [points[0][0], points[0][1]])

                if keyboard.is_pressed('p'):
                    pause = True
                    print('p pressed')
                    break

        # debug the loop rate
        print('FPS {}'.format(1 / (time() - loop_time)))
        loop_time = time()
        cv.waitKey(150)


def move(point_source, point_dest):
    global pause
    # drag the item from point_source to point_dest with the left mouse button
    pydirectinput.moveTo(point_source[0], point_source[1])
    pydirectinput.mouseDown()
    w = random.randint(1, 100)
    cv.waitKey(250 + w)
    pydirectinput.moveTo(point_dest[0], point_dest[1])
    pydirectinput.mouseUp()
    cv.waitKey(250 + w)
    if keyboard.is_pressed('p'):
        pause = True
        print('p pressed')


def check_move(capture_win, visio, rect, needl, hsv, point_source, point_dest):
    global pause
    # check whether the destination slot already holds the item before moving
    screenshot_pos = capture_win.get_screenshot_by_area(rect)
    processed_screenshot = visio.apply_hsv_filter(screenshot_pos, hsv)
    processed_needle = visio.apply_hsv_filter(needl, hsv)
    rectangles2 = visio.find(processed_screenshot, processed_needle, 0.7, 1)
    # output_by_area = vision_stun.draw_rectangles(screenshot_pos2, rectangles)
    # cv.imshow("output_image_by_area", output_by_area)
    # cv.waitKey(150)
    if len(rectangles2) == 1:
        # pos 2 filled - nothing to do
        return
    # pos 2 vacant - drag the item over
    move(point_source, point_dest)
    if keyboard.is_pressed('p'):
        pause = True
        print('p pressed')


if __name__ == "__main__":
    run()