refactor dig
@@ -78,29 +78,29 @@ def run():
    needles.append(cv.imread("dig/3.jpg", cv.IMREAD_UNCHANGED))
    needles.append(cv.imread("dig/4.jpg", cv.IMREAD_UNCHANGED))
else:
    needles.append(cv.imread("H1.jpg", cv.IMREAD_UNCHANGED))
    needles.append(cv.imread("H2.jpg", cv.IMREAD_UNCHANGED))
    needles.append(cv.imread("H3.jpg", cv.IMREAD_UNCHANGED))
    needles.append(cv.imread("H4.jpg", cv.IMREAD_UNCHANGED))
    needles.append(cv.imread("D1.jpg", cv.IMREAD_UNCHANGED))
    needles.append(cv.imread("D2.jpg", cv.IMREAD_UNCHANGED))
    needles.append(cv.imread("D3.jpg", cv.IMREAD_UNCHANGED))
    needles.append(cv.imread("D3.jpg", cv.IMREAD_UNCHANGED))
    needles.append(cv.imread("D4.jpg", cv.IMREAD_UNCHANGED))
    needles.append(cv.imread("D5.jpg", cv.IMREAD_UNCHANGED))
    needles.append(cv.imread("D6.jpg", cv.IMREAD_UNCHANGED))
    needles.append(cv.imread("D7.jpg", cv.IMREAD_UNCHANGED))
    needles.append(cv.imread("dig/H1.jpg", cv.IMREAD_UNCHANGED))
    needles.append(cv.imread("dig/H2.jpg", cv.IMREAD_UNCHANGED))
    needles.append(cv.imread("dig/H3.jpg", cv.IMREAD_UNCHANGED))
    needles.append(cv.imread("dig/H4.jpg", cv.IMREAD_UNCHANGED))
    needles.append(cv.imread("dig/D1.jpg", cv.IMREAD_UNCHANGED))
    needles.append(cv.imread("dig/D2.jpg", cv.IMREAD_UNCHANGED))
    needles.append(cv.imread("dig/D3.jpg", cv.IMREAD_UNCHANGED))
    needles.append(cv.imread("dig/D3.jpg", cv.IMREAD_UNCHANGED))
    needles.append(cv.imread("dig/D4.jpg", cv.IMREAD_UNCHANGED))
    needles.append(cv.imread("dig/D5.jpg", cv.IMREAD_UNCHANGED))
    needles.append(cv.imread("dig/D6.jpg", cv.IMREAD_UNCHANGED))
    needles.append(cv.imread("dig/D7.jpg", cv.IMREAD_UNCHANGED))
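
# Each needle above is a small template image (presumably a dig-site marker); the loop
# below matches every template against a fresh screenshot of the game window.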
for needle in needles:
    # do object detection
    screenshot = capture_window.get_screenshot()
    rectangles = vision_stun.find(screenshot, needle, 0.7, 1)
    rectangles = vision_stun.find(screenshot, needle, 0.8, 1)
    # draw the detection results onto the original image
    if len(rectangles) == 0:
        continue
    # output_image = vision_stun.draw_rectangles(screenshot, rectangles)
    # cv.imshow("output_image", output_image)
    # cv.waitKey(150)
    #output_image = vision_stun.draw_rectangles(screenshot, rectangles)
    #cv.imshow("output_image", output_image)
    #cv.waitKey(150)

    # only trigger ocr reading if a stun is detected
    points = vision_stun.get_click_points(rectangles)
@@ -116,8 +116,8 @@ def run():

left = int(round(rectangles[0][0] / size, 0))  # 4
down = int(round(rectangles[0][1] / size, 0))  # 23
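# Shift the grid position by the window offset stored in the user config.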
offset_left = config.returnMagicWindowPos()[2]
offset_down = config.returnMagicWindowPos()[3]
offset_left = config.returnDiggingWindowPos()[2]
offset_down = config.returnDiggingWindowPos()[3]
# 167 1055 start
# 3x47 left 26x right to 30
# 1x down 22x up to 24
@@ -130,7 +130,7 @@ def run():
pydirectinput.moveTo(i + offset_left, f + offset_down)
pydirectinput.mouseDown()
w = random.randint(1, 50)
cv.waitKey(850 + w)
cv.waitKey(150 + w)
pydirectinput.mouseUp()
if keyboard.is_pressed('p') == True or pause == True:
    pause = True
@@ -138,7 +138,7 @@ def run():
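
# On the main plot, look for the dig/ok_button.jpg template in a fresh screenshot.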
if PLOT_TO_USE == "main_plot":
    screenshot = capture_window.get_screenshot()
    rectangles = vision_stun.find(screenshot, cv.imread("dig/ok_button.jpg", cv.IMREAD_UNCHANGED), 0.8,
    rectangles = vision_stun.find(screenshot, cv.imread("dig/ok_button.jpg", cv.IMREAD_UNCHANGED), 0.5,
                                  1)
    # draw the detection results onto the original image
    output_image = vision_stun.draw_rectangles(screenshot, rectangles)
@@ -1,208 +0,0 @@
import random
from time import time
from window_capture import WindowCapture
from vision import Vision
import cv2 as cv
import pytesseract
from hsvfilter import HsvFilter
from config_file import UserConfigs
# import pyautogui
import pydirectinput
import keyboard
from tresh_util import super_tresh_main, super_tresh_needle


def run():
    # initialize the user-class
    config = UserConfigs()

    # initialize the StunWindowCapture class
    try:
        capture_window = WindowCapture(
            None, "stun", config)
        video_mode = False
    except:
        # StunWindowCapture.list_window_names()
        # print("Game not running, switching to video mode")
        # capture_window = cv.VideoCapture("snip_slam.mp4")
        video_mode = True

    # initialize the StunVision class
    vision_stun = Vision()
    # initialize the StunOverlay class
    hsv_filter = HsvFilter(0, 0, 124, 15, 255, 168, 0, 255, 0, 0)

    loop_time = time()
    event_time = 0.0
    pointstore = []
    max_results = 0
    pause = True
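    # Main loop: toggle pause with the 'p'/'o' keys, grab a screenshot of the game
    # window, then run template matching on the equipment icons.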
    while True:
        if keyboard.is_pressed('p') == True:
            pause = True
            print('q pressed')
        elif keyboard.is_pressed('o') == True:
            pause = False
            print('o pressed')
        if pause:
            # cv.waitKey(500)
            print("pausing")
            continue

        if video_mode:
            break
        else:
            try:
                # get an updated image of the game
                screenshot = capture_window.get_screenshot()
                # screenshot = cv.imread("buffbar.jpg")
            except:
                capture_window.release()
                print("Game window not available - shutting down application")
                break
            # cv.imshow("screenshot", screenshot)
            # cv.waitKey(150)
            # continue
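
        # If the 'equip/amu_e2_32' icon is on screen, click its match repeatedly
        # (fifteen short clicks with a small random delay between them).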
        spawn_1 = vision_stun.find(screenshot, cv.imread("equip/amu_e2_32.jpg", cv.IMREAD_UNCHANGED), 0.4, 1)
        if len(spawn_1) == 1:
            spawn_button_active = True
            points = vision_stun.get_click_points(spawn_1)
            for i in range(0, 15, 1):
                pydirectinput.moveTo(points[0][0], points[0][1])
                pydirectinput.mouseDown()
                w = random.randint(1, 50)
                cv.waitKey(30 + w)
                pydirectinput.mouseUp()
                if keyboard.is_pressed('p') == True:
                    pause = True
                    print('q pressed')
                    break
        else:
            spawn_0 = vision_stun.find(screenshot, cv.imread("equip/amu_e_32.jpg", cv.IMREAD_UNCHANGED), 0.7, 1)
            points = vision_stun.get_click_points(spawn_0)
            for point in points:
                pydirectinput.moveTo(point[0], point[1])
                pydirectinput.mouseDown()
                cv.waitKey(500)
                pydirectinput.mouseUp()
                if keyboard.is_pressed('p') == True:
                    pause = True
                    print('q pressed')
                    break
            continue
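
        # Template images for the book, amulet and bag icons handled below.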
        needles = []

        needles.append(cv.imread("equip/book_1_32.jpg", cv.IMREAD_UNCHANGED))
        needles.append(cv.imread("equip/book_2_32.jpg", cv.IMREAD_UNCHANGED))
        needles.append(cv.imread("equip/book_3_32.jpg", cv.IMREAD_UNCHANGED))
        needles.append(cv.imread("equip/book_4_32.jpg", cv.IMREAD_UNCHANGED))
        needles.append(cv.imread("equip/book_5_32.jpg", cv.IMREAD_UNCHANGED))

        needles.append(cv.imread("equip/amu_1_32.jpg", cv.IMREAD_UNCHANGED))
        needles.append(cv.imread("equip/amu_2_32.jpg", cv.IMREAD_UNCHANGED))
        needles.append(cv.imread("equip/amu_3_32.jpg", cv.IMREAD_UNCHANGED))
        needles.append(cv.imread("equip/amu_4_32.jpg", cv.IMREAD_UNCHANGED))
        needles.append(cv.imread("equip/amu_5_32.jpg", cv.IMREAD_UNCHANGED))
        needles.append(cv.imread("equip/amu_6_32.jpg", cv.IMREAD_UNCHANGED))
        needles.append(cv.imread("equip/amu_7_32.jpg", cv.IMREAD_UNCHANGED))

        needles.append(cv.imread("equip/bag_1_32.jpg", cv.IMREAD_UNCHANGED))
        needles.append(cv.imread("equip/bag_2_32.jpg", cv.IMREAD_UNCHANGED))
        needles.append(cv.imread("equip/bag_3_32.jpg", cv.IMREAD_UNCHANGED))
        needles.append(cv.imread("equip/bag_4_32.jpg", cv.IMREAD_UNCHANGED))
        needles.append(cv.imread("equip/bag_5_32.jpg", cv.IMREAD_UNCHANGED))
        needles.append(cv.imread("equip/bag_6_32.jpg", cv.IMREAD_UNCHANGED))
        # needles.append(cv.imread("equip/bag_7_32.jpg", cv.IMREAD_UNCHANGED))

        # needles.append(cv.imread("equip/book_6_32.jpg", cv.IMREAD_UNCHANGED))
        # needles.append(cv.imread("equip/book_7_32.jpg", cv.IMREAD_UNCHANGED))
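
        # One HSV pre-filter per needle; the extra entries appended in the loop are
        # pass-through filters so indexing by needle never runs out of range.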
        hsv = []
        hsv.append(HsvFilter(0, 128, 0, 179, 255, 255, 0, 0, 0, 0))
        hsv.append(HsvFilter(49, 0, 0, 179, 255, 255, 0, 0, 0, 0))
        hsv.append(HsvFilter(0, 0, 0, 179, 255, 196, 0, 0, 0, 0))
        hsv.append(HsvFilter(0, 156, 0, 179, 255, 255, 0, 0, 0, 0))
        hsv.append(HsvFilter(0, 95, 137, 179, 255, 255, 0, 0, 0, 0))
        for op in range(1, 50, 1):
            hsv.append(HsvFilter(0, 0, 0, 179, 255, 255, 0, 0, 0, 0))
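
        # For each needle: while exactly five matches are on screen, drag the duplicates
        # into the slots around the first match (check_move() skips occupied slots).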
        for rer in range(0, len(needles), 1):
            while True:
                # do object detection
                screenshot = capture_window.get_screenshot()
                processed_screenshot = vision_stun.apply_hsv_filter(screenshot, hsv[rer])
                processed_needle = vision_stun.apply_hsv_filter(needles[rer], hsv[rer])
                rectangles = vision_stun.find(processed_screenshot, processed_needle, 0.8, 5)
                # draw the detection results onto the original image
                #output_image = vision_stun.draw_rectangles(processed_screenshot, rectangles)
                #cv.imshow("output_image", output_image)
                #cv.waitKey(150)

                if len(rectangles) != 5:
                    break

                if keyboard.is_pressed('p') == True:
                    pause = True
                    print('q pressed')
                    break

                points = vision_stun.get_click_points(rectangles)
                check_move(capture_window, vision_stun, [70, 70, rectangles[0][0] + 70, rectangles[0][1]], needles[rer],
                           hsv[rer],
                           points[1], [points[0][0] + 70, points[0][1]])
                check_move(capture_window, vision_stun, [70, 70, rectangles[0][0], rectangles[0][1] + 70], needles[rer],
                           hsv[rer],
                           points[2], [points[0][0], points[0][1] + 70])
                check_move(capture_window, vision_stun, [70, 70, rectangles[0][0] + 70, rectangles[0][1] + 70],
                           needles[rer], hsv[rer],
                           points[3], [points[0][0] + 70, points[0][1] + 70])
                move(points[4], [points[0][0], points[0][1]])
                if keyboard.is_pressed('p') == True:
                    pause = True
                    print('q pressed')
                    break
        # debug the loop rate
        print('FPS {}'.format(1 / (time() - loop_time)))
        loop_time = time()
        cv.waitKey(150)
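

# Drag-and-drop helper: press at the source point, wait briefly, release at the destination.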
def move(point_source, point_dest):
    pydirectinput.moveTo(point_source[0], point_source[1])
    pydirectinput.mouseDown()
    w = random.randint(1, 100)
    cv.waitKey(250 + w)
    pydirectinput.moveTo(point_dest[0], point_dest[1])
    pydirectinput.mouseUp()
    cv.waitKey(250 + w)
    if keyboard.is_pressed('p') == True:
        pause = True
        print('q pressed')
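

# Check whether the target slot already contains the item (by re-matching the needle in
# that screen region); if the slot is empty, drag the duplicate there with move().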
def check_move(capture_win, visio, rect, needl, hsv, point_source, point_dest):
    screenshot_pos = capture_win.get_screenshot_by_area(rect)
    processed_screenshot = visio.apply_hsv_filter(screenshot_pos, hsv)
    processed_needle = visio.apply_hsv_filter(needl, hsv)
    rectangles2 = visio.find(processed_screenshot, processed_needle, 0.7, 1)
    # output_by_area = vision_stun.draw_rectangles(screenshot_pos2, rectangles)
    # cv.imshow("output_image_by_area", output_by_area)
    # cv.waitKey(150)
    if len(rectangles2) == 1:
        # pos 2 filled
        return
    else:
        # pos 2 vacant
        pass
    move(point_source, point_dest)
    if keyboard.is_pressed('p') == True:
        pause = True
        print('q pressed')


if __name__ == "__main__":
    run()
@@ -17,7 +17,7 @@ class Vision:
def __init__(self, method=cv.TM_CCOEFF_NORMED):
    # load the image we're trying to match
    # https://docs.opencv.org/4.2.0/d4/da8/group__imgcodecs.html
    self.needle_img = cv.imread("dig/wtf.jpg", cv.IMREAD_UNCHANGED)
    # self.needle_img = cv.imread("dig/wtf.jpg", cv.IMREAD_UNCHANGED)

    # Save the dimensions of the needle image
    #self.needle_w = self.needle_img.shape[1]