This commit is contained in:
2022-04-29 02:06:44 +02:00
parent b187022918
commit ebafbdf4b2
128 changed files with 1619 additions and 0 deletions

digging_main.py Normal file

@@ -0,0 +1,164 @@
import random
from time import time, sleep
from window_capture import WindowCapture
from stun_vision import StunVision
import cv2 as cv
import pytesseract  # imported for OCR but not used in this script
from hsvfilter import HsvFilter
from config_file import UserConfigs
#import pyautogui
import pydirectinput
import keyboard
from tresh_util import super_tresh_main, super_tresh_needle  # not used in this script
def run():
# initialize the user-class
config = UserConfigs()
# initialize the StunWindowCapture class
try:
capture_window = WindowCapture(None, "stun", config)
video_mode = False
except Exception:
# StunWindowCapture.list_window_names()
#print("Game not running, switching to video mode")
#capture_window = cv.VideoCapture("snip_slam.mp4")
video_mode = True
# initialize the StunVision class
vision_stun = StunVision()
# HSV filter settings (defined here but not applied in this script)
hsv_filter = HsvFilter(0, 0, 124, 15, 255, 168, 0, 255, 0, 0)
loop_time = time()
event_time = 0.0  # currently unused
pointstore = []  # currently unused
max_results = 0  # currently unused
pause = True
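# main loop: the script starts paused - press 'o' to run, 'p' to pause again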
while True:
if keyboard.is_pressed('p'):
pause = True
print('p pressed')
elif keyboard.is_pressed('o'):
pause = False
print('o pressed')
if pause:
sleep(0.5)  # throttle the loop while paused instead of busy-waiting
print("pausing")
continue
if video_mode:
break  # video fallback is disabled; exit when no game window was found
else:
try:
# get an updated image of the game
screenshot = capture_window.get_screenshot()
# screenshot = cv.imread("buffbar.jpg")
except Exception:
capture_window.release()
print("Game window not available - shutting down application")
break
#cv.imshow("screenshot", screenshot)
#cv.waitKey(150)
#continue
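# template images ("needles") for the tiles to detect; note they are re-read from disk on every loop pass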
needles = []
needles.append(cv.imread("wtf.jpg", cv.IMREAD_UNCHANGED))
needles.append(cv.imread("Brown0.jpg", cv.IMREAD_UNCHANGED))
needles.append(cv.imread("1.jpg", cv.IMREAD_UNCHANGED))
needles.append(cv.imread("2.jpg", cv.IMREAD_UNCHANGED))
needles.append(cv.imread("3.jpg", cv.IMREAD_UNCHANGED))
needles.append(cv.imread("4.jpg", cv.IMREAD_UNCHANGED))
needles.append(cv.imread("H1.jpg", cv.IMREAD_UNCHANGED))
needles.append(cv.imread("H2.jpg", cv.IMREAD_UNCHANGED))
needles.append(cv.imread("H3.jpg", cv.IMREAD_UNCHANGED))
needles.append(cv.imread("H4.jpg", cv.IMREAD_UNCHANGED))
needles.append(cv.imread("D1.jpg", cv.IMREAD_UNCHANGED))
needles.append(cv.imread("D2.jpg", cv.IMREAD_UNCHANGED))
needles.append(cv.imread("D3.jpg", cv.IMREAD_UNCHANGED))
needles.append(cv.imread("D3.jpg", cv.IMREAD_UNCHANGED))
needles.append(cv.imread("D4.jpg", cv.IMREAD_UNCHANGED))
needles.append(cv.imread("D5.jpg", cv.IMREAD_UNCHANGED))
needles.append(cv.imread("D6.jpg", cv.IMREAD_UNCHANGED))
needles.append(cv.imread("D7.jpg", cv.IMREAD_UNCHANGED))
for needle in needles:
# do object detection
screenshot = capture_window.get_screenshot()
rectangles = vision_stun.find(screenshot, needle, 0.7, 1)
# draw the detection results onto the original image
if len(rectangles) == 0:
continue
#output_image = vision_stun.draw_rectangles(screenshot, rectangles)
#cv.imshow("output_image", output_image)
#cv.waitKey(150)
# a template matched - compute the click points for the detected tiles
points = vision_stun.get_click_points(rectangles)
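# for each match, locate the grid and click every tile in it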
for point in points:
# calibration notes (capture-space pixel coordinates of the dig grid):
#   grid width: (46 + 1) * 30 = 1410 == 1815 - 405 -> [1410, 1128, 402, 22]
#   alt. sizing: (44 + 1) * 30 = 1350 == 1790 - 440
#   left border:  402 / 432    right border: 1812 / 1782
#   upper border: 22 / 50      lower border: 1150 / 1120
size = rectangles[0][2] + 1  # tile size in pixels (match width + 1)
left = int(round(rectangles[0][0] / size, 0))  # column index of the matched tile (e.g. 4)
down = int(round(rectangles[0][1] / size, 0))  # row index of the matched tile (e.g. 23)
offset_left = config.returnStunWindowPos()[2]  # x offset used to map capture coordinates to screen coordinates
offset_down = config.returnStunWindowPos()[3]  # y offset used to map capture coordinates to screen coordinates
# calibration notes: the first match was at (167, 1055);
#   3 tiles of 47 px to the left + 26 to the right -> 30 columns
#   1 tile down + 22 up -> 24 rows
#   e.g. start 167, end 167 - (47 * 3), step -47
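# walk back from the matched tile to the top-left corner of the grid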
start_left = point[0] - (size * left)
start_up = point[1] - (size * down)
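# sweep the whole grid: 24 rows x 30 columns, one click per tile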
for f in range(start_up, start_up + (size * 24), size):
for i in range(start_left, start_left + (size * 30), size):
pydirectinput.moveTo(i + offset_left, f + offset_down)
pydirectinput.mouseDown()
w = random.randint(1, 100)
cv.waitKey(150 + w)  # hold the click for ~150-250 ms (waitKey doubles as a short delay)
pydirectinput.mouseUp()
if keyboard.is_pressed('p') or pause:
pause = True
break
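# after each click, look for an OK button in a fresh screenshot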
screenshot = capture_window.get_screenshot()
rectangles = vision_stun.find(screenshot, cv.imread("ok_button.jpg", cv.IMREAD_UNCHANGED), 0.8, 1)
# draw the detection results onto the original image
output_image = vision_stun.draw_rectangles(screenshot, rectangles)  # result is not displayed anywhere
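# if exactly one OK button was found, click it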
if len(rectangles) == 1:
pointis = vision_stun.get_click_points(rectangles)
for pointi in pointis:
pydirectinput.moveTo(pointi[0] + offset_left, pointi[1] + offset_down)
pydirectinput.mouseDown()
w = random.randint(1, 100)
cv.waitKey(150 + w)
pydirectinput.mouseUp()
if keyboard.is_pressed('p') or pause:
pause = True
break
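# propagate a pause request ('p') outward through the nested loops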
if keyboard.is_pressed('p') or pause:
pause = True
break
if keyboard.is_pressed('p') or pause:
pause = True
break
# debug the loop rate
print('FPS {}'.format(1 / (time() - loop_time)))
loop_time = time()
cv.waitKey(150)
if __name__ == "__main__":
run()