Release: Fortnite Neural Network Python Hack + Tutorial (How to install)

tel1014

New Member
Messages
4
Reaction score
1
Points
1
Warning: It does not always detect every player.

How to set up the hack?
So to set this up, you first need to install Python 2.7.14 or above, and an editor of your choice. I am using the Atom editor.

Download and run the Python installer .exe and, once it finishes, set up Python. Next, find where you put your Python folder, type "env" into the search bar at the bottom left, and open "Edit environment variables for your account". Select Path in the top half and press Edit. Make two new entries and put these two paths in (or wherever your Python path is):
C:\Python27\Scripts
C:\Python27\
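(To make sure the new entries took effect, open a fresh command prompt and run python --version and pip --version; both should print a version number. If either command isn't recognized, re-check the two Path entries.)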

The first entry lets you run pip from anywhere, and the second lets you run python from anywhere on the command line. Next, install whatever text editor you are using and just leave it for now. Then go to your command prompt and put in the following commands one at a time:
pip install pywin32
pip install keyboard
pip install pygame
pip install pyscreenshot
pip install imutils
pip install numpy
pip install argparse
pip install opencv-python
pip install pyautogui

These install the dependencies that the Python script needs to run.
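A quick optional way to confirm the installs worked is a one-off import check; paste this into the Python shell or a throwaway .py file, and if anything is missing you'll get an ImportError naming it:
Code:
# quick dependency check for the script below
import win32api, win32con, pythoncom
import keyboard, pygame, pyautogui, imutils
import numpy, argparse, cv2
from PIL import ImageGrab
print("all imports OK, OpenCV " + cv2.__version__)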
Now that this is done, open up your text editor and make a new .py file. You can call it whatever you want, and it should look like this: hack.py. If it looks like hack.py.txt, don't worry, just rename it and you will be fine. Once you have the file made, copy-paste the code in and edit it however you want. This is how I'm using it:
Code:
# USAGE
# python real_time_object_detection.py --prototxt MobileNetSSD_deploy.prototxt.txt --model MobileNetSSD_deploy.caffemodel
# import the necessary packages
import win32api
import keyboard as keyboard
import pygame as pygame
import pythoncom
import win32con
from PIL import ImageGrab
from imutils.video import VideoStream
from imutils.video import FPS
import numpy as np
import argparse
import imutils
import time
import cv2
import pyautogui
# construct the argument parse and parse the arguments
from keyboard._mouse_event import RIGHT
ap = argparse.ArgumentParser()
ap.add_argument("-p", "--prototxt", required=False,
                help="path to Caffe 'deploy' prototxt file")
ap.add_argument("-m", "--model", required=False,
                help="path to Caffe pre-trained model")
ap.add_argument("-c", "--confidence", type=float, default=0.6,
                help="minimum probability to filter weak detections")
args = vars(ap.parse_args())
prott1 = r'C:\Users\ianyy\Documents\MobileNetSSD_deploy.prototxt.txt'
prott2 = r'C:\Users\ianyy\Documents\MobileNetSSD_deploy.caffemodel'
# initialize the list of class labels MobileNet SSD was trained to
# detect, then generate a set of bounding box colors for each class
CLASSES = ["background", "aeroplane", "bicycle", "bird", "boat",
           "bottle", "bus", "car", "cat", "chair", "cow", "diningtable",
           "dog", "horse", "motorbike", "person", "pottedplant", "sheep",
           "sofa", "train", "tvmonitor"]
COLORS = np.random.uniform(0, 255, size=(len(CLASSES), 3))
# load our serialized model from disk
print("[INFO] loading model...")
net = cv2.dnn.readNetFromCaffe(prott1, prott2)
# initialize the video stream, allow the camera sensor to warm up,
# and initialize the FPS counter
print("[INFO] starting video stream...")
#vs = VideoStream(src=0).start()
#time.sleep(2.0)
#fps = FPS().start()
# loop over the frames from the video stream
# running min/max trackers for the detected bounding box coordinates
HSX = 100
LSX = 1000
HSY = 100
LSY = 1000
HEX = 100
LEX = 1000
HEY = 100
LEY = 1000
while True:
    # grab a frame of the screen region the game renders in
    frame = np.array(ImageGrab.grab(bbox=(0, 40, 1820, 1240)))
    # frame = imutils.resize(frame, width=400)
    # grab the frame dimensions and convert it to a blob
    (h, w) = frame.shape[:2]
    blob = cv2.dnn.blobFromImage(cv2.resize(frame, (300, 300)),
                                 0.007843, (300, 300), 127.5)
    # pass the blob through the network and obtain the detections and
    # predictions
    net.setInput(blob)
    detections = net.forward()
    # loop over the detections
    for i in np.arange(0, detections.shape[2]):
        # extract the confidence (i.e., probability) associated with
        # the prediction
        confidence = detections[0, 0, i, 2]
        # filter out weak detections by ensuring the `confidence` is
        # greater than the minimum confidence
        if confidence > args["confidence"]:
            # extract the index of the class label from the
            # `detections`, then compute the (x, y)-coordinates of
            # the bounding box for the object
            idx = int(detections[0, 0, i, 1])
            box = detections[0, 0, i, 3:7] * np.array([w, h, w, h])
            (startX, startY, endX, endY) = box.astype("int")
            # draw the prediction on the frame
            label = "{}: {:.2f}%".format(CLASSES[idx],
                                         confidence * 100)
            cv2.rectangle(frame, (startX, startY), (endX, endY),
                          COLORS[idx], 2)
            y = startY - 15 if startY - 15 > 15 else startY + 15
            cv2.putText(frame, label, (startX, y),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.5, COLORS[idx], 2)
            if 'person' in label:
                pygame.init()
                pygame.event.get()
                # get_pressed() returns a tuple, which is always truthy, so in
                # practice this check passes for every detected person
                if pygame.mouse.get_pressed():
                    print 'pressing'
                    #tried to detect my character's offset and add the best way to exclude it, failed most tests.
                    if 369 < startX < 1402 and -1 < startY < 725 and 339 < endX < 1805 and 806 < endY < 1017:
                        print 'found myself'
                    else:
                        #print 'found somebody else'
                        nosum = int(round(startX * 1)) + int(round(startX * 0.06))
                        nosum2 = int(round(y * 1)) + int(round(y * 0.7))
                        halfX = (endX - startX) / 2
                        halfY = (endY - startY) / 2
                        finalX = startX + halfX
                        finalY = startY + halfY
                    #    pyautogui.moveTo(finalX, finalY)
                        #win32api.SetCursorPos((finalX, finalY))
                    #    win32api.mouse_event(win32con.MOUSEEVENTF_LEFTDOWN, finalX, finalY, 0, 0)
                    #    win32api.mouse_event(win32con.MOUSEEVENTF_LEFTUP, finalX, finalY, 0, 0)
                        #print 'Pressed L'
                    if 'HSX' not in locals():
                        HSX = startX
                    if 'LSX' not in locals():
                        LSX = startX
                    if 'HSY' not in locals():
                        HSY = startY
                    if 'LSY' not in locals():
                        LSY = startY
                    if 'HEX' not in locals():
                        HEX = endX
                    if 'LEX' not in locals():
                        LEX = endX
                    if 'HEY' not in locals():
                        HEY = endY
                    if 'LEY' not in locals():
                        LEY = endY
                    if startX > HSX:
                        HSX = startX
                    if startX < LSX:
                        LSX = startX
                    if startY > HSY:
                        HSY = startY
                    if startY < LSY:
                        LSY = startY
                    if endX > HEX:
                        HEX = endX
                    if endX < LEX:
                        LEX = endX
                    if endY > HEY:
                        HEY = endY
                    if endY < LEY:
                        LEY = endY
                    print 'HStartX: ' + str(HSX)
                    print 'LStartX: ' + str(LSX)
                    print 'HStartY: ' + str(HSY)
                    print 'LStartY: ' + str(LSY)
                    print 'HendX: ' + str(HEX)
                    print 'LendX: ' + str(LEX)
                    print 'HendY: ' + str(HEY)
                    print 'LendY: ' + str(LEY)
                #print args["confidence"]
#             click(10,10)
    # show the output frame
    cv2.imshow("Frame", frame)
    key = cv2.waitKey(1) & 0xFF
    # if the `k` key was pressed, break from the loop
    if key == ord("k"):
        break
    # update the FPS counter
# stop the timer and display FPS information
# do a bit of cleanup
cv2.destroyAllWindows()
Once you have gotten your code in, make sure to save it and remember which directory it is in. Now go to the search bar and open a command prompt; it should look something like this: C:\Users\[YOUR USERNAME]>. Use cd to navigate to the directory the script is in. If it is in C:\Python27, just do
cd C:\Python27
and it will put you in the directory, and then do
python hack.py
to run the script.
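Since the script defines optional command-line flags, you can also launch it like this (the file names here are just the defaults from the usage comment; point them at wherever you saved the model files):
python hack.py --prototxt MobileNetSSD_deploy.prototxt.txt --model MobileNetSSD_deploy.caffemodel --confidence 0.6
One caveat: as written, the script never actually reads the --prototxt and --model values; it always loads the hard-coded prott1/prott2 paths, so edit those two lines in the code to match your machine.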

If you have any questions put them here as a comment.



Edit: one more thing, the code I pasted in here simply has the part that moves your mouse to shoot commented out.
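For reference, that commented-out block is the one inside the else branch of the self-detection check; uncommented, it looks like this (the win32 calls may want plain ints, so cast finalX/finalY with int() if they complain):
Code:
pyautogui.moveTo(finalX, finalY)
win32api.SetCursorPos((finalX, finalY))
win32api.mouse_event(win32con.MOUSEEVENTF_LEFTDOWN, finalX, finalY, 0, 0)
win32api.mouse_event(win32con.MOUSEEVENTF_LEFTUP, finalX, finalY, 0, 0)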
 
Last edited by a moderator:

Marc Swinther

Insane-Known Member
Messages
177
Reaction score
71
Points
388
Thanks for sharing, needs testing.
Would you be able to provide some photos or a video? Also, explaining what this is for might be a good idea.
 

thegaotu

New Member
Messages
3
Reaction score
0
Points
1
frame = np.array(ImageGrab.grab(bbox=(0, 40, 1820, 1240)))
is where I get an "IndentationError: expected an indented block". How would I fix that?
 

CabCon

Head Administrator
Staff member
Head Staff Team
Messages
5,093
Reaction score
2,881
Points
1,103
As @Marc Swinther already mentioned, can we get any preview of it?
 

CabCon

Head Administrator
Staff member
Head Staff Team
Messages
5,093
Reaction score
2,881
Points
1,103
I also found this tutorial; it might help you:
 

osmand

New Member
Messages
1
Reaction score
0
Points
0
C:\Python27>hacking.py
File "C:\Python27\hacking.py", line 90
if 'person' in label:
^
IndentationError: unindent does not match any outer indentation level

C:\Python27>
 

cobrasteel

New Member
Messages
1
Reaction score
0
Points
1
Same error here
if 'person' in label:
^
IndentationError: unindent does not match any outer indentation level
 

genesis

New Member
Messages
1
Reaction score
0
Points
1
I reindented your code and updated it for Python 3, but I have just one problem with the following paths on lines 28 and 29:

prott1 = r'C:\Users\ianyy\Documents\MobileNetSSD_deploy.prototxt.txt'
prott2 = r'C:\Users\ianyy\Documents\MobileNetSSD_deploy.caffemodel'

I know I must change them to match my own user folder, but the bigger problem is that you don't give us the two files:

MobileNetSSD_deploy.prototxt.txt and MobileNetSSD_deploy.caffemodel.

Can you share them with the community? Thanks in advance (or not XD).

For the people who want the corrected code in Python 3+ (lines 28 and 29 are yours to change :wink:), here it is. N.B. If the indentation goes off when this is published, you just need to fix it with the Tab key in your Atom editor :smile::

# USAGE
# python real_time_object_detection.py --prototxt MobileNetSSD_deploy.prototxt.txt --model MobileNetSSD_deploy.caffemodel
# import the necessary packages
import win32api
import keyboard as keyboard
import pygame as pygame
import pythoncom
import win32con
from PIL import ImageGrab
from imutils.video import VideoStream
from imutils.video import FPS
import numpy as np
import argparse
import imutils
import time
import cv2
import pyautogui
# construct the argument parse and parse the arguments
from keyboard._mouse_event import RIGHT
ap = argparse.ArgumentParser()
ap.add_argument("-p", "--prototxt", required=False,
                help="path to Caffe 'deploy' prototxt file")
ap.add_argument("-m", "--model", required=False,
                help="path to Caffe pre-trained model")
ap.add_argument("-c", "--confidence", type=float, default=0.6,
                help="minimum probability to filter weak detections")
args = vars(ap.parse_args())
prott1 = r'C:\Users\ianyy\Documents\MobileNetSSD_deploy.prototxt.txt'
prott2 = r'C:\Users\ianyy\Documents\MobileNetSSD_deploy.caffemodel'
# initialize the list of class labels MobileNet SSD was trained to
# detect, then generate a set of bounding box colors for each class
CLASSES = ["background", "aeroplane", "bicycle", "bird", "boat",
           "bottle", "bus", "car", "cat", "chair", "cow", "diningtable",
           "dog", "horse", "motorbike", "person", "pottedplant", "sheep",
           "sofa", "train", "tvmonitor"]
COLORS = np.random.uniform(0, 255, size=(len(CLASSES), 3))
# load our serialized model from disk
print("[INFO] loading model...")
net = cv2.dnn.readNetFromCaffe(prott1, prott2)
# initialize the video stream, allow the camera sensor to warm up,
# and initialize the FPS counter
print("[INFO] starting video stream...")
# (the webcam stream is started here, but the loop below actually grabs the screen)
vs = VideoStream(src=0).start()
time.sleep(2.0)
fps = FPS().start()
# running min/max trackers for the detected bounding box coordinates
HSX = 100
LSX = 1000
HSY = 100
LSY = 1000
HEX = 100
LEX = 1000
HEY = 100
LEY = 1000
# loop over the frames grabbed from the screen
while True:
    # grab a frame of the screen region the game renders in
    frame = np.array(ImageGrab.grab(bbox=(0, 40, 1820, 1240)))
    # frame = imutils.resize(frame, width=400)
    # grab the frame dimensions and convert it to a blob
    (h, w) = frame.shape[:2]
    blob = cv2.dnn.blobFromImage(cv2.resize(frame, (300, 300)),
                                 0.007843, (300, 300), 127.5)
    # pass the blob through the network and obtain the detections and
    # predictions
    net.setInput(blob)
    detections = net.forward()
    # loop over the detections
    for i in np.arange(0, detections.shape[2]):
        # extract the confidence (i.e., probability) associated with
        # the prediction
        confidence = detections[0, 0, i, 2]
        # filter out weak detections by ensuring the `confidence` is
        # greater than the minimum confidence
        if confidence > args["confidence"]:
            # extract the index of the class label from the
            # `detections`, then compute the (x, y)-coordinates of
            # the bounding box for the object
            idx = int(detections[0, 0, i, 1])
            box = detections[0, 0, i, 3:7] * np.array([w, h, w, h])
            (startX, startY, endX, endY) = box.astype("int")
            # draw the prediction on the frame
            label = "{}: {:.2f}%".format(CLASSES[idx],
                                         confidence * 100)
            cv2.rectangle(frame, (startX, startY), (endX, endY),
                          COLORS[idx], 2)
            y = startY - 15 if startY - 15 > 15 else startY + 15
            cv2.putText(frame, label, (startX, y),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.5, COLORS[idx], 2)
            if 'person' in label:
                pygame.init()
                pygame.event.get()
                # get_pressed() returns a tuple, which is always truthy, so in
                # practice this check passes for every detected person
                if pygame.mouse.get_pressed():
                    print('pressing')
                    # tried to detect my character's offset and add the best way to exclude it, failed most tests.
                    if 369 < startX < 1402 and -1 < startY < 725 and 339 < endX < 1805 and 806 < endY < 1017:
                        print('found myself')
                    else:
                        # print('found somebody else')
                        nosum = int(round(startX * 1)) + int(round(startX * 0.06))
                        nosum2 = int(round(y * 1)) + int(round(y * 0.7))
                        # integer division keeps the coordinates ints under Python 3
                        halfX = (endX - startX) // 2
                        halfY = (endY - startY) // 2
                        # cast to plain ints for the win32 calls
                        finalX = int(startX + halfX)
                        finalY = int(startY + halfY)
                        pyautogui.moveTo(finalX, finalY)
                        win32api.SetCursorPos((finalX, finalY))
                        win32api.mouse_event(win32con.MOUSEEVENTF_LEFTDOWN, finalX, finalY, 0, 0)
                        win32api.mouse_event(win32con.MOUSEEVENTF_LEFTUP, finalX, finalY, 0, 0)
                        print('Pressed L')
                    if 'HSX' not in locals():
                        HSX = startX
                    if 'LSX' not in locals():
                        LSX = startX
                    if 'HSY' not in locals():
                        HSY = startY
                    if 'LSY' not in locals():
                        LSY = startY
                    if 'HEX' not in locals():
                        HEX = endX
                    if 'LEX' not in locals():
                        LEX = endX
                    if 'HEY' not in locals():
                        HEY = endY
                    if 'LEY' not in locals():
                        LEY = endY
                    if startX > HSX:
                        HSX = startX
                    if startX < LSX:
                        LSX = startX
                    if startY > HSY:
                        HSY = startY
                    if startY < LSY:
                        LSY = startY
                    if endX > HEX:
                        HEX = endX
                    if endX < LEX:
                        LEX = endX
                    if endY > HEY:
                        HEY = endY
                    if endY < LEY:
                        LEY = endY
                    print('HStartX: ' + str(HSX))
                    print('LStartX: ' + str(LSX))
                    print('HStartY: ' + str(HSY))
                    print('LStartY: ' + str(LSY))
                    print('HendX: ' + str(HEX))
                    print('LendX: ' + str(LEX))
                    print('HendY: ' + str(HEY))
                    print('LendY: ' + str(LEY))
                print(args["confidence"])
                # click(10, 10)  # undefined helper, left commented out so the script runs
    # show the output frame
    cv2.imshow("Frame", frame)
    key = cv2.waitKey(1) & 0xFF
    # if the `k` key was pressed, break from the loop
    if key == ord("k"):
        break
    # update the FPS counter
# stop the timer and display FPS information
# do a bit of cleanup
cv2.destroyAllWindows()
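If the indentation gets mangled again when you copy this out of the forum, you can check that the file at least parses before running it (assuming you saved it as hack.py):
Code:
python -m py_compile hack.py
py_compile prints nothing when the file is valid and points at the offending line when it is not.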
 

lkakashv

New Member
Messages
1
Reaction score
0
Points
0
Attached is the Python 3.8 version of the code, working for a 2K screen resolution; you can also download the pre-trained model there ->
Please, Log in or Register to view URLs content!
 

Attachments

  • code.txt
    7 KB · Views: 434