SHARE
TWEET

CAPTEHA API Script

a guest Jan 6th, 2020 74 Never
Not a member of Pastebin yet? Sign Up, it unlocks many cool features!
  1. #!/usr/bin/env python3
  2. # Fridosleigh.com CAPTEHA API - Made by Krampus Hollyfeld
  3. import requests
  4. import json
  5. import sys
  6. import base64
  7. import os
  8. os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
  9. import tensorflow as tf
  10. tf.logging.set_verbosity(tf.logging.ERROR)
  11. import numpy as np
  12. import threading
  13. import queue
  14. import time
  15. import re
  16. import shutil
  17.  
  18. def load_labels(label_file):
  19.     label = []
  20.     proto_as_ascii_lines = tf.gfile.GFile(label_file).readlines()
  21.     for l in proto_as_ascii_lines:
  22.         label.append(l.rstrip())
  23.     return label
  24.  
  25. def predict_image(q, sess, graph, image_bytes, img_full_path, labels, input_operation, output_operation):
  26.     image = read_tensor_from_image_bytes(image_bytes)
  27.     results = sess.run(output_operation.outputs[0], {
  28.         input_operation.outputs[0]: image
  29.     })
  30.     results = np.squeeze(results)
  31.     prediction = results.argsort()[-5:][::-1][0]
  32.     q.put( {'img_full_path':img_full_path, 'prediction':labels[prediction].title(), 'percent':results[prediction]} )
  33.  
  34. def load_graph(model_file):
  35.     graph = tf.Graph()
  36.     graph_def = tf.GraphDef()
  37.     with open(model_file, "rb") as f:
  38.         graph_def.ParseFromString(f.read())
  39.     with graph.as_default():
  40.         tf.import_graph_def(graph_def)
  41.     return graph
  42.  
  43. def read_tensor_from_image_bytes(imagebytes, input_height=299, input_width=299, input_mean=0, input_std=255):
  44.     image_reader = tf.image.decode_png( imagebytes, channels=3, name="png_reader")
  45.     float_caster = tf.cast(image_reader, tf.float32)
  46.     dims_expander = tf.expand_dims(float_caster, 0)
  47.     resized = tf.image.resize_bilinear(dims_expander, [input_height, input_width])
  48.     normalized = tf.divide(tf.subtract(resized, [input_mean]), [input_std])
  49.     sess = tf.compat.v1.Session()
  50.     result = sess.run(normalized)
  51.     return result
  52.  
  53. def run_ml():
  54.     # Loading the Trained Machine Learning Model created from running retrain.py on the training_images directory
  55.     graph = load_graph('/tmp/retrain_tmp/output_graph.pb')
  56.     labels = load_labels("/tmp/retrain_tmp/output_labels.txt")
  57.  
  58.     # Load up our session
  59.     input_operation = graph.get_operation_by_name("import/Placeholder")
  60.     output_operation = graph.get_operation_by_name("import/final_result")
  61.     sess = tf.compat.v1.Session(graph=graph)
  62.  
  63.     # Can use queues and threading to spead up the processing
  64.     q = queue.Queue()
  65.     unknown_images_dir = 'capteha'
  66.     unknown_images = os.listdir(unknown_images_dir)
  67.    
  68.     #Going to interate over each of our images.
  69.     for image in unknown_images:
  70.         img_full_path = '{}/{}'.format(unknown_images_dir, image)
  71.        
  72.         #print('Processing Image {}'.format(img_full_path))
  73.         # We don't want to process too many images at once. 10 threads max
  74.         while len(threading.enumerate()) > 5:
  75.             time.sleep(0.0001)
  76.  
  77.         #predict_image function is expecting png image bytes so we read image as 'rb' to get a bytes object
  78.         image_bytes = open(img_full_path,'rb').read()
  79.         threading.Thread(target=predict_image, args=(q, sess, graph, image_bytes, img_full_path, labels, input_operation, output_operation)).start()
  80.    
  81.     print('Waiting For Threads to Finish...')
  82.     while q.qsize() < len(unknown_images):
  83.         time.sleep(0.0001)
  84.    
  85.     #getting a list of all threads returned results
  86.     prediction_results = [q.get() for x in range(q.qsize())]
  87.     return prediction_results
  88.  
  89.  
  90. def main():
  91.     yourREALemailAddress = "z89127866x@gmail.com"
  92.  
  93.     # Creating a session to handle cookies
  94.     s = requests.Session()
  95.     url = "https://fridosleigh.com/"
  96.  
  97.     json_resp = json.loads(s.get("{}api/capteha/request".format(url)).text)
  98.     b64_images = json_resp['images']                    # A list of dictionaries eaching containing the keys 'base64' and 'uuid'
  99.     challenge_image_type = json_resp['select_type'].split(',')     # The Image types the CAPTEHA Challenge is looking for.
  100.     challenge_image_types = [challenge_image_type[0].strip(), challenge_image_type[1].strip(), challenge_image_type[2].replace(' and ','').strip()] # cleaning and formatting
  101.  
  102.     print(challenge_image_types)
  103.  
  104.     # Output Capteha Challege Images
  105.     for item in b64_images:
  106.         with open('capteha/'+item['uuid']+'.png', "wb+") as fh:
  107.              i=item['base64'].encode()
  108.              fh.write(base64.decodebytes(i))
  109.              fh.close()
  110.    
  111.     correctuuid = []
  112.     results = run_ml()
  113.     for prediction in results:
  114.         if prediction['prediction'] in challenge_image_types:
  115.              print(prediction['img_full_path']+','+prediction['prediction'] )
  116.              match = re.match(r"capteha/(.*).png",prediction['img_full_path'])
  117.              correctuuid.append(match.group(1))
  118.    
  119.     '''
  120.    MISSING IMAGE PROCESSING AND ML IMAGE PREDICTION CODE GOES HERE
  121.    '''
  122.    
  123.     # This should be JUST a csv list image uuids ML predicted to match the challenge_image_type .
  124.     final_answer = ','.join(correctuuid)
  125.    
  126.     json_resp = json.loads(s.post("{}api/capteha/submit".format(url), data={'answer':final_answer}).text)
  127.     if not json_resp['request']:
  128.         # If it fails just run again. ML might get one wrong occasionally
  129.         print('FAILED MACHINE LEARNING GUESS')
  130.         print('--------------------\nOur ML Guess:\n--------------------\n{}'.format(final_answer))
  131.         print('--------------------\nServer Response:\n--------------------\n{}'.format(json_resp['data']))
  132.         sys.exit(1)
  133.  
  134.     print('CAPTEHA Solved!')
  135.     # If we get to here, we are successful and can submit a bunch of entries till we win
  136.     userinfo = {
  137.         'name':'Krampus Hollyfeld',
  138.         'email':yourREALemailAddress,
  139.         'age':180,
  140.         'about':"Cause they're so flippin yummy!",
  141.         'favorites':'thickmints'
  142.     }
  143.     # If we win the once-per minute drawing, it will tell us we were emailed.
  144.     # Should be no more than 200 times before we win. If more, somethings wrong.
  145.     entry_response = ''
  146.     entry_count = 1
  147.     while yourREALemailAddress not in entry_response and entry_count < 200:
  148.         print('Submitting lots of entries until we win the contest! Entry #{}'.format(entry_count))
  149.         entry_response = s.post("{}api/entry".format(url), data=userinfo).text
  150.         entry_count += 1
  151.     print(entry_response)
  152.  
  153. if __name__ == "__main__":
  154.     main()
RAW Paste Data
We use cookies for various purposes including analytics. By continuing to use Pastebin, you agree to our use of cookies as described in the Cookies Policy. OK, I Understand
Top