#!/usr/bin/python2
import os, sys, json, subprocess, shutil, time
import compare_jsons_final
DEFAULT_PATH_TO_PRODMARK_GO = 'C:/Job/prodmark/bin.amd64-windows.release/prodmark_go.exe'
DEFAULT_PATH_TO_INIT_PATH = 'C:/Job/prodmark/data'
DEFAULT_PATH_TO_DEBUG_IMG_DIR = 'C:/Job/prodmark/debug_images'
DEFAULT_PATH_TO_ERROR_FILE = 'prodmark_runner_errors.txt'
def print_usage():
print 'Usage: %s <images_dir> <markup_dir> <lst_file> <wiki or dev output (1 or 0)> <int or ext interface (1 or 0)> [<runner> <init-path> <debug-dir>]' % sys.argv[0]
failed_jobs = 0
# Wall-clock timing for individual prodmark_go runs; compute_statistics()
# reads these, so they must be initialised before any job executes.
time_best = float('inf')
time_worst = 0.0
time_avg = 0.0
def process_job((clip_dir, markup_path, compare_path, stdout_file, stderr_file)):
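    '''Run prodmark_go on a single clip, compare its JSON output against the
    reference markup via compare_jsons_final, and write the comparison JSON.'''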
global failed_jobs, time_best, time_worst, time_avg
args = [PATH_TO_PRODMARK_GO, '--clip', clip_dir, '--markup', markup_path, '--init-path', PATH_TO_INIT_PATH, '--debug-img-path', PATH_TO_DEBUG_IMG_DIR, '--use-internal-interface', USE_INTERNAL_INTERFACE]
#print args
error_file = []
all_serials = []
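    # Make sure the markup file (and its directory) exists so it can be opened below.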
    if not os.path.exists(markup_path):
        markup_dir_path = os.path.split(markup_path)[0]
        if not os.path.exists(markup_dir_path):
            os.makedirs(markup_dir_path)
        with open(markup_path, 'w+') as create_stream:
            pass
    try:
        # Time each prodmark_go run; compute_statistics() reports best/worst/average.
        start_time = time.time()
        with open(stdout_file, 'w') as stdout_stream:
            with open(stderr_file, 'w') as stderr_stream:
                subprocess.check_call(args, stdout = stdout_stream, stderr = stderr_stream)
        elapsed = time.time() - start_time
        time_best = min(time_best, elapsed)
        time_worst = max(time_worst, elapsed)
        time_avg += elapsed
    except Exception as e:
        error_file.append('Runner failed at job ONE ' + str(clip_dir))
        for arg in args:
            error_file.append(str(arg))
        failed_jobs += 1
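    # Parse the recognizer's JSON report from the captured stdout.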
result = None
    try:
        with open(stdout_file, 'r') as stdout_stream:
            result = json.load(stdout_stream)
    except Exception as e:
        error_file.append('Runner failed at job TWO ' + str(clip_dir))
        for arg in args:
            error_file.append(str(arg))
if result is not None:
markup = None
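        # Load the reference markup and collect zone serials; if the markup is
        # missing or unreadable, bootstrap it from the recognizer's own output.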
try:
with open(markup_path, 'r') as markup_stream:
markup = json.load(markup_stream)
for zone in markup['zones']:
all_serials.append(str(zone['serial']))
except Exception as e:
with open(stdout_file, 'r') as stdout_stream:
markup = json.load(stdout_stream)
with open(markup_path, 'w') as markup_stream:
json.dump(markup, markup_stream, indent = 2)
compare = compare_jsons_final.compare_results(markup, result)
with open(compare_path, 'w') as compare_stream:
json.dump(compare, compare_stream, indent = 2)
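    # Append the collected diagnostics and zone serials to their log files.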
    with open(PATH_TO_ERROR_FILE, 'a') as err_file:
        for line in error_file:
            if line is not None:
                err_file.write(line + '\n')
    with open('c:/serials.txt', 'a') as serials_file:
        for line in all_serials:
            if line is not None:
                serials_file.write(line + '\n')
def compute_statistics(compare_list, toJson):
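    '''Aggregate the per-clip comparison JSONs into overall statistics and
    print them, either human-readable (toJson == '0') or as a JSON digest.'''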
global failed_jobs
created = 0
correct = 0
fatal_errors = 0
digit_count = 0
digit_correct = 0
    # Per-field digit counters: {field: [correct_digits, overall_digits]}.
    digit_t_count = {'serial': [0, 0], 'row': [0, 0], 'place': [0, 0]}
vertice_count = 0
vertice_correct = 0
error_file = { }
error_type = 0
error_subtype = 0
error_zones = 0
error_zones_geometry = 0
error_num_recog = 0
error_vertice_find = 0
error_poor_rejection = 0
error_unknown = 0
    # Per-doctype counters: {doctype: [fully correct clips, total clips]}.
    doctypes = {'TAG_R1': [0, 0], 'TAG_R2': [0, 0], 'TAG_Y1': [0, 0], 'TAG_N1': [0, 0]}
error_file["DocType"] = [ ]
error_file["DocSubtype"] = [ ]
error_file["Zones"] = [ ]
error_file["ZonesGeom"] = [ ]
error_file["NumRecog"] = [ ]
error_file["ZoneFind"] = [ ]
error_file["PoorRejection"] = {}
error_file["PoorRejection"]["binarization_fault"] = []
error_file["PoorRejection"]["segmentation row place failed"] = []
error_file["PoorRejection"]["segmentation serial failed"] = []
error_file["Unknown"] = [ ]
total = len(compare_list)
num = 1
    for filename in compare_list:
        # print '[%d/%d]' % (num, total)  # uncomment to show per-file progress
        num += 1
        if os.path.exists(filename):
            created += 1
            with open(filename, 'r') as jsonstream:
                compare = json.load(jsonstream)
            # '<doctype> <clip name without .json>' identifies a clip in the error report.
            err_name = compare["original_document_type"] + " " + os.path.split(filename)[1][:-5]
            doctypes[compare["original_document_type"]][1] += 1
            if compare["isRejection"] == 1:
                correct += 1
                continue
            elif compare['RejectionReason'] == "":
                if compare["NO_FATAL_ERRORS"] == 0:
                    fatal_errors += 1
                    if compare["error_type"] == "Wrong document type":
                        error_type += 1
                        error_file["DocType"].append(err_name)
                    elif compare["error_type"] == "Wrong document subtype":
                        error_subtype += 1
                        error_file["DocSubtype"].append(err_name)
                    elif compare["error_type"] == "Wrong number of zones":
                        error_zones += 1
                        error_file["Zones"].append(err_name)
                    elif compare["error_type"] == "Wrong number of zones geometry":
                        error_zones_geometry += 1
                        error_file["ZonesGeom"].append(err_name)
                    else:
                        error_unknown += 1
                        error_file["Unknown"].append(err_name)
                    continue
                has_error = 0
                error = 0
                if "zones" in compare:  # absent for TAG_N documents, which carry no digit zones
                    for zone in compare["zones"]:
                        for key, value in zone.items():
                            if value["correct"] == 0:
                                error = 1
                                has_error = 1
                            digit_count += value["overall_digits"]
                            digit_correct += value["correct_digits"]
                            digit_t_count[key][0] += value["correct_digits"]
                            digit_t_count[key][1] += value["overall_digits"]
                    if error == 1:
                        error_num_recog += 1
                        fatal_errors += 1
                        error_file["NumRecog"].append(err_name)
                    error = 0
                for zone in compare["zones_geometry"]:
                    for key, value in zone.items():
                        if value["overall"] != 4:
                            error = 1
                            has_error = 1
                        vertice_count += 4
                        vertice_correct += value["overall"]
                if error == 1:
                    error_vertice_find += 1
                    fatal_errors += 1
                    error_file["ZoneFind"].append(err_name)
                error = 0
                if has_error == 0:
                    doctypes[compare["original_document_type"]][0] += 1
                    correct += 1
            else:
                error_poor_rejection += 1
                error_file["PoorRejection"][compare["RejectionReason"]].append(err_name)
                continue
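    # Guard against division by zero for fields that never occurred in this run.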
if digit_t_count['place'][1] == 0:
digit_t_count['place'][1] = 1
if digit_t_count['row'][1] == 0:
digit_t_count['row'][1] = 1
    with open(PATH_TO_ERROR_FILE, 'a') as err_file:
        err_file.write('\nFatal errors:\n')
        for err, values in error_file.items():
            if len(values) > 0:
                err_file.write(err + ':\n')
                if isinstance(values, dict):
                    # Nested category (PoorRejection): print each reason and its clips.
                    for name, val in values.items():
                        err_file.write(' ' + name + ':\n')
                        for v in val:
                            err_file.write('\t' + str(v) + '\n')
                else:
                    for val in values:
                        err_file.write('\t' + str(val) + '\n')
    if digit_count == 0:
        digit_correct = 1
        digit_count = 1
    if vertice_count == 0:
        vertice_correct = 1
        vertice_count = 1
    if digit_t_count['serial'][1] == 0:
        digit_t_count['serial'][1] = 1
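    # A doctype with no samples is reported as 1/1 so its percentage stays a neutral 100%.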
for key, value in doctypes.items():
if doctypes[key][1] == 0:
doctypes[key][0] = 1
doctypes[key][1] = 1
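    # toJson == '0' prints the human-readable (dev) summary; any other value
    # emits the compact JSON digest used for the wiki output.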
if toJson == '0':
print '''\n\tValid %d/%d,\n\tCorrect %d/%d,\n\tDigits %d/%d,\n\tVertices %d/%d,\n\n\tTotal: %.2f %%
\t Nums: %.2f %% (serial %.2f %%, row %.2f %%, place %.2f %%)
\t Geom: %.2f %%\n\tWith %d failed jobs and %d algorithmic errors:\n\t DocType %d, DocSubtype %d, Zones %d
\t ZonesGeom %d, Recog %d, ZoneFinder %d, Unknown %d
\tRejected by mistake: %d
\n\tTime: average %.2fs (best %.2fs, worst %.2fs)''' % \
(created, total, correct, total, digit_correct, digit_count, vertice_correct, vertice_count, \
100.0 * correct / total, 100.0 * digit_correct / digit_count, 100.0 * digit_t_count['serial'][0] / digit_t_count['serial'][1], \
100.0 * digit_t_count['row'][0] / digit_t_count['row'][1], 100.0 * digit_t_count['place'][0] / digit_t_count['place'][1], \
100.0 * vertice_correct / vertice_count, failed_jobs, fatal_errors, error_type, error_subtype, error_zones, error_zones_geometry, \
error_num_recog, error_vertice_find, error_unknown, error_poor_rejection, time_avg / total, time_best, time_worst)
else:
json_result = { }
json_result['Correct'] = '%d/%d' % (correct, total)
json_result['Correct%'] = '%.2f %%' % (100.0 * correct / total)
json_result['TAG_R1%'] = '%.2f %%' % (100.0 * doctypes['TAG_R1'][0] / doctypes['TAG_R1'][1])
json_result['TAG_R2%'] = '%.2f %%' % (100.0 * doctypes['TAG_R2'][0] / doctypes['TAG_R2'][1])
json_result['TAG_Y%'] = '%.2f %%' % (100.0 * doctypes['TAG_Y1'][0] / doctypes['TAG_Y1'][1])
json_result['TAG_N%'] = '%.2f %%' % (100.0 * doctypes['TAG_N1'][0] / doctypes['TAG_N1'][1])
json_result['DigitsCorrect%'] = '%.2f %%' % (100.0 * digit_correct / digit_count)
#json_result['VerticesCorrect%'] = '%.2f %%' % (100.0 * vertice_correct / vertice_count)
json_result['DocTypeErrors'] = '%d' % (error_type)
json_result['DocSubtypeErrors'] = '%d' % (error_subtype)
#json_result['ZoneFinderErrors'] = '%d' % (error_zones)
json_result['RecognitionErrors'] = '%d' % (error_num_recog)
#json_result['PoorRejectionError'] = '%d' % (error_poor_rejection)
json.dump(json_result, sys.stdout, indent = 2, sort_keys = True)
if __name__ == '__main__':
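    # Echo a few engine settings so individual runs can be told apart in logs.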
json_data = None
with open("d:\\workspace\\ikea\\data\\prodmark_engine_settings.json", 'r') as stream:
json_data = json.load(stream)
print 'Recognizer = ' + json_data['Recognizer']
print 'FocusThreshold = %d' % (json_data['FocusThreshold'])
print 'integ_good_result = %d' % json_data['IntegratorParameters']['integ_good_result']
print '\n'
try:
        # Exactly five positional arguments, or all eight (adding runner,
        # init-path and debug-dir), are accepted; anything else is a usage error.
        if len(sys.argv) == 6:
            clips_dir = sys.argv[1]
            markup_dir = sys.argv[2]
            lst_file = sys.argv[3]
            toJson = sys.argv[4]
            USE_INTERNAL_INTERFACE = sys.argv[5]
            PATH_TO_PRODMARK_GO = DEFAULT_PATH_TO_PRODMARK_GO
            PATH_TO_INIT_PATH = DEFAULT_PATH_TO_INIT_PATH
            PATH_TO_DEBUG_IMG_DIR = DEFAULT_PATH_TO_DEBUG_IMG_DIR
        elif len(sys.argv) == 9:
            clips_dir = sys.argv[1]
            markup_dir = sys.argv[2]
            lst_file = sys.argv[3]
            toJson = sys.argv[4]
            USE_INTERNAL_INTERFACE = sys.argv[5]
            PATH_TO_PRODMARK_GO = sys.argv[6]
            PATH_TO_INIT_PATH = sys.argv[7]
            PATH_TO_DEBUG_IMG_DIR = sys.argv[8]
        else:
            print_usage()
            sys.exit()
if os.path.exists(PATH_TO_DEBUG_IMG_DIR):
shutil.rmtree(PATH_TO_DEBUG_IMG_DIR)
os.makedirs(PATH_TO_DEBUG_IMG_DIR)
compare_dir = os.path.join(PATH_TO_DEBUG_IMG_DIR, 'compare_dir')
temp_dir = os.path.join(PATH_TO_DEBUG_IMG_DIR, 'temp_dir')
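        # Recreate per-device output folders; the .lst entries are assumed to be
        # paths of the form <device>/<clip>, e.g. iPhone_5/clip001.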
        if os.path.exists(compare_dir):
            shutil.rmtree(compare_dir)
        if os.path.exists(temp_dir):
            shutil.rmtree(temp_dir)
        for device in ('iPhone_4s', 'iPhone_5', 'iPhone_6'):
            os.makedirs(os.path.join(compare_dir, device))
            os.makedirs(os.path.join(temp_dir, device))
just_names_list = []
with open(lst_file, 'r') as stream:
just_names_list = [line.strip() for line in stream]
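        # Parallel path lists, one entry per clip; zip() below packs them into job tuples.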
clips_list = [os.path.join(clips_dir, x) for x in just_names_list]
markup_list = [os.path.join(markup_dir, x + '.json') for x in just_names_list]
compare_list = [os.path.join(compare_dir, x + '.json') for x in just_names_list]
stdout_list = [os.path.join(temp_dir, x + '.json') for x in just_names_list]
stderr_list = [os.path.join(temp_dir, x + '.dump') for x in just_names_list]
PATH_TO_ERROR_FILE = os.path.join(PATH_TO_DEBUG_IMG_DIR, DEFAULT_PATH_TO_ERROR_FILE)
        for i, job in enumerate(zip(clips_list, markup_list, compare_list, stdout_list, stderr_list), 1):
            # print 'Processing job %d/%d' % (i, len(clips_list))  # uncomment for progress
            process_job(job)
compute_statistics(compare_list, toJson)
except Exception as e:
print 'Exception caught:', e
sys.exit(-1)