Advertisement
Not a member of Pastebin yet?
Sign Up,
it unlocks many cool features!
- import pickle
- from hyperopt import fmin, tpe, hp, Trials
- '''<params>
- <QETopTermins>1</QETopTermins>
- <QEMinDocRank>0.00031185491253484665</QEMinDocRank>
- <QETopLemms>18</QETopLemms>
- <DIPowerMethodDFactor>0.35</DIPowerMethodDFactor>
- <PQuerryEx>true</PQuerryEx>
- <QEDocCount>99</QEDocCount>
- <DocImportance>false</DocImportance>
- <QEQuerrySize>14</QEQuerrySize>
- <TempMaxDailyAnswerSize>15</TempMaxDailyAnswerSize>
- <QEDEInitQuerrySize>8</QEDEInitQuerrySize>
- <PLambda>0.6733578927184871</PLambda>
- <DIDocBoundary>0.7</DIDocBoundary>
- <PDocCount>411</PDocCount>
- <PTemporalMode>true</PTemporalMode>
- <QEDoubleExtension>true</QEDoubleExtension>
- <DIMinLinkScore>0.5</DIMinLinkScore>
- <PSoftOr>0.2814202900764047</PSoftOr>
- <PKeepT>3</PKeepT>
- <PKeepL>6</PKeepL>
- <QESoftOr>0.6613598198243243</QESoftOr>
- <DIAlpha>0.35</DIAlpha>
- </params>
- '''
- '''
- space = {'keep_l': hp.uniform('keep_l', 3, 7),
- 'keep_t': hp.uniform('keep_t',0, 5),
- 'top_l': hp.uniform('top_l', 10, 20),
- 'top_t': hp.uniform('top_t', 0, 5),
- 'q_size': hp.uniform('q_size', 5, 15),
- 'init_q_size': hp.uniform('init_q_size', 3, 10),
- 'QESoftOr': hp.uniform('QESoftOr', 0.2, 0.7),
- 'DIMinLinkScore': hp.uniform('DIMinLinkScore', 0.2, 0.8),
- 'QEMinDocRank': hp.uniform('QEMinDocRank', 0.0, 0.4),
- 'PLambda': hp.uniform('PLambda', 0.3, 0.9),
- 'QEDocCount': hp.uniform('QEDocCount', 50, 80),
- 'PDocCount': hp.uniform('PDocCount', 300, 600),
- 'PSoftOr': hp.uniform('PSoftOr', 0.3, 0.7),
- 'DIDocBoundary': hp.uniform('DIDocBoundary', 0.5, 0.9),
- 'DIAlpha': hp.uniform('DIAlpha', 0.2, 1),
- }
- '''
# Hyperopt search space for the non-temporal / no-document-importance run.
# Integer-valued parameters use quniform(..., q=1) so TPE proposes whole
# numbers directly; the original uniform + int() truncation skews the
# integer distribution (the upper bound is almost never sampled and the
# mass per integer is uneven).  func_to_minimize still applies int(), which
# is a no-op-safe cast on quniform's float draws (e.g. 7.0 -> 7).
space = {
    # Document-importance parameters are frozen at 0.5 for this run —
    # see the constants in func_to_minimize:
    # 'DIMinLinkScore': hp.uniform('DIMinLinkScore', 0.2, 0.8),
    # 'DIDocBoundary': hp.uniform('DIDocBoundary', 0.5, 0.9),
    # 'DIAlpha': hp.uniform('DIAlpha', 0.2, 1),
    'PLambda': hp.uniform('PLambda', 0.5, 0.9),
    'QEQuerrySize': hp.quniform('QEQuerrySize', 7, 20, 1),
    'QETopLemms': hp.quniform('QETopLemms', 10, 20, 1),
    'QETopTermins': hp.quniform('QETopTermins', 0, 5, 1),
    'QEDEInitQuerrySize': hp.quniform('QEDEInitQuerrySize', 3, 8, 1),
    'PKeepL': hp.quniform('PKeepL', 1, 8, 1),
    'PKeepT': hp.quniform('PKeepT', 0, 4, 1),
    'QESoftOr': hp.uniform('QESoftOr', 0.2, 0.8),
    'QEMinDocRank': hp.uniform('QEMinDocRank', 0.0, 0.5),
    'QEDocCount': hp.quniform('QEDocCount', 20, 100, 1),
    'PDocCount': hp.quniform('PDocCount', 200, 600, 1),
    'PSoftOr': hp.uniform('PSoftOr', 0.2, 0.8),
}
def func_to_minimize(space):
    """Hyperopt objective: run one full TSS evaluation with the sampled
    hyper-parameters and return a loss to minimize.

    Writes the sampled values (plus a set of constants fixed for this
    run) into the config of the module-level ``evaluator`` object
    (constructed elsewhere in this script — TODO confirm), runs the
    external tool, scores the answer, and returns ``2 - (mean R1 +
    mean R2)`` so that minimizing the loss maximizes the combined
    R1/R2 metric.

    :param space: dict of sampled hyper-parameter values from hyperopt.
    :return: float loss (lower is better).
    """
    print("Start func...")
    params = evaluator.m_Config.m_Params

    # Parameters that must be integral; the search space samples floats,
    # so cast once here instead of repeating int(...) per line.
    for name in ("QEQuerrySize", "QETopLemms", "QETopTermins",
                 "QEDEInitQuerrySize", "PKeepL", "PKeepT",
                 "QEDocCount", "PDocCount"):
        params[name] = int(space[name])

    # Continuous parameters, passed through unchanged.
    for name in ("QESoftOr", "QEMinDocRank", "PSoftOr", "PLambda"):
        params[name] = space[name]

    # Constants for this run: document importance and temporal mode are
    # disabled, so the DI* values are frozen rather than searched.
    params["DIDocBoundary"] = 0.5
    params["DIAlpha"] = 0.5
    params["DIMinLinkScore"] = 0.5
    params["PTemporalMode"] = "false"
    params["DocImportance"] = "false"
    params["DIPowerMethodDFactor"] = 0.35
    params["TempMaxDailyAnswerSize"] = 15
    params["PQuerryEx"] = "true"
    params["QEDoubleExtension"] = "true"
    params["DIW2VEnable"] = "false"
    params["SlvW2VEnable"] = "false"

    # One evaluation round: write the config, invoke the external TSS
    # binary, then parse and score its answer against the gold standard.
    evaluator.parse_mapping()
    evaluator.parse_gold()
    evaluator.create_args("Fit")
    evaluator.save_config()
    evaluator.call_tss()
    evaluator.parse_answer()
    evaluator.compute_all_metrics()
    evaluator.save_evaluation_in_file()

    score = (evaluator.compute_mean_for_metric(evaluator.m_Metrics["R1"])
             + evaluator.compute_mean_for_metric(evaluator.m_Metrics["R2"]))
    print("End func. Score = " + str(score))
    # Each mean metric is presumably in [0, 1], so 2 - score stays >= 0
    # and minimizing it maximizes R1 + R2.
    return 2 - score
# Resume optimization from a previously checkpointed Trials object, then
# run up to 100 chunks of 10 evaluations each, re-checkpointing after
# every chunk so progress survives a crash of the external tool.
# NOTE(review): the paste lost indentation — the per-chunk checkpoint is
# reconstructed as inside the loop (the only reading under which the
# incremental `max_evals=len(trials.trials) + 10` makes sense).
# SECURITY: pickle.load executes arbitrary code from the file — only safe
# because this checkpoint is produced locally by this same script.
TRIALS_FILE = "no_temp_no_imp_18_01_18.p"

with open(TRIALS_FILE, "rb") as fh:
    trials = pickle.load(fh)

for _ in range(100):
    best = fmin(
        fn=func_to_minimize,
        space=space,
        algo=tpe.suggest,
        # fmin counts evaluations cumulatively across a reused Trials
        # object, so extend the budget by 10 on each pass.
        max_evals=len(trials.trials) + 10,
        trials=trials,
    )
    # Checkpoint after each chunk (with-block closes the handle, unlike
    # the original pickle.dump(..., open(...)) which leaked it).
    with open(TRIALS_FILE, "wb") as fh:
        pickle.dump(trials, fh)
    print(len(trials.trials))

print(best)
Advertisement
Add Comment
Please, Sign In to add comment
Advertisement