Not a member of Pastebin yet?
Sign up — it unlocks many cool features!
# LightGBM multiclass training configuration: 3 classes, GBDT boosting,
# multi-class log-loss as the CV selection metric.
params = {'task': 'train',
          'boosting_type': 'gbdt',
          'objective': 'multiclass',
          'num_class': 3,
          'metric': 'multi_logloss',
          'learning_rate': 0.002296,
          'max_depth': 7,
          'num_leaves': 17,
          'feature_fraction': 0.4,
          'bagging_fraction': 0.6,
          'bagging_freq': 17}

# Stratified 3-fold CV to find the optimal number of boosting rounds;
# early stopping halts once multi_logloss stops improving for 100 rounds.
lgb_cv = lgbm.cv(params, d_train, num_boost_round=10000, nfold=3,
                 shuffle=True, stratified=True, verbose_eval=20,
                 early_stopping_rounds=100)

# BUG FIX: the position of the minimum mean log-loss is a 0-based index,
# so the optimal round COUNT is argmin + 1. The original used the raw
# index, retraining with one round fewer than the CV optimum.
# np.argmin also avoids the double scan of .index(np.min(...)).
nround = int(np.argmin(lgb_cv['multi_logloss-mean'])) + 1
print(nround)

# Retrain on the full training set for the CV-selected number of rounds.
model = lgbm.train(params, d_train, num_boost_round=nround)

# Per-row class-probability predictions; each row sums to ~1 over the
# 3 classes (shape: (n_samples, 3)).
preds = model.predict(test)
print(preds)
# Example output (one probability row per test sample):
# [[ 7.93856847e-06  9.99989550e-01  2.51164967e-06]
#  [ 7.26332978e-01  1.65316511e-05  2.73650491e-01]
#  [ 7.28564308e-01  8.36756769e-06  2.71427325e-01]
#  ...
#  [ 7.26892634e-01  1.26915179e-05  2.73094674e-01]
#  [ 5.93217601e-01  2.07172044e-04  4.06575227e-01]
#  [ 5.91722491e-05  9.99883828e-01  5.69994435e-05]]
# Collapse each probability row to a hard class label (index of the
# highest probability). Comprehension replaces the original manual
# for/append loop — same elements, same order, idiomatic form.
predictions = [np.argmax(row) for row in preds]
Add Comment
Please sign in to add a comment.