Advertisement
Not a member of Pastebin yet?
Sign up —
it unlocks many cool features!
## Example: training with an attribution (expected-gradients) prior.
## In addition to the usual training setup, build an explainer object
## over the training data so attributions can be penalized in the loss.
from egexplainer import AttributionPriorExplainer

# Dataset wrapper around the training data; sampled as the background
# distribution for expected-gradients attributions.
background_dataset = ExVivoDrugData(X_train, y_train)

# Named hyperparameters (were inline magic numbers).
EG_BATCH_SIZE = 64            # background batch size per attribution draw
EG_K = 2                      # background samples drawn per input
GRAPH_FEATURE_OFFSET = 4862   # first feature index covered by dense_adj
                              # (presumably graph-structured features start
                              # here — TODO confirm against the feature layout)
GRAPH_PENALTY_WEIGHT = 10.0   # strength of the graph penalty in the loss

APExp = AttributionPriorExplainer(background_dataset, EG_BATCH_SIZE, k=EG_K)

## Example training loop using the explainer.
for i, (features, labels) in tqdm(enumerate(train_loader)):
    features, labels = features.cuda().float(), labels.cuda().float()
    optimizer.zero_grad()
    outputs = model(features).view(-1)

    # Expected-gradients attributions for this batch. These stay in the
    # autograd graph, so the penalty below backpropagates into the model.
    eg = APExp.shap_values(model, features)
    ma_eg = eg.abs().mean(0)  # mean |attribution| per feature

    # Graph penalty on the average-magnitude explanations of the
    # graph-structured features: a^T A a with a = attribution column
    # vector, A = dense_adj. (Hoisted: the slice was computed twice.)
    graph_attr = ma_eg.unsqueeze(1)[GRAPH_FEATURE_OFFSET:, :]
    graph_term = graph_attr.t().matmul(dense_adj.matmul(graph_attr))

    # MSE data term plus the weighted attribution-prior penalty.
    loss = mse_term(outputs, labels) + GRAPH_PENALTY_WEIGHT * graph_term
    # NOTE(review): retain_graph=True keeps the whole graph alive past the
    # step and can grow memory each iteration — confirm it is actually
    # required by shap_values' internal gradient computation.
    loss.backward(retain_graph=True)
    optimizer.step()
    train_losses.append(loss.item())
Advertisement
Add Comment
Please, Sign In to add comment
Advertisement