import numpy as np
import mxnet as mx
from mxnet import nd, autograd, gluon

model_ctx = mx.gpu(0)
# model_ctx = mx.cpu()
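
# Hedged alternative (an addition, not in the original paste): pick the GPU
# only when one is available. mx.context.num_gpus() is assumed to exist in
# the installed MXNet version.
# model_ctx = mx.gpu(0) if mx.context.num_gpus() > 0 else mx.cpu()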

batch_size = 128  # Number of examples per batch
epochs = 100      # Number of epochs (full passes over the training data)

# Applied to the X and y parts of the loaded data.
def transform_func(data, label):
    # Standard MNIST preprocessing: cast to float and divide the
    # X values by 255 so they fall in the range [0, 1].
    return data.astype(np.float32)/255., label.astype(np.float32)

train_data = gluon.data.DataLoader(gluon.data.vision.MNIST(train=True,
                                   transform=transform_func), batch_size, shuffle=True)
test_data = gluon.data.DataLoader(gluon.data.vision.MNIST(train=False,
                                  transform=transform_func), batch_size, shuffle=False)
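
# Illustrative check (an addition, not in the original paste): peek at one
# batch to confirm the shapes the training loop below expects. Raw MNIST
# images arrive as (28, 28, 1), which is why they are flattened to 784 later.
for data, label in train_data:
    print(data.shape, label.shape)  # expected: (128, 28, 28, 1) (128,)
    break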

m = gluon.nn.Sequential()  # Model
with m.name_scope():  # TF-like name_scope definition
    # Keras-like Sequential.add()
    m.add(gluon.nn.Dense(100, activation="relu"))
    m.add(gluon.nn.Dense(256, activation="relu"))
    m.add(gluon.nn.Dense(100, activation="relu"))
    m.add(gluon.nn.Dense(10))

m.collect_params().initialize(mx.init.Normal(sigma=0.2), ctx=model_ctx)
# Initial parameter values are drawn from a Normal distribution with std 0.2
loss = gluon.loss.SoftmaxCrossEntropyLoss()  # Classification loss function
trainer = gluon.Trainer(m.collect_params(), "sgd", {"learning_rate": 0.01})
# Train the parameters with SGD at a learning rate of 0.01

print(m)  # Print the model

# Training loop
for e in range(epochs):  # For each epoch
    for i, (data, label) in enumerate(train_data):  # For each batch
        # Move X to the model context
        data = data.as_in_context(model_ctx).reshape([-1, 784])
        # Move y to the model context
        label = label.as_in_context(model_ctx)
        with autograd.record():  # Scope in which autograd records the graph
            output = m(data)
            l = loss(output, label)  # Compute the loss between y and net(X)
        l.backward()  # PyTorch-style backward pass
        trainer.step(batch_size)  # Update parameters; gradients are scaled by 1/batch_size

print(m)  # Print the model again after training

# Test loop
acc = mx.metric.Accuracy()  # Accuracy metric
for i, (data, label) in enumerate(test_data):  # For each batch
    data = data.as_in_context(model_ctx).reshape([-1, 784])
    label = label.as_in_context(model_ctx)
    output = m(data)  # Compute the net(X) output
    preds = nd.argmax(output, axis=1)
    # Take the index of the largest output score (these are logits, not
    # probabilities, but argmax is the same either way).
    # With 3 classes the indices would be 0, 1, 2;
    # if the scores were [0.6, 0.4, 0.3],
    # max = 0.6 and argmax = 0.
    acc.update(preds=preds, labels=label)  # Update the metric
print(acc)  # Print the accuracy
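
A minimal follow-up sketch, assuming the trained m, transform_func, and model_ctx above are still in scope: classify a single test image and print the predicted digit next to the true label. The sample index 0 is an illustrative choice, not part of the original paste.

# Classify one image from the raw test set (illustrative addition).
sample, true_label = gluon.data.vision.MNIST(train=False, transform=transform_func)[0]
x = sample.as_in_context(model_ctx).reshape([-1, 784])
pred = int(nd.argmax(m(x), axis=1).asscalar())
print("predicted:", pred, "actual:", int(true_label))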