Not a member of Pastebin yet?
Sign Up,
it unlocks many cool features!
- import torchvision
- import numpy as np
- from torchvision import transforms
- import torch
- from torch.utils.data import Dataset
- from torch.utils.data import Subset
- from sklearn.model_selection import train_test_split
- import avalanche
- from avalanche.benchmarks.generators import nc_scenario, ni_scenario
class IDSDataset(Dataset):
    """Intrusion-detection dataset backed by two ``.npy`` files.

    Features come from one array, labels from another; a sample is the
    tuple ``(features_row, int(label_row[0]))``, optionally passed through
    ``transform``.

    Parameters
    ----------
    transform : callable or None
        Optional callable applied to each ``(features, label)`` sample.
    path_ds : str
        Path to the feature array (``.npy``). Defaults to the previously
        hard-coded location, so existing callers are unaffected.
    path_lab : str
        Path to the label array (``.npy``); the class id is read from
        column 0 of each row.
    """

    def __init__(self, transform=None, path_ds='../data/dataset.npy',
                 path_lab='../data/labels.npy'):
        self.path_ds = path_ds
        self.path_lab = path_lab
        self.ds = np.load(self.path_ds)
        self.label = np.load(self.path_lab)
        self.n_samples = len(self.ds)
        self.transform = transform

    def __len__(self):
        # Number of feature rows loaded from the dataset file.
        return self.n_samples

    def __getitem__(self, index):
        # Label array is 2-D; the class id lives in column 0.
        sample = self.ds[index], int(self.label[index, 0])
        if self.transform:
            sample = self.transform(sample)
        return sample
class ToTensor:
    """Min-max normalise the feature vector and convert it to a Tensor.

    The target passes through unchanged. A small epsilon (1e-5) keeps the
    denominator non-zero when a column is constant.
    """

    def __call__(self, sample):
        features, label = sample
        lo = features.min(axis=0)
        hi = features.max(axis=0)
        scaled = (features - lo) / (hi - lo + 1e-5)
        # torch.Tensor(...) yields a float32 tensor of the scaled values.
        return torch.Tensor(scaled), label
def train_val_dataset(dataset, val_split=0.33):
    """Split ``dataset`` into train/validation ``Subset``s.

    Uses a fixed random_state (42) so the split is reproducible;
    ``val_split`` is the fraction held out for validation.
    Returns ``(train_subset, val_subset)``.
    """
    all_indices = list(range(len(dataset)))
    train_idx, val_idx = train_test_split(
        all_indices, test_size=val_split, random_state=42
    )
    train_part = Subset(dataset, train_idx)
    val_part = Subset(dataset, val_idx)
    return train_part, val_part
def IDS():
    """Build the full IDS dataset (normalised to tensors) and split it.

    Returns
    -------
    tuple
        ``(train_dataset, val_dataset)`` from the fixed 67/33 split
        produced by ``train_val_dataset``.
    """
    full_dataset = IDSDataset(transform=ToTensor())
    return train_val_dataset(full_dataset)
# ---- Driver: build the splits, sanity-print, and create the CL benchmark ----
train_dataset, val_dataset = IDS()

# Quick type sanity check on the pipeline output.
# (Fixed: a stray '"' after the first placeholder and the garbled
# "Type or Targets" label in the original diagnostic string.)
print(f'''Type check here: Type of train_dataset: {type(train_dataset)},
Type of inputs: {type(train_dataset[0][0])}, Type of Targets: {type(train_dataset[0][1])} \n One sample of TrainingData: \n {train_dataset[0]}''')

# New-Classes scenario: 15 experiences, each introducing previously unseen
# classes; task_labels=False gives the class-incremental (single-task) setting.
scenario = nc_scenario(
    train_dataset, val_dataset, n_experiences=15, shuffle=True, seed=1234,
    task_labels=False
)

train_stream = scenario.train_stream
for experience in train_stream:
    t = experience.task_label
    exp_id = experience.current_experience
    training_dataset = experience.dataset
    print('Task {} batch {} -> train'.format(t, exp_id))
    print('This batch contains', len(training_dataset), 'patterns')
Advertisement
Add Comment
Please, Sign In to add comment