Advertisement
Not a member of Pastebin yet?
Sign Up,
it unlocks many cool features!
- import tensorflow as tf
- ke = tf.keras
- import numpy as np
- import glob
def preprocessing(fn):
    """Read one audio file from disk and return its decoded waveform tensor.

    Args:
        fn: scalar string tensor holding a file path (one element of the
            filename dataset this function is mapped over).

    Returns:
        A float tensor with the decoded audio samples.
    """
    # tf.read_file was the TF 1.x alias; tf.io.read_file works in 1.x and 2.x.
    binary = tf.io.read_file(fn)
    # tf.contrib was removed in TF 2.x, so tf.contrib.ffmpeg.decode_audio no
    # longer exists. The files are globbed as '*.wav' below, so the supported
    # tf.audio.decode_wav decoder applies; it returns (audio, sample_rate).
    audio, _sample_rate = tf.audio.decode_wav(binary)
    # ... etc.
    return audio
# Build the input pipeline from the audio files in the working directory.
list_of_audio_files = glob.glob('*.wav')

# from_tensor_slices is a class-level factory: call it on tf.data.Dataset
# itself. (tf.data.Dataset() raises — the class is abstract.)
dataset = tf.data.Dataset.from_tensor_slices(list_of_audio_files)
# Other ways to build a source dataset (illustrative only — kept as comments
# because `list_of_mix_files` / `list_of_source_files` are not defined here,
# and a random-float dataset cannot be mapped through `preprocessing`):
#   dataset = tf.data.Dataset.from_tensor_slices((list_of_mix_files, list_of_source_files))
#   dataset = tf.data.Dataset.from_tensor_slices(np.random.uniform(-1, 1, [1000, 100]))
# or some other way, there are multiple (see api reference).

# Do preprocessing, then batch/repeat/prefetch. A method chain must be
# wrapped in parentheses to span multiple lines.
dataset = (
    dataset
    .map(preprocessing)
    .batch(512)    # Select batch size.
    .repeat()      # Don't stop getting data when iterated through once (necessary for model.fit).
    .prefetch(100) # Prefetch to ram (there's some other command for prefetching to gpu can't remember).
)
# Define and train the model on the dataset (layers elided in the original).
model = ke.Sequential(...)
model.fit(dataset)

# You can also get the tf tensors for a batch directly from a tf.data.Dataset.
# Again: from_tensor_slices is called on the class, not on an instance.
test_dataset = tf.data.Dataset.from_tensor_slices(np.random.uniform(-1, 1, [1000, 100]))
# NOTE(review): make_initializable_iterator is TF 1.x graph-mode API (removed
# in TF 2.x, where you iterate the dataset directly or use iter(dataset)) —
# confirm which TF version this targets.
test_iterator = test_dataset.make_initializable_iterator()
next_batch = test_iterator.get_next()
# And maybe grab the model outputs for the test data.
test_output = model(next_batch)
Advertisement
Add Comment
Please, Sign In to add comment
Advertisement