mirror of https://github.com/coqui-ai/TTS.git

Add Perfect Sampler and remove storage

parent 8ba3385747
commit 0e372e0b9b
TTS/bin/train_encoder.py

@@ -13,6 +13,7 @@ from trainer.torch import NoamLR
 from TTS.encoder.dataset import EncoderDataset
 from TTS.encoder.losses import AngleProtoLoss, GE2ELoss, SoftmaxAngleProtoLoss
 from TTS.encoder.utils.generic_utils import save_best_model, setup_speaker_encoder_model
+from TTS.encoder.utils.samplers import PerfectBatchSampler
 from TTS.encoder.utils.training import init_training
 from TTS.encoder.utils.visual import plot_embeddings
 from TTS.tts.datasets import load_tts_samples
@@ -41,21 +42,24 @@ def setup_loader(ap: AudioProcessor, is_val: bool = False, verbose: bool = False
         voice_len=c.voice_len,
         num_utter_per_class=c.num_utter_per_class,
         num_classes_in_batch=c.num_classes_in_batch,
-        use_storage=c.use_storage,
-        skip_classes=c.skip_classes,
-        storage_size=c.storage["storage_size"],
-        sample_from_storage_p=c.storage["sample_from_storage_p"],
         verbose=verbose,
         augmentation_config=c.audio_augmentation if not is_val else None,
         use_torch_spec=c.model_params.get("use_torch_spec", False),
     )

-    # sampler = DistributedSampler(dataset) if num_gpus > 1 else None
+    sampler = PerfectBatchSampler(
+        dataset.items,
+        dataset.get_class_list(),
+        batch_size=c.num_classes_in_batch*c.num_utter_per_class, # total batch size
+        num_classes_in_batch=c.num_classes_in_batch,
+        num_gpus=1,
+        shuffle=False if is_val else True,
+        drop_last=True)
+
     loader = DataLoader(
         dataset,
-        batch_size=c.num_classes_in_batch,
-        shuffle=False,
         num_workers=c.num_loader_workers,
+        batch_sampler=sampler,
         collate_fn=dataset.collate_fn,
     )

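Note: with `PerfectBatchSampler` passed as `batch_sampler`, the old `batch_size=` and `shuffle=` arguments had to be dropped, since PyTorch's `DataLoader` treats `batch_sampler` as mutually exclusive with `batch_size`, `shuffle`, `sampler`, and `drop_last`; shuffling now lives inside the sampler. A minimal sketch of this wiring, with a toy stand-in for `EncoderDataset` (names here are illustrative, not from the commit):

    import torch
    from torch.utils.data import DataLoader, Dataset

    class ToyDataset(Dataset):
        # stands in for EncoderDataset: items are dicts carrying a class name
        def __init__(self, items):
            self.items = items
        def __len__(self):
            return len(self.items)
        def __getitem__(self, idx):
            return self.items[idx]
        def collate_fn(self, batch):
            # receives the full list of items picked by one sampler batch
            return [item["class_name"] for item in batch]

    items = [{"class_name": c} for c in "aabbcc"]
    dataset = ToyDataset(items)
    # any iterable of index lists works as a batch_sampler
    loader = DataLoader(dataset, batch_sampler=[[0, 2, 4], [1, 3, 5]], collate_fn=dataset.collate_fn)
    for batch in loader:
        print(batch)  # ['a', 'b', 'c'], then ['a', 'b', 'c']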
@@ -70,12 +74,31 @@ def train(model, optimizer, scheduler, criterion, data_loader, global_step):
     avg_loss_all = 0
     avg_loader_time = 0
     end_time = time.time()
+    print(len(data_loader))
     for _, data in enumerate(data_loader):
         start_time = time.time()

         # setup input data
         inputs, labels = data
+        # agroup samples of each class in the batch. perfect sampler produces [3,2,1,3,2,1] we need [3,3,2,2,1,1]
+        labels = torch.transpose(labels.view(c.num_utter_per_class, c.num_classes_in_batch), 0, 1).reshape(labels.shape)
+        inputs = torch.transpose(inputs.view(c.num_utter_per_class, c.num_classes_in_batch, -1), 0, 1).reshape(inputs.shape)
+        """
+        labels_converted = torch.transpose(labels.view(c.num_utter_per_class, c.num_classes_in_batch), 0, 1).reshape(labels.shape)
+        inputs_converted = torch.transpose(inputs.view(c.num_utter_per_class, c.num_classes_in_batch, -1), 0, 1).reshape(inputs.shape)
+        idx = 0
+        for j in range(0, c.num_classes_in_batch, 1):
+            for i in range(j, len(labels), c.num_classes_in_batch):
+                if not torch.all(labels[i].eq(labels_converted[idx])) or not torch.all(inputs[i].eq(inputs_converted[idx])):
+                    print("Invalid")
+                    print(labels)
+                    exit()
+                idx += 1
+        labels = labels_converted
+        inputs = inputs_converted
+        print(labels)
+        print(inputs.shape)"""

         loader_time = time.time() - end_time
         global_step += 1

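Note: the transpose above is worth a worked example. The sampler emits one index per class per round, so a batch arrives class-interleaved ([3,2,1,3,2,1] in the code comment), while GE2E/AngleProto-style losses expect each class's utterances to be contiguous ([3,3,2,2,1,1]). A standalone check (assuming num_utter_per_class=2 and num_classes_in_batch=3):

    import torch

    num_utter_per_class, num_classes_in_batch = 2, 3
    labels = torch.tensor([3, 2, 1, 3, 2, 1])  # class-interleaved, as yielded by the sampler
    # rows = utterance rounds, columns = classes; transposing groups by class
    grouped = torch.transpose(labels.view(num_utter_per_class, num_classes_in_batch), 0, 1).reshape(labels.shape)
    print(grouped)  # tensor([3, 3, 2, 2, 1, 1])

The commented-out block kept in the diff is the equivalent index-by-index verification of the same regrouping.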
@@ -159,9 +182,10 @@ def main(args):  # pylint: disable=redefined-outer-name
     optimizer = RAdam(model.parameters(), lr=c.lr, weight_decay=c.wd)

     # pylint: disable=redefined-outer-name
-    meta_data_train, meta_data_eval = load_tts_samples(c.datasets, eval_split=False)
+    meta_data_train, meta_data_eval = load_tts_samples(c.datasets, eval_split=True)

-    data_loader, num_classes, map_classid_to_classname = setup_loader(ap, is_val=False, verbose=True)
+    train_data_loader, num_classes, map_classid_to_classname = setup_loader(ap, is_val=False, verbose=True)
+    # eval_data_loader, _, _ = setup_loader(ap, is_val=True, verbose=True)

     if c.loss == "ge2e":
         criterion = GE2ELoss(loss_method="softmax")
@@ -211,7 +235,7 @@ def main(args):  # pylint: disable=redefined-outer-name
         criterion.cuda()

     global_step = args.restore_step
-    _, global_step = train(model, optimizer, scheduler, criterion, data_loader, global_step)
+    _, global_step = train(model, optimizer, scheduler, criterion, train_data_loader, global_step)


 if __name__ == "__main__":
TTS/encoder/configs/base_encoder_config.py

@@ -27,14 +27,6 @@ class BaseEncoderConfig(BaseTrainingConfig):

     audio_augmentation: Dict = field(default_factory=lambda: {})

-    use_storage: bool = False
-    storage: Dict = field(
-        default_factory=lambda: {
-            "sample_from_storage_p": 0.66,  # the probability with which we'll sample from the DataSet in-memory storage
-            "storage_size": 15,  # the size of the in-memory storage with respect to a single batch
-        }
-    )
-
     # training params
     max_train_step: int = 1000000  # end training when number of training steps reaches this value.
     loss: str = "angleproto"
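With the storage fields gone, batch composition is controlled purely by the sampler-facing fields that `setup_loader()` reads. A hypothetical sketch (field names taken from the training script above; values purely illustrative):

    # hypothetical values for illustration; BaseEncoderConfig is a dataclass-style
    # Coqpit config, so fields can be set via keyword arguments
    config = BaseEncoderConfig(
        num_classes_in_batch=32,  # classes per batch
        num_utter_per_class=4,    # utterances per class -> total batch size 128
        loss="angleproto",
    )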
TTS/encoder/dataset.py

@@ -6,7 +6,6 @@ from torch.utils.data import Dataset

 from TTS.encoder.utils.generic_utils import AugmentWAV, Storage

-
 class EncoderDataset(Dataset):
     def __init__(
         self,
@@ -14,11 +13,7 @@ class EncoderDataset(Dataset):
         meta_data,
         voice_len=1.6,
         num_classes_in_batch=64,
-        use_storage=False,
-        storage_size=1,
-        sample_from_storage_p=0.5,
         num_utter_per_class=10,
-        skip_classes=False,
         verbose=False,
         augmentation_config=None,
         use_torch_spec=None,
@@ -34,30 +29,15 @@
         self.items = meta_data
         self.sample_rate = ap.sample_rate
         self.seq_len = int(voice_len * self.sample_rate)
-        self.num_classes_in_batch = num_classes_in_batch
         self.num_utter_per_class = num_utter_per_class
-        self.skip_classes = skip_classes
-        self.use_storage = use_storage
         self.ap = ap
         self.verbose = verbose
         self.use_torch_spec = use_torch_spec
         self.__parse_items()

-        storage_max_size = storage_size * num_classes_in_batch
-        if self.use_storage:
-            self.storage = Storage(
-                maxsize=storage_max_size, storage_batchs=storage_size, num_classes_in_batch=num_classes_in_batch
-            )
-            self.sample_from_storage_p = float(sample_from_storage_p)
-        else:
-            self.storage = None
-            self.sample_from_storage_p = None
-
-        classes_aux = list(self.classes)
-        classes_aux.sort()
-        self.classname_to_classid = {key: i for i, key in enumerate(classes_aux)}
+        self.classname_to_classid = {key: i for i, key in enumerate(self.classes)}

-        # Augmentation
+        # Data Augmentation
         self.augmentator = None
         self.gaussian_augmentation_config = None
         if augmentation_config:
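Note: deriving `classname_to_classid` from a sorted class list (the sort itself moves into `__parse_items`, further below) keeps class IDs reproducible across runs; a minimal illustration of why the ordering matters:

    # set/dict iteration order for strings can change between Python runs
    # (hash randomization), so unsorted classes could get a different ID each run
    classes = sorted({"speakerB", "speakerA", "speakerC"})
    classname_to_classid = {key: i for i, key in enumerate(classes)}
    print(classname_to_classid)  # {'speakerA': 0, 'speakerB': 1, 'speakerC': 2}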
@@ -71,12 +51,10 @@
         if self.verbose:
             print("\n > DataLoader initialization")
             print(f" | > Classes per Batch: {num_classes_in_batch}")
-            print(f" | > Storage Size: {storage_max_size} instances, each with {num_utter_per_class} utters")
-            print(f" | > Sample_from_storage_p : {self.sample_from_storage_p}")
             print(f" | > Number of instances : {len(self.items)}")
             print(f" | > Sequence length: {self.seq_len}")
             print(f" | > Num Classes: {len(self.classes)}")
-            print(f" | > Classes: {list(self.classes)}")
+            print(f" | > Classes: {self.classes}")


     def load_wav(self, filename):
@@ -84,173 +62,84 @@
         return audio

     def __parse_items(self):
-        self.class_to_utters = {}
+        class_to_utters = {}
         for i in self.items:
             path_ = i["audio_file"]
             speaker_ = i["speaker_name"]
-            if speaker_ in self.speaker_to_utters.keys():
-                self.speaker_to_utters[speaker_].append(path_)
+            if class_name in class_to_utters.keys():
+                class_to_utters[class_name].append(path_)
             else:
-                self.class_to_utters[class_name] = [
+                class_to_utters[class_name] = [
                     path_,
                 ]

-        if self.skip_classes:
-            self.class_to_utters = {
-                k: v for (k, v) in self.class_to_utters.items() if len(v) >= self.num_utter_per_class
-            }
-
-        self.classes = [k for (k, v) in self.class_to_utters.items()]
+        # skip classes with number of samples >= self.num_utter_per_class
+        class_to_utters = {
+            k: v for (k, v) in class_to_utters.items() if len(v) >= self.num_utter_per_class
+        }
+
+        self.classes = list(class_to_utters.keys())
+        self.classes.sort()
+
+        new_items = []
+        for item in self.items:
+            path_ = item[1]
+            class_name = item[2]
+            # ignore filtered classes
+            if class_name not in self.classes:
+                continue
+            # ignore small audios
+            if self.load_wav(path_).shape[0] - self.seq_len <= 0:
+                continue
+
+            new_items.append({"wav_file_path": path_, "class_name": class_name})
+
+        self.items = new_items

     def __len__(self):
-        return int(1e10)
+        return len(self.items)

     def get_num_classes(self):
         return len(self.classes)

+    def get_class_list(self):
+        return list(self.classes)
+
     def get_map_classid_to_classname(self):
         return dict((c_id, c_n) for c_n, c_id in self.classname_to_classid.items())

-    def __sample_class(self, ignore_classes=None):
-        class_name = random.sample(self.classes, 1)[0]
-        # if list of classes_id is provide make sure that it's will be ignored
-        if ignore_classes and self.classname_to_classid[class_name] in ignore_classes:
-            while True:
-                class_name = random.sample(self.classes, 1)[0]
-                if self.classname_to_classid[class_name] not in ignore_classes:
-                    break
-
-        if self.num_utter_per_class > len(self.class_to_utters[class_name]):
-            utters = random.choices(self.class_to_utters[class_name], k=self.num_utter_per_class)
-        else:
-            utters = random.sample(self.class_to_utters[class_name], self.num_utter_per_class)
-        return class_name, utters
-
-    def __sample_class_utterances(self, class_name):
-        """
-        Sample all M utterances for the given class_name.
-        """
-        wavs = []
+    def __getitem__(self, idx):
+        return self.items[idx]
+
+    def collate_fn(self, batch):
+        # get the batch class_ids
         labels = []
-        for _ in range(self.num_utter_per_class):
-            # TODO:dummy but works
-            while True:
-                # remove classes that have num_utter less than 2
-                if len(self.class_to_utters[class_name]) > 1:
-                    utter = random.sample(self.class_to_utters[class_name], 1)[0]
-                else:
-                    if class_name in self.classes:
-                        self.classes.remove(class_name)
-
-                    class_name, _ = self.__sample_class()
-                    continue
-
-                wav = self.load_wav(utter)
-                if wav.shape[0] - self.seq_len > 0:
-                    break
-
-            if utter in self.class_to_utters[class_name]:
-                self.class_to_utters[class_name].remove(utter)
+        feats = []
+        for item in batch:
+            utter_path = item["wav_file_path"]
+            class_name = item["class_name"]
+
+            # get classid
+            class_id = self.classname_to_classid[class_name]
+            # load wav file
+            wav = self.load_wav(utter_path)
+            offset = random.randint(0, wav.shape[0] - self.seq_len)
+            wav = wav[offset : offset + self.seq_len]

             if self.augmentator is not None and self.data_augmentation_p:
                 if random.random() < self.data_augmentation_p:
                     wav = self.augmentator.apply_one(wav)

-            wavs.append(wav)
-            labels.append(self.classname_to_classid[class_name])
-        return wavs, labels
-
-    def __getitem__(self, idx):
-        class_name, _ = self.__sample_class()
-        class_id = self.classname_to_classid[class_name]
-        return class_name, class_id
-
-    def __load_from_disk_and_storage(self, class_name):
-        # don't sample from storage, but from HDD
-        wavs_, labels_ = self.__sample_class_utterances(class_name)
-        # put the newly loaded item into storage
-        if self.use_storage:
-            self.storage.append((wavs_, labels_))
-        return wavs_, labels_
-
-    def collate_fn(self, batch):
-        # get the batch class_ids
-        batch = np.array(batch)
-        classes_id_in_batch = set(batch[:, 1].astype(np.int32))
-
-        labels = []
-        feats = []
-        classes = set()
-
-        for class_name, class_id in batch:
-            class_id = int(class_id)
-
-            # ensure that an class appears only once in the batch
-            if class_id in classes:
-
-                # remove current class
-                if class_id in classes_id_in_batch:
-                    classes_id_in_batch.remove(class_id)
-
-                class_name, _ = self.__sample_class(ignore_classes=classes_id_in_batch)
-                class_id = self.classname_to_classid[class_name]
-                classes_id_in_batch.add(class_id)
-
-            if self.use_storage and random.random() < self.sample_from_storage_p and self.storage.full():
-                # sample from storage (if full)
-                wavs_, labels_ = self.storage.get_random_sample_fast()
-
-                # force choose the current class or other not in batch
-                # It's necessary for ideal training with AngleProto and GE2E losses
-                if labels_[0] in classes_id_in_batch and labels_[0] != class_id:
-                    attempts = 0
-                    while True:
-                        wavs_, labels_ = self.storage.get_random_sample_fast()
-                        if labels_[0] == class_id or labels_[0] not in classes_id_in_batch:
-                            break
-
-                        attempts += 1
-                        # Try 5 times after that load from disk
-                        if attempts >= 5:
-                            wavs_, labels_ = self.__load_from_disk_and_storage(class_name)
-                            break
+            if not self.use_torch_spec:
+                mel = self.ap.melspectrogram(wav)
+                feats.append(torch.FloatTensor(mel))
             else:
-                # don't sample from storage, but from HDD
-                wavs_, labels_ = self.__load_from_disk_and_storage(class_name)
-
-            # append class for control
-            classes.add(labels_[0])
-
-            # remove current class and append other
-            if class_id in classes_id_in_batch:
-                classes_id_in_batch.remove(class_id)
-
-            classes_id_in_batch.add(labels_[0])
-
-            # get a random subset of each of the wavs and extract mel spectrograms.
-            feats_ = []
-            for wav in wavs_:
-                offset = random.randint(0, wav.shape[0] - self.seq_len)
-                wav = wav[offset : offset + self.seq_len]
-                # add random gaussian noise
-                if self.gaussian_augmentation_config and self.gaussian_augmentation_config["p"]:
-                    if random.random() < self.gaussian_augmentation_config["p"]:
-                        wav += np.random.normal(
-                            self.gaussian_augmentation_config["min_amplitude"],
-                            self.gaussian_augmentation_config["max_amplitude"],
-                            size=len(wav),
-                        )
-
-                if not self.use_torch_spec:
-                    mel = self.ap.melspectrogram(wav)
-                    feats_.append(torch.FloatTensor(mel))
-                else:
-                    feats_.append(torch.FloatTensor(wav))
-
-            labels.append(torch.LongTensor(labels_))
-            feats.extend(feats_)
+                feats.append(torch.FloatTensor(wav))
+
+            labels.append(class_id)

         feats = torch.stack(feats)
-        labels = torch.stack(labels)
+        labels = torch.LongTensor(labels)

         return feats, labels
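Note: the dataset is now a plain map-style dataset; `__getitem__` hands back the raw item dict and all wav loading, random cropping, and feature extraction moved into `collate_fn`. A self-contained sketch of the resulting tensor shapes (stubbed `load_wav`, illustrative sizes; the real code uses `ap.melspectrogram` when `use_torch_spec` is off):

    import random
    import numpy as np
    import torch

    seq_len = 16000  # e.g. 1.0 s at 16 kHz

    def load_wav(path):  # stub standing in for the AudioProcessor loader
        return np.random.randn(3 * seq_len).astype(np.float32)

    def collate_fn(batch, classname_to_classid):
        feats, labels = [], []
        for item in batch:
            wav = load_wav(item["wav_file_path"])
            offset = random.randint(0, wav.shape[0] - seq_len)
            feats.append(torch.FloatTensor(wav[offset : offset + seq_len]))  # use_torch_spec branch
            labels.append(classname_to_classid[item["class_name"]])
        return torch.stack(feats), torch.LongTensor(labels)

    batch = [{"wav_file_path": "a.wav", "class_name": "spk0"},
             {"wav_file_path": "b.wav", "class_name": "spk1"}]
    feats, labels = collate_fn(batch, {"spk0": 0, "spk1": 1})
    print(feats.shape, labels)  # torch.Size([2, 16000]) tensor([0, 1])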
TTS/encoder/utils/samplers.py (new file)

@@ -0,0 +1,100 @@
+import torch
+import random
+from torch.utils.data.sampler import Sampler, SubsetRandomSampler
+
+
+class SubsetSampler(Sampler):
+    """
+    Samples elements sequentially from a given list of indices.
+
+    Args:
+        indices (list): a sequence of indices
+    """
+
+    def __init__(self, indices):
+        self.indices = indices
+
+    def __iter__(self):
+        return (self.indices[i] for i in range(len(self.indices)))
+
+    def __len__(self):
+        return len(self.indices)
+
+
+class PerfectBatchSampler(Sampler):
+    """
+    Samples a mini-batch of indices for a balanced class batching
+
+    Args:
+        dataset_items(list): dataset items to sample from.
+        classes (list): list of classes of dataset_items to sample from.
+        batch_size (int): total number of samples to be sampled in a mini-batch.
+        num_gpus (int): number of GPU in the data parallel mode.
+        shuffle (bool): if True, samples randomly, otherwise samples sequentially.
+        drop_last (bool): if True, drops last incomplete batch.
+    """
+
+    def __init__(self, dataset_items, classes, batch_size, num_classes_in_batch, num_gpus=1, shuffle=True, drop_last=False):
+
+        assert batch_size % (len(classes) * num_gpus) == 0, (
+            'Batch size must be divisible by number of classes times the number of data parallel devices (if enabled).')
+
+        label_indices = {}
+        for idx in range(len(dataset_items)):
+            label = dataset_items[idx]['class_name']
+            if label not in label_indices: label_indices[label] = []
+            label_indices[label].append(idx)
+
+        if shuffle:
+            self._samplers = [SubsetRandomSampler(label_indices[key]) for key in classes]
+        else:
+            self._samplers = [SubsetSampler(label_indices[key]) for key in classes]
+
+        self._batch_size = batch_size
+        self._drop_last = drop_last
+        self._dp_devices = num_gpus
+        self._num_classes_in_batch = num_classes_in_batch
+
+    def __iter__(self):
+
+        batch = []
+        if self._num_classes_in_batch != len(self._samplers):
+            valid_samplers_idx = random.sample(range(len(self._samplers)), self._num_classes_in_batch)
+        else:
+            valid_samplers_idx = None
+
+        iters = [iter(s) for s in self._samplers]
+        done = False
+
+        while True:
+            b = []
+            for i in range(len(iters)):
+                if valid_samplers_idx is not None and i not in valid_samplers_idx:
+                    continue
+                it = iters[i]
+                idx = next(it, None)
+                if idx is None:
+                    done = True
+                    break
+                b.append(idx)
+            if done: break
+            batch += b
+            if len(batch) == self._batch_size:
+                yield batch
+                batch = []
+                if valid_samplers_idx is not None:
+                    valid_samplers_idx = random.sample(range(len(self._samplers)), self._num_classes_in_batch)
+
+        if not self._drop_last:
+            if len(batch) > 0:
+                groups = len(batch) // self._num_classes_in_batch
+                if groups % self._dp_devices == 0:
+                    yield batch
+                else:
+                    batch = batch[:(groups // self._dp_devices) * self._dp_devices * self._num_classes_in_batch]
+                    if len(batch) > 0:
+                        yield batch
+
+    def __len__(self):
+        class_batch_size = self._batch_size // self._num_classes_in_batch
+        return min(((len(s) + class_batch_size - 1) // class_batch_size) for s in self._samplers)
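A usage sketch of the new sampler on toy items (assumes `PerfectBatchSampler` from the file above is in scope; three classes, all kept in every batch, two utterances per class per batch). Every yielded batch holds exactly batch_size // num_classes_in_batch indices per class, the "perfect" balance the GE2E/AngleProto losses rely on; note the class-interleaved order inside a batch, which is what the transpose in train() undoes:

    # toy items in the {"class_name": ...} shape the sampler indexes
    items = [{"class_name": c} for c in ["a", "a", "a", "b", "b", "b", "c", "c", "c"]]
    sampler = PerfectBatchSampler(
        items,
        classes=["a", "b", "c"],
        batch_size=6,            # 3 classes x 2 utterances per class
        num_classes_in_batch=3,
        shuffle=False,           # sequential, to make the output deterministic
        drop_last=True,
    )
    print(list(sampler))  # [[0, 3, 6, 1, 4, 7]] -- one index per class, interleaved, twice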