From 2d146bb6ec1e67156742442f3f3c44c70e0022de Mon Sep 17 00:00:00 2001
From: Daniel Walmsley
Date: Mon, 8 Jul 2024 15:57:39 -0700
Subject: [PATCH] Remove unused code

---
 TTS/tts/layers/xtts/stream_generator.py | 50 -------------------------
 1 file changed, 50 deletions(-)

diff --git a/TTS/tts/layers/xtts/stream_generator.py b/TTS/tts/layers/xtts/stream_generator.py
index de3ae760..a248b0aa 100644
--- a/TTS/tts/layers/xtts/stream_generator.py
+++ b/TTS/tts/layers/xtts/stream_generator.py
@@ -24,21 +24,6 @@ from transformers import (
 from transformers.generation.stopping_criteria import validate_stopping_criteria
 from transformers.generation.utils import GenerateOutput, SampleOutput, logger
 
-def custom_isin(elements, test_elements):
-    # Flatten the tensors
-    elements_flat = elements.view(-1)
-    test_elements_flat = test_elements.view(-1)
-
-    # Create a mask tensor
-    mask = torch.zeros_like(elements_flat, dtype=torch.bool)
-
-    # Compare each element
-    for test_element in test_elements_flat:
-        mask |= (elements_flat == test_element)
-
-    # Reshape the mask to the original elements shape
-    return mask.view(elements.shape)
-
 def setup_seed(seed: int) -> None:
     if seed == -1:
         return
@@ -195,41 +180,6 @@ class NewGenerationMixin(GenerationMixin):
                 generation_config.pad_token_id,
                 generation_config.eos_token_id,
             )
-            # pad_token_tensor = (
-            #     torch.tensor([generation_config.pad_token_id], device=inputs_tensor.device)
-            #     if generation_config.pad_token_id is not None
-            #     else None
-            # )
-            # eos_token_tensor = (
-            #     torch.tensor([generation_config.eos_token_id], device=inputs_tensor.device)
-            #     if generation_config.eos_token_id is not None
-            #     else None
-            # )
-
-            # # hack to produce attention mask for mps devices since transformers bails but pytorch supports torch.isin on mps now
-            # # for this to work, you must run with PYTORCH_ENABLE_MPS_FALLBACK=1 and call model.to(mps_device) on the XttsModel
-            # if inputs_tensor.device.type == "mps":
-            #     default_attention_mask = torch.ones(inputs_tensor.shape[:2], dtype=torch.long, device=inputs_tensor.device)
-
-            #     is_pad_token_in_inputs = (pad_token_tensor is not None) and (
-            #         custom_isin(elements=inputs_tensor, test_elements=pad_token_tensor).any()
-            #     )
-            #     is_pad_token_not_equal_to_eos_token_id = (eos_token_tensor is None) or ~(
-            #         custom_isin(elements=eos_token_tensor, test_elements=pad_token_tensor).any()
-            #     )
-            #     can_infer_attention_mask = is_pad_token_in_inputs * is_pad_token_not_equal_to_eos_token_id
-            #     attention_mask_from_padding = inputs_tensor.ne(pad_token_tensor).long()
-
-            #     model_kwargs["attention_mask"] = (
-            #         attention_mask_from_padding * can_infer_attention_mask
-            #         + default_attention_mask * ~can_infer_attention_mask
-            #     )
-            # else:
-            #     model_kwargs["attention_mask"] = self._prepare_attention_mask_for_generation(
-            #         inputs_tensor,
-            #         pad_token_tensor,
-            #         eos_token_tensor,
-            #     )
 
             # decoder-only models should use left-padding for generation
             if not self.config.is_encoder_decoder:
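
Note, as reviewer context rather than part of the patch: the removed custom_isin
helper was a hand-rolled stand-in for torch.isin, and the removed comment block
was an earlier attention-mask workaround for Apple's MPS backend. Per the
removed comments themselves, PyTorch now supports torch.isin on MPS (when run
with PYTORCH_ENABLE_MPS_FALLBACK=1), so both were dead code. Below is a minimal
sketch of what the helper computed, expressed with the built-in torch.isin; the
tensor values are illustrative only, not taken from the patch:

    import torch

    # Illustrative inputs (hypothetical, not from the patch).
    elements = torch.tensor([[1, 2, 3], [4, 5, 6]])
    test_elements = torch.tensor([2, 5])

    # torch.isin marks each entry of `elements` that appears anywhere in
    # `test_elements`. This is the same result the removed Python loop
    # built by OR-ing together per-element equality masks.
    mask = torch.isin(elements, test_elements)
    # tensor([[False,  True, False],
    #         [False, False,  True]])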