Update Librosa Version To V0.10.0

Matthew Boakes 2023-04-05 00:59:20 +01:00
parent 95fa2c9fd6
commit 4c829e74a1
5 changed files with 10 additions and 10 deletions

View File

@@ -149,7 +149,7 @@ def spec_to_mel(spec, n_fft, num_mels, sample_rate, fmin, fmax):
     dtype_device = str(spec.dtype) + "_" + str(spec.device)
     fmax_dtype_device = str(fmax) + "_" + dtype_device
     if fmax_dtype_device not in mel_basis:
-        mel = librosa_mel_fn(sample_rate, n_fft, num_mels, fmin, fmax)
+        mel = librosa_mel_fn(sr=sample_rate, n_fft=n_fft, n_mels=num_mels, fmin=fmin, fmax=fmax)
         mel_basis[fmax_dtype_device] = torch.from_numpy(mel).to(dtype=spec.dtype, device=spec.device)
     mel = torch.matmul(mel_basis[fmax_dtype_device], spec)
     mel = amp_to_db(mel)
@@ -176,7 +176,7 @@ def wav_to_mel(y, n_fft, num_mels, sample_rate, hop_length, win_length, fmin, fm
     fmax_dtype_device = str(fmax) + "_" + dtype_device
     wnsize_dtype_device = str(win_length) + "_" + dtype_device
     if fmax_dtype_device not in mel_basis:
-        mel = librosa_mel_fn(sample_rate, n_fft, num_mels, fmin, fmax)
+        mel = librosa_mel_fn(sr=sample_rate, n_fft=n_fft, n_mels=num_mels, fmin=fmin, fmax=fmax)
         mel_basis[fmax_dtype_device] = torch.from_numpy(mel).to(dtype=y.dtype, device=y.device)
     if wnsize_dtype_device not in hann_window:
         hann_window[wnsize_dtype_device] = torch.hann_window(win_length).to(dtype=y.dtype, device=y.device)
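
Note (editor's aside, not part of the diff): librosa 0.10 only accepts keyword arguments for `librosa.filters.mel`, which `librosa_mel_fn` presumably aliases here, so both cached filterbank builds above switch to the keyword form. A minimal sketch of the new call with illustrative parameter values, not values taken from this commit:

    import librosa

    # librosa >= 0.10: filters.mel() is keyword-only; the old positional
    # call form from librosa 0.8 raises a TypeError.
    mel = librosa.filters.mel(sr=22050, n_fft=1024, n_mels=80, fmin=0.0, fmax=8000.0)
    print(mel.shape)  # (n_mels, 1 + n_fft // 2) -> (80, 513)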

View File

@@ -269,7 +269,7 @@ def compute_f0(
         np.ndarray: Pitch. Shape :math:`[T_pitch,]`. :math:`T_pitch == T_wav / hop_length`

     Examples:
-        >>> WAV_FILE = filename = librosa.util.example_audio_file()
+        >>> WAV_FILE = filename = librosa.example('vibeace')
         >>> from TTS.config import BaseAudioConfig
         >>> from TTS.utils.audio import AudioProcessor
         >>> conf = BaseAudioConfig(pitch_fmax=640, pitch_fmin=1)
@@ -310,7 +310,7 @@ def compute_energy(y: np.ndarray, **kwargs) -> np.ndarray:
     Returns:
         np.ndarray: energy. Shape :math:`[T_energy,]`. :math:`T_energy == T_wav / hop_length`
     Examples:
-        >>> WAV_FILE = filename = librosa.util.example_audio_file()
+        >>> WAV_FILE = filename = librosa.example('vibeace')
         >>> from TTS.config import BaseAudioConfig
         >>> from TTS.utils.audio import AudioProcessor
         >>> conf = BaseAudioConfig()
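
The docstring examples change because `librosa.util.example_audio_file()` no longer exists in recent librosa; example audio is now fetched by key. A minimal sketch of the replacement call (the 'vibeace' clip is downloaded and cached on first use, which requires network access):

    import librosa

    # librosa >= 0.10: fetch a bundled example clip by key.
    wav_file = librosa.example("vibeace")
    y, sr = librosa.load(wav_file, sr=None)
    print(sr, y.shape)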

View File

@@ -243,7 +243,7 @@ class AudioProcessor(object):
         if self.mel_fmax is not None:
             assert self.mel_fmax <= self.sample_rate // 2
         return librosa.filters.mel(
-            self.sample_rate, self.fft_size, n_mels=self.num_mels, fmin=self.mel_fmin, fmax=self.mel_fmax
+            sr=self.sample_rate, n_fft=self.fft_size, n_mels=self.num_mels, fmin=self.mel_fmin, fmax=self.mel_fmax
         )

     def _stft_parameters(
@@ -569,7 +569,7 @@ class AudioProcessor(object):
             np.ndarray: Pitch.

         Examples:
-            >>> WAV_FILE = filename = librosa.util.example_audio_file()
+            >>> WAV_FILE = filename = librosa.example('vibeace')
             >>> from TTS.config import BaseAudioConfig
             >>> from TTS.utils.audio import AudioProcessor
             >>> conf = BaseAudioConfig(pitch_fmax=640, pitch_fmin=1)
@@ -711,7 +711,7 @@ class AudioProcessor(object):
         Args:
            filename (str): Path to the wav file.
         """
-        return librosa.get_duration(filename)
+        return librosa.get_duration(path=filename)

     @staticmethod
     def mulaw_encode(wav: np.ndarray, qc: int) -> np.ndarray:
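
In librosa 0.10, `librosa.get_duration` takes keyword-only arguments and the file-path parameter is `path` (the old `filename` keyword is gone), hence the change above. A small sketch, reusing the example clip from earlier purely for illustration:

    import librosa

    # librosa >= 0.10: pass the file path via the `path` keyword.
    duration = librosa.get_duration(path=librosa.example("vibeace"))
    print(f"{duration:.2f} s")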

View File

@@ -144,8 +144,8 @@ class TorchSTFT(nn.Module):  # pylint: disable=abstract-method
     def _build_mel_basis(self):
         mel_basis = librosa.filters.mel(
-            self.sample_rate,
-            self.n_fft,
+            sr=self.sample_rate,
+            n_fft=self.n_fft,
             n_mels=self.n_mels,
             fmin=self.mel_fmin,
             fmax=self.mel_fmax,

View File

@@ -6,7 +6,7 @@ scipy>=1.4.0
 torch>=1.7
 torchaudio
 soundfile
-librosa==0.8.0
+librosa>=0.10.0
 numba==0.55.1;python_version<"3.9"
 numba==0.56.4;python_version>="3.9"
 inflect==5.6.0
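
With the pin loosened from librosa==0.8.0 to librosa>=0.10.0, one optional safeguard (a sketch, not part of this commit) is a version check before relying on the keyword-only APIs above; it assumes the `packaging` package is installed:

    import librosa
    from packaging.version import Version

    # Fail early if an older librosa without the keyword-only API is installed.
    if Version(librosa.__version__) < Version("0.10.0"):
        raise ImportError(f"librosa>=0.10.0 is required, found {librosa.__version__}")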