def compute_tilde_alphas(times, alphas):
    """Compute tilde_alpha_t for each element of the batch.

    tilde_alpha follows the recursion
        tilde_alpha_0 = 0
        tilde_alpha_k = sqrt(alpha_t) * (1 + tilde_alpha_{k-1})
    iterated t times with a fixed alpha_t, which telescopes to the
    geometric sum  sum_{k=1..t} sqrt(alpha_t)^k.  We evaluate that sum
    in closed form, vectorized over the batch, instead of looping
    O(batch_size * t) times in Python.

    Args:
        times: Tensor of times (shape [batch_size]); integer or float,
            interpreted as the recursion depth t per element.
        alphas: Tensor of alpha values corresponding to times
            (shape [batch_size]).

    Returns:
        tilde_alphas: Tensor of computed tilde_alpha values
            (shape [batch_size]), on ``alphas.device``.
    """
    # Ensure times and alphas are in the same batch dimension.
    assert times.shape == alphas.shape, "times and alphas must have the same shape"
    s = torch.sqrt(alphas)
    t = times.to(s.dtype)
    # Closed form of the geometric series: s * (1 - s**t) / (1 - s).
    denom = 1.0 - s
    # Where alpha == 1 (so s == 1) the denominator vanishes; the limit of
    # the series is simply t.  Substitute a safe denominator there to avoid
    # NaN/inf leaking through torch.where's eager evaluation of both branches.
    degenerate = denom == 0
    safe_denom = torch.where(degenerate, torch.ones_like(denom), denom)
    geometric = s * (1.0 - s ** t) / safe_denom
    return torch.where(degenerate, t, geometric)
I think this may be correct, although I am fairly sure there are faster ways to obtain the result.