Discrete noise schedules

DiscreteCosineNoiseSchedule

Bases: DiscreteNoiseSchedule

A cosine noise schedule for Diffusion Models.

Source code in bionemo/moco/schedules/discrete_noise_schedules.py
class DiscreteCosineNoiseSchedule(DiscreteNoiseSchedule):
    """A cosine noise schedule for Diffusion Models."""

    def __init__(self, nsteps: int, nu: Float = 1.0, s: Float = 0.008):
        """Initialize the CosineNoiseSchedule.

        Args:
            nsteps (int): Number of time steps.
            nu (Optional[Float]): Hyperparameter for the cosine schedule (default is 1.0).
            s (Optional[Float]): Hyperparameter for the cosine schedule (default is 0.008).
        """
        super().__init__(nsteps=nsteps, direction=TimeDirection.DIFFUSION)
        self.nu = nu
        self.s = s

    def _generate_schedule(self, nsteps: Optional[int] = None, device: Union[str, torch.device] = "cpu") -> Tensor:
        """Generate the cosine noise schedule.

        Args:
            nsteps (Optional[int]): Number of time steps. If None, uses the value from initialization.
            device (Optional[str]): Device to place the schedule on (default is "cpu").

        Returns:
            Tensor: A tensor of alpha values with one entry per time step.
        """
        if nsteps is None:
            nsteps = self.nsteps
        steps = nsteps + 2
        x = torch.linspace(0, steps, steps, device=device)
        alphas_cumprod = torch.cos(0.5 * torch.pi * (((x / steps) ** self.nu) + self.s) / (1 + self.s)) ** 2
        alphas_cumprod_new = alphas_cumprod / alphas_cumprod[0]
        alphas_cumprod_new = self._clip_noise_schedule(alphas_cumprod_new, clip_value=0.05)
        alphas = alphas_cumprod_new[1:] / alphas_cumprod_new[:-1]
        alphas = torch.clamp(alphas, min=0.001)
        betas = 1 - alphas
        betas = torch.clamp(betas, 0.0, 0.999)
        result = 1.0 - betas
        return result[1:]

    def _clip_noise_schedule(self, alphas2: Tensor, clip_value: Float = 0.001) -> Tensor:
        """For a noise schedule given by alpha^2, this clips alpha_t / alpha_t-1. This may help improve stability during sampling.

        Args:
            alphas2 (Tensor): The noise schedule given by alpha^2.
            clip_value (Optional[Float]): The minimum value for alpha_t / alpha_t-1 (default is 0.001).

        Returns:
            Tensor: The clipped noise schedule.
        """
        alphas2 = torch.cat([torch.ones(1, device=alphas2.device), alphas2], dim=0)

        alphas_step = alphas2[1:] / alphas2[:-1]

        alphas_step = torch.clamp(alphas_step, min=clip_value, max=1.0)
        alphas2 = torch.cumprod(alphas_step, dim=0)

        return alphas2
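
A minimal usage sketch follows; the import path is assumed from the source path shown above, and the printed shape follows from _generate_schedule returning one alpha value per step.

from bionemo.moco.schedules.discrete_noise_schedules import DiscreteCosineNoiseSchedule  # assumed import path

schedule = DiscreteCosineNoiseSchedule(nsteps=1000, nu=1.0, s=0.008)

# One alpha value per time step.
alphas = schedule.generate_schedule(device="cpu")
print(alphas.shape)  # torch.Size([1000])

# nsteps can be overridden per call without re-instantiating the schedule.
coarse_alphas = schedule.generate_schedule(nsteps=100)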

__init__(nsteps, nu=1.0, s=0.008)

Initialize the CosineNoiseSchedule.

Parameters:

    nsteps (int, required): Number of time steps.
    nu (Optional[Float], default 1.0): Hyperparameter for the cosine schedule.
    s (Optional[Float], default 0.008): Hyperparameter for the cosine schedule.
Source code in bionemo/moco/schedules/discrete_noise_schedules.py
def __init__(self, nsteps: int, nu: Float = 1.0, s: Float = 0.008):
    """Initialize the CosineNoiseSchedule.

    Args:
        nsteps (int): Number of time steps.
        nu (Optional[Float]): Hyperparameter for the cosine schedule (default is 1.0).
        s (Optional[Float]): Hyperparameter for the cosine schedule (default is 0.008).
    """
    super().__init__(nsteps=nsteps, direction=TimeDirection.DIFFUSION)
    self.nu = nu
    self.s = s

DiscreteNoiseSchedule

Bases: ABC

A base class for discrete noise schedules. Regardless of how a schedule is defined, this class returns it with a unified direction of time.

Source code in bionemo/moco/schedules/discrete_noise_schedules.py
class DiscreteNoiseSchedule(ABC):
    """A base class for discrete schedules. No matter the definition this class returns objects using a unified direction of time."""

    def __init__(self, nsteps: int, direction: TimeDirection):
        """Initialize the DiscreteNoiseSchedule.

        Args:
            nsteps (int): Number of time steps.
            direction (TimeDirection): The time direction in which the schedule is defined (e.g. TimeDirection.DIFFUSION).

        """
        self.nsteps = nsteps
        self.direction = string_to_enum(direction, TimeDirection)

    def generate_schedule(
        self,
        nsteps: Optional[int] = None,
        device: Union[str, torch.device] = "cpu",
        synchronize: Optional[TimeDirection] = None,
    ) -> Tensor:
        """Public wrapper to generate the time schedule as a tensor.

        Args:
            nsteps (Optional[int]): Number of time steps. If None, uses the value from initialization.
            device (Optional[str]): Device to place the schedule on (default is "cpu").
            synchronize (Optional[TimeDirection]): TimeDirection to synchronize the schedule with. If the schedule is defined with a different direction, it is flipped to match the specified one (default is None).

        Returns:
            Tensor: A tensor containing the noise schedule, with one entry per time step.
        """
        schedule = self._generate_schedule(nsteps, device)
        if synchronize is None:
            return schedule
        synchronize = string_to_enum(synchronize, TimeDirection)
        if self.direction != synchronize:
            return torch.flip(schedule, dims=[0])
        return schedule

    @abstractmethod
    def _generate_schedule(self, nsteps: Optional[int] = None, device: Union[str, torch.device] = "cpu") -> Tensor:
        """Generate the time schedule as a list.

        Args:
            nsteps (Optional[int]): Number of time steps. If None, uses the value from initialization.
            device (Optional[str]): Device to place the schedule on (default is "cpu").

        Returns:
            Tensor: A tensor containing the noise schedule, with one entry per time step.
        """
        pass

    def calculate_derivative(
        self,
        nsteps: Optional[int] = None,
        device: Union[str, torch.device] = "cpu",
        synchronize: Optional[TimeDirection] = None,
    ) -> Tensor:
        """Calculate the time derivative of the schedule.

        Args:
            nsteps (Optional[int]): Number of time steps. If None, uses the value from initialization.
            device (Optional[str]): Device to place the schedule on (default is "cpu").
            synchronize (Optional[TimeDirection]): TimeDirection to synchronize the schedule with. If the schedule is defined with a different direction, it is flipped to match the specified one (default is None).

        Returns:
            Tensor: A tensor representing the time derivative of the schedule.

        Raises:
            NotImplementedError: If the derivative calculation is not implemented for this schedule.
        """
        raise NotImplementedError("Derivative calculation is not implemented for this schedule.")
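
Because _generate_schedule is abstract, a concrete schedule subclasses this base and supplies that one method. The toy linear schedule below is an illustrative sketch, not part of the library; the import paths marked as assumed are guesses from the source path shown above.

from typing import Optional, Union

import torch
from torch import Tensor

from bionemo.moco.schedules.discrete_noise_schedules import DiscreteNoiseSchedule  # assumed import path
from bionemo.moco.schedules.utils import TimeDirection  # assumed location of TimeDirection


class DiscreteLinearNoiseSchedule(DiscreteNoiseSchedule):
    """A toy linear schedule used only to illustrate the subclassing contract."""

    def __init__(self, nsteps: int):
        super().__init__(nsteps=nsteps, direction=TimeDirection.DIFFUSION)

    def _generate_schedule(self, nsteps: Optional[int] = None, device: Union[str, torch.device] = "cpu") -> Tensor:
        if nsteps is None:
            nsteps = self.nsteps
        # Linearly decreasing alpha values; direction flipping is handled by the
        # public generate_schedule() wrapper via its synchronize argument.
        return torch.linspace(0.999, 0.001, nsteps, device=device)


linear = DiscreteLinearNoiseSchedule(nsteps=10)
print(linear.generate_schedule())  # 10 evenly spaced values from 0.999 down to 0.001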

__init__(nsteps, direction)

Initialize the DiscreteNoiseSchedule.

Parameters:

    nsteps (int, required): Number of time steps.
    direction (TimeDirection, required): The time direction in which the schedule is defined (e.g. TimeDirection.DIFFUSION). Calls to generate_schedule that request a different direction flip the schedule to match.
Source code in bionemo/moco/schedules/discrete_noise_schedules.py
def __init__(self, nsteps: int, direction: TimeDirection):
    """Initialize the DiscreteNoiseSchedule.

    Args:
        nsteps (int): Number of time steps.
        direction (TimeDirection): The time direction in which the schedule is defined (e.g. TimeDirection.DIFFUSION).

    """
    self.nsteps = nsteps
    self.direction = string_to_enum(direction, TimeDirection)

calculate_derivative(nsteps=None, device='cpu', synchronize=None)

Calculate the time derivative of the schedule.

Parameters:

    nsteps (Optional[int], default None): Number of time steps. If None, uses the value from initialization.
    device (Optional[str], default "cpu"): Device to place the schedule on.
    synchronize (Optional[TimeDirection], default None): TimeDirection to synchronize the schedule with. If the schedule is defined with a different direction, it is flipped to match the specified one.

Returns:

    Tensor: A tensor representing the time derivative of the schedule.

Raises:

    NotImplementedError: If the derivative calculation is not implemented for this schedule.

Source code in bionemo/moco/schedules/discrete_noise_schedules.py
def calculate_derivative(
    self,
    nsteps: Optional[int] = None,
    device: Union[str, torch.device] = "cpu",
    synchronize: Optional[TimeDirection] = None,
) -> Tensor:
    """Calculate the time derivative of the schedule.

    Args:
        nsteps (Optional[int]): Number of time steps. If None, uses the value from initialization.
        device (Optional[str]): Device to place the schedule on (default is "cpu").
        synchronize (Optional[TimeDirection]): TimeDirection to synchronize the schedule with. If the schedule is defined with a different direction, it is flipped to match the specified one (default is None).

    Returns:
        Tensor: A tensor representing the time derivative of the schedule.

    Raises:
        NotImplementedError: If the derivative calculation is not implemented for this schedule.
    """
    raise NotImplementedError("Derivative calculation is not implemented for this schedule.")
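
The base implementation only raises NotImplementedError, so a subclass that needs the derivative must override this method. The finite-difference version below is a hypothetical sketch rather than library code; import paths marked as assumed are guesses.

from typing import Optional, Union

import torch
from torch import Tensor

from bionemo.moco.schedules.discrete_noise_schedules import DiscreteCosineNoiseSchedule  # assumed import path
from bionemo.moco.schedules.utils import TimeDirection  # assumed location of TimeDirection


class CosineScheduleWithDerivative(DiscreteCosineNoiseSchedule):
    """Cosine schedule with a finite-difference time derivative (illustrative only)."""

    def calculate_derivative(
        self,
        nsteps: Optional[int] = None,
        device: Union[str, torch.device] = "cpu",
        synchronize: Optional[TimeDirection] = None,
    ) -> Tensor:
        schedule = self.generate_schedule(nsteps=nsteps, device=device, synchronize=synchronize)
        # Approximate d(alpha)/dt between consecutive discrete steps; the first
        # entry is duplicated so the output length matches the schedule length.
        return torch.diff(schedule, prepend=schedule[:1])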

generate_schedule(nsteps=None, device='cpu', synchronize=None)

Public wrapper to generate the time schedule as a tensor.

Parameters:

    nsteps (Optional[int], default None): Number of time steps. If None, uses the value from initialization.
    device (Optional[str], default "cpu"): Device to place the schedule on.
    synchronize (Optional[TimeDirection], default None): TimeDirection to synchronize the schedule with. If the schedule is defined with a different direction, it is flipped to match the specified one.

Returns:

    Tensor: A tensor containing the noise schedule, with one entry per time step.

Source code in bionemo/moco/schedules/discrete_noise_schedules.py
def generate_schedule(
    self,
    nsteps: Optional[int] = None,
    device: Union[str, torch.device] = "cpu",
    synchronize: Optional[TimeDirection] = None,
) -> Tensor:
    """Public wrapper to generate the time schedule as a tensor.

    Args:
        nsteps (Optional[int]): Number of time steps. If None, uses the value from initialization.
        device (Optional[str]): Device to place the schedule on (default is "cpu").
        synchronize (Optional[TimeDirection]): TimeDirection to synchronize the schedule with. If the schedule is defined with a different direction, it is flipped to match the specified one (default is None).

    Returns:
        Tensor: A tensor containing the noise schedule, with one entry per time step.
    """
    schedule = self._generate_schedule(nsteps, device)
    if synchronize is None:
        return schedule
    synchronize = string_to_enum(synchronize, TimeDirection)
    if self.direction != synchronize:
        return torch.flip(schedule, dims=[0])
    return schedule
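
A short sketch of the synchronize behaviour: passing the schedule's own direction is a no-op, while a differing TimeDirection returns the same values flipped along the time axis. Import paths marked as assumed are guesses from the source path shown above.

import torch

from bionemo.moco.schedules.discrete_noise_schedules import DiscreteCosineNoiseSchedule  # assumed import path
from bionemo.moco.schedules.utils import TimeDirection  # assumed location of TimeDirection

schedule = DiscreteCosineNoiseSchedule(nsteps=8)
forward = schedule.generate_schedule()

# Synchronizing with the schedule's own direction (TimeDirection.DIFFUSION) returns it unchanged.
same = schedule.generate_schedule(synchronize=TimeDirection.DIFFUSION)
assert torch.equal(same, forward)

# Requesting any other TimeDirection would instead return torch.flip(forward, dims=[0]).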