Skip to content

Commit

Permalink
add to layers.py docs
Browse files Browse the repository at this point in the history
  • Loading branch information
jazcollins committed Oct 2, 2023
1 parent 054d1ef commit 65b1a8b
Showing 1 changed file with 6 additions and 2 deletions.
8 changes: 6 additions & 2 deletions diffusion/models/layers.py
Original file line number Diff line number Diff line change
Expand Up @@ -24,8 +24,11 @@ def zero_module(module):
class ClippedAttnProcessor2_0:
"""Processor for implementing scaled dot-product attention (enabled by default if you're using PyTorch 2.0).
Modified from https://github.com/huggingface/diffusers/blob/v0.21.0-release/src/diffusers/models/attention_processor.py to
Modified from https://github.com/huggingface/diffusers/blob/v0.21.0-release/src/diffusers/models/attention_processor.py#L977 to
allow clipping QKV values.
Args:
clip_val (float, defaults to 6.0): Amount to clip the query, key, and value tensors by.
"""

def __init__(self, clip_val=6.0):
Expand Down Expand Up @@ -120,7 +123,7 @@ def __call__(
class ClippedXFormersAttnProcessor:
"""Processor for implementing memory efficient attention using xFormers.
Modified from https://github.com/huggingface/diffusers/blob/v0.21.0-release/src/diffusers/models/attention_processor.py to
Modified from https://github.com/huggingface/diffusers/blob/v0.21.0-release/src/diffusers/models/attention_processor.py#L888 to
allow clipping QKV values.
Args:
Expand All @@ -129,6 +132,7 @@ class ClippedXFormersAttnProcessor:
[operator](https://facebookresearch.github.io/xformers/components/ops.html#xformers.ops.AttentionOpBase) to
use as the attention operator. It is recommended to set to `None`, and allow xFormers to choose the best
operator.
clip_val (float, defaults to 6.0): Amount to clip the query, key, and value tensors by.
"""

def __init__(self, clip_val=6.0, attention_op=None):
Expand Down

0 comments on commit 65b1a8b

Please sign in to comment.