mirror of https://github.com/vladmandic/sdnext.git
add clip-skip notes
@@ -14,6 +14,7 @@
- **Control** units now have an extra option to re-use the current preview image as processor input
- improved `clip-skip` value handling in diffusers, thanks @AI-Casanova & @Disty0
  the clip-skip range is now 0-12, where previously the lowest value was 1 (the default is still 1)
  values can also be decimal to interpolate between different layers, for example `clip-skip: 1.5`, thanks @AI-Casanova (see the interpolation sketch after this list)
- **ONNX**:
  - allow specifying the ONNX default provider and CPU fallback (see the onnxruntime sketch after this list)
    *settings -> diffusers*
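The decimal `clip-skip` values above imply blending between two neighbouring text-encoder layers. The commit does not show the blending code itself, so the following is only a minimal sketch of the idea, assuming `hidden_states` is the tuple returned by a CLIP text model with hidden states enabled and that `clip_skip >= 1`; the helper name is hypothetical:

```python
import torch

def pick_clip_skip_state(hidden_states, clip_skip: float):
    # clip_skip=1 -> last layer, clip_skip=2 -> second-to-last, and so on;
    # a fractional value such as 1.5 blends the two neighbouring layers.
    lower = int(clip_skip)        # whole-layer part, e.g. 1 for clip_skip=1.5
    frac = clip_skip - lower      # fractional part, e.g. 0.5
    if frac == 0:
        return hidden_states[-lower]
    # linear interpolation between layer -lower and the next deeper layer
    return torch.lerp(hidden_states[-lower], hidden_states[-(lower + 1)], frac)
```

With `clip_skip=1.5` this returns the midpoint of the last two hidden states; the weighting actually used by the project may differ.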
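For context on the ONNX item: in onnxruntime, a default execution provider with a CPU fallback is expressed as an ordered provider list, which the session tries in order. A minimal sketch (the CUDA/CPU choice and the model path are illustrative, not the project's setting names):

```python
import onnxruntime as ort

# onnxruntime tries providers in order, so CPUExecutionProvider acts as the fallback
providers = ["CUDAExecutionProvider", "CPUExecutionProvider"]
session = ort.InferenceSession("model.onnx", providers=providers)  # hypothetical model path
print(session.get_providers())  # reports which providers were actually loaded
```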
@@ -70,9 +70,8 @@ class StableDiffusionProcessing:
         self.all_negative_prompts = None
         self.all_seeds = None
         self.all_subseeds = None
-        self.clip_skip = int(clip_skip) if shared.backend == shared.Backend.ORIGINAL else clip_skip
-        shared.opts.data['clip_skip'] = self.clip_skip # for compatibility with a1111 sd_hijack_clip
-
+        self.clip_skip = clip_skip
+        shared.opts.data['clip_skip'] = int(self.clip_skip) # for compatibility with a1111 sd_hijack_clip
         self.iteration = 0
         self.is_control = False
         self.is_hr_pass = False
@@ -240,7 +240,8 @@ class FrozenCLIPEmbedderWithCustomWords(FrozenCLIPEmbedderWithCustomWordsBase):
         return tokenized

     def encode_with_transformers(self, tokens):
-        clip_skip = opts.data['clip_skip'] or 1
+        clip_skip = int(opts.data['clip_skip']) or 1
+        print('HERE', type(clip_skip), clip_skip)
         outputs = self.wrapped.transformer(input_ids=tokens, output_hidden_states=-clip_skip)
         if clip_skip > 1:
             z = outputs.hidden_states[-clip_skip]
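The hunk above reads `clip-skip` as an integer and indexes `hidden_states` from the end. For a self-contained picture of what that selection does, here is a sketch against Hugging Face `transformers` that mirrors the a1111-style behaviour (including re-applying the final layer norm); it is not the project's exact code path:

```python
import torch
from transformers import CLIPTokenizer, CLIPTextModel

tokenizer = CLIPTokenizer.from_pretrained("openai/clip-vit-large-patch14")
text_encoder = CLIPTextModel.from_pretrained("openai/clip-vit-large-patch14")

tokens = tokenizer("a photo of a cat", padding="max_length",
                   max_length=tokenizer.model_max_length, return_tensors="pt")

with torch.no_grad():
    outputs = text_encoder(input_ids=tokens.input_ids, output_hidden_states=True)

clip_skip = 2  # 1 = last layer, 2 = second-to-last, ...
if clip_skip > 1:
    # hidden_states[-1] is the raw last layer output; earlier layers have not passed
    # through the final layer norm, so it is re-applied to the selected layer
    z = text_encoder.text_model.final_layer_norm(outputs.hidden_states[-clip_skip])
else:
    z = outputs.last_hidden_state  # final layer norm already applied
print(z.shape)  # torch.Size([1, 77, 768]) for the ViT-L/14 text encoder
```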