Commit 6229a9b
Merge branch 'gempoll' into gempoll-docker
ifsheldon committed Mar 5, 2024
2 parents 3c665b9 + f7a1d35 commit 6229a9b
Showing 101 changed files with 2,837 additions and 907 deletions.
2 changes: 0 additions & 2 deletions .eslintrc.js
@@ -86,8 +86,6 @@ module.exports = {
// imageviewer.js
modalPrevImage: "readonly",
modalNextImage: "readonly",
// token-counters.js
setupTokenCounters: "readonly",
// localStorage.js
localSet: "readonly",
localGet: "readonly",
144 changes: 138 additions & 6 deletions CHANGELOG.md

Large diffs are not rendered by default.

5 changes: 5 additions & 0 deletions _typos.toml
@@ -0,0 +1,5 @@
[default.extend-words]
# Part of "RGBa" (Pillow's pre-multiplied alpha RGB mode)
Ba = "Ba"
# HSA is something AMD uses for their GPUs
HSA = "HSA"
8 changes: 4 additions & 4 deletions extensions-builtin/LDSR/sd_hijack_ddpm_v1.py
@@ -301,7 +301,7 @@ def p_losses(self, x_start, t, noise=None):
elif self.parameterization == "x0":
target = x_start
else:
raise NotImplementedError(f"Paramterization {self.parameterization} not yet supported")
raise NotImplementedError(f"Parameterization {self.parameterization} not yet supported")

loss = self.get_loss(model_out, target, mean=False).mean(dim=[1, 2, 3])

@@ -880,7 +880,7 @@ def forward(self, x, c, *args, **kwargs):
def apply_model(self, x_noisy, t, cond, return_ids=False):

if isinstance(cond, dict):
# hybrid case, cond is exptected to be a dict
# hybrid case, cond is expected to be a dict
pass
else:
if not isinstance(cond, list):
@@ -916,7 +916,7 @@ def apply_model(self, x_noisy, t, cond, return_ids=False):
cond_list = [{c_key: [c[:, :, :, :, i]]} for i in range(c.shape[-1])]

elif self.cond_stage_key == 'coordinates_bbox':
assert 'original_image_size' in self.split_input_params, 'BoudingBoxRescaling is missing original_image_size'
assert 'original_image_size' in self.split_input_params, 'BoundingBoxRescaling is missing original_image_size'

# assuming padding of unfold is always 0 and its dilation is always 1
n_patches_per_row = int((w - ks[0]) / stride[0] + 1)
@@ -926,7 +926,7 @@ def apply_model(self, x_noisy, t, cond, return_ids=False):
num_downs = self.first_stage_model.encoder.num_resolutions - 1
rescale_latent = 2 ** (num_downs)

# get top left postions of patches as conforming for the bbox tokenizer, therefore we
# get top left positions of patches as conforming for the bbox tokenizer, therefore we
# need to rescale the tl patch coordinates to be in between (0,1)
tl_patch_coordinates = [(rescale_latent * stride[0] * (patch_nr % n_patches_per_row) / full_img_w,
rescale_latent * stride[1] * (patch_nr // n_patches_per_row) / full_img_h)
2 changes: 1 addition & 1 deletion extensions-builtin/Lora/lyco_helpers.py
@@ -30,7 +30,7 @@ def factorization(dimension: int, factor:int=-1) -> tuple[int, int]:
In LoRA with Kronecker Product, first value is a value for weight scale.
second value is a value for weight.
Becuase of non-commutative property, A⊗B ≠ B⊗A. Meaning of two matrices is slightly different.
Because of non-commutative property, A⊗B ≠ B⊗A. Meaning of two matrices is slightly different.
examples)
factor
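The docstring's claim about non-commutativity is easy to check directly. A small illustrative sketch (values chosen arbitrarily, not taken from the module):

import torch

# Kronecker product is not commutative: A⊗B and B⊗A generally differ
# (they agree only up to a permutation of rows and columns).
A = torch.tensor([[0., 1.], [2., 3.]])
B = torch.tensor([[1., 0.], [0., 2.]])
print(torch.allclose(torch.kron(A, B), torch.kron(B, A)))  # False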
90 changes: 63 additions & 27 deletions extensions-builtin/Lora/network_oft.py
@@ -1,6 +1,5 @@
import torch
import network
from lyco_helpers import factorization
from einops import rearrange


@@ -22,20 +21,28 @@ def __init__(self, net: network.Network, weights: network.NetworkWeights):
self.org_module: list[torch.Module] = [self.sd_module]

self.scale = 1.0
self.is_R = False
self.is_boft = False

# kohya-ss
# kohya-ss/New LyCORIS OFT/BOFT
if "oft_blocks" in weights.w.keys():
self.is_kohya = True
self.oft_blocks = weights.w["oft_blocks"] # (num_blocks, block_size, block_size)
self.alpha = weights.w["alpha"] # alpha is constraint
self.alpha = weights.w.get("alpha", None) # alpha is constraint
self.dim = self.oft_blocks.shape[0] # lora dim
# LyCORIS
# Old LyCORIS OFT
elif "oft_diag" in weights.w.keys():
self.is_kohya = False
self.is_R = True
self.oft_blocks = weights.w["oft_diag"]
# self.alpha is unused
self.dim = self.oft_blocks.shape[1] # (num_blocks, block_size, block_size)

# LyCORIS BOFT
if self.oft_blocks.dim() == 4:
self.is_boft = True
self.rescale = weights.w.get('rescale', None)
if self.rescale is not None:
self.rescale = self.rescale.reshape(-1, *[1]*(self.org_module[0].weight.dim() - 1))

is_linear = type(self.sd_module) in [torch.nn.Linear, torch.nn.modules.linear.NonDynamicallyQuantizableLinear]
is_conv = type(self.sd_module) in [torch.nn.Conv2d]
is_other_linear = type(self.sd_module) in [torch.nn.MultiheadAttention] # unsupported
@@ -47,35 +54,64 @@ def __init__(self, net: network.Network, weights: network.NetworkWeights):
elif is_other_linear:
self.out_dim = self.sd_module.embed_dim

if self.is_kohya:
self.constraint = self.alpha * self.out_dim
self.num_blocks = self.dim
self.block_size = self.out_dim // self.dim
else:
self.num_blocks = self.dim
self.block_size = self.out_dim // self.dim
self.constraint = (0 if self.alpha is None else self.alpha) * self.out_dim
if self.is_R:
self.constraint = None
self.block_size, self.num_blocks = factorization(self.out_dim, self.dim)
self.block_size = self.dim
self.num_blocks = self.out_dim // self.dim
elif self.is_boft:
self.boft_m = self.oft_blocks.shape[0]
self.num_blocks = self.oft_blocks.shape[1]
self.block_size = self.oft_blocks.shape[2]
self.boft_b = self.block_size

def calc_updown(self, orig_weight):
oft_blocks = self.oft_blocks.to(orig_weight.device)
eye = torch.eye(self.block_size, device=self.oft_blocks.device)

if self.is_kohya:
block_Q = oft_blocks - oft_blocks.transpose(1, 2) # ensure skew-symmetric orthogonal matrix
norm_Q = torch.norm(block_Q.flatten())
new_norm_Q = torch.clamp(norm_Q, max=self.constraint)
block_Q = block_Q * ((new_norm_Q + 1e-8) / (norm_Q + 1e-8))
eye = torch.eye(self.block_size, device=oft_blocks.device)

if not self.is_R:
block_Q = oft_blocks - oft_blocks.transpose(-1, -2) # ensure skew-symmetric orthogonal matrix
if self.constraint != 0:
norm_Q = torch.norm(block_Q.flatten())
new_norm_Q = torch.clamp(norm_Q, max=self.constraint.to(oft_blocks.device))
block_Q = block_Q * ((new_norm_Q + 1e-8) / (norm_Q + 1e-8))
oft_blocks = torch.matmul(eye + block_Q, (eye - block_Q).float().inverse())

R = oft_blocks.to(orig_weight.device)

# This errors out for MultiheadAttention, might need to be handled up-stream
merged_weight = rearrange(orig_weight, '(k n) ... -> k n ...', k=self.num_blocks, n=self.block_size)
merged_weight = torch.einsum(
'k n m, k n ... -> k m ...',
R,
merged_weight
)
merged_weight = rearrange(merged_weight, 'k m ... -> (k m) ...')
if not self.is_boft:
# This errors out for MultiheadAttention, might need to be handled up-stream
merged_weight = rearrange(orig_weight, '(k n) ... -> k n ...', k=self.num_blocks, n=self.block_size)
merged_weight = torch.einsum(
'k n m, k n ... -> k m ...',
R,
merged_weight
)
merged_weight = rearrange(merged_weight, 'k m ... -> (k m) ...')
else:
# TODO: determine correct value for scale
scale = 1.0
m = self.boft_m
b = self.boft_b
r_b = b // 2
inp = orig_weight
for i in range(m):
bi = R[i] # b_num, b_size, b_size
if i == 0:
# Apply multiplier/scale and rescale into first weight
bi = bi * scale + (1 - scale) * eye
inp = rearrange(inp, "(c g k) ... -> (c k g) ...", g=2, k=2**i * r_b)
inp = rearrange(inp, "(d b) ... -> d b ...", b=b)
inp = torch.einsum("b i j, b j ... -> b i ...", bi, inp)
inp = rearrange(inp, "d b ... -> (d b) ...")
inp = rearrange(inp, "(c k g) ... -> (c g k) ...", g=2, k=2**i * r_b)
merged_weight = inp

# Rescale mechanism
if self.rescale is not None:
merged_weight = self.rescale.to(merged_weight) * merged_weight

updown = merged_weight.to(orig_weight.device) - orig_weight.to(merged_weight.dtype)
output_shape = orig_weight.shape
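The calc_updown path above relies on the Cayley transform: a skew-symmetric Q yields an orthogonal R = (I + Q)(I - Q)^-1, which is what lets OFT rotate weight blocks without changing their norms. A minimal standalone sketch of that step (hypothetical shapes, not the module's actual tensors):

import torch

# Mirror of the `eye + block_Q` / `(eye - block_Q).inverse()` step above,
# in double precision so the orthogonality check is numerically tight.
block_size = 4
A = torch.randn(block_size, block_size, dtype=torch.float64)
Q = A - A.T                         # skew-symmetric: Q.T == -Q
I = torch.eye(block_size, dtype=torch.float64)
R = (I + Q) @ torch.inverse(I - Q)  # Cayley transform

# R is orthogonal, so multiplying a weight block by R preserves norms.
assert torch.allclose(R @ R.T, I, atol=1e-8)

The BOFT branch applies a stack of such blocks in a butterfly pattern (the paired rearrange calls permute channels between factors) instead of a single block-diagonal product.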
6 changes: 3 additions & 3 deletions extensions-builtin/Lora/networks.py
@@ -260,11 +260,11 @@ def load_networks(names, te_multipliers=None, unet_multipliers=None, dyn_dims=None):

loaded_networks.clear()

networks_on_disk = [available_network_aliases.get(name, None) for name in names]
networks_on_disk = [available_networks.get(name, None) if name.lower() in forbidden_network_aliases else available_network_aliases.get(name, None) for name in names]
if any(x is None for x in networks_on_disk):
list_available_networks()

networks_on_disk = [available_network_aliases.get(name, None) for name in names]
networks_on_disk = [available_networks.get(name, None) if name.lower() in forbidden_network_aliases else available_network_aliases.get(name, None) for name in names]

failed_to_load_networks = []

@@ -355,7 +355,7 @@ def network_apply_weights(self: Union[torch.nn.Conv2d, torch.nn.Linear, torch.nn
"""
Applies the currently selected set of networks to the weights of torch layer self.
If weights already have this particular set of networks applied, does nothing.
If not, restores orginal weights from backup and alters weights according to networks.
If not, restores original weights from backup and alters weights according to networks.
"""

network_layer_name = getattr(self, 'network_layer_name', None)
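The load_networks change above makes lookups prefer the on-disk network name over an alias whenever that name appears in forbidden_network_aliases. A hypothetical, self-contained illustration of that priority (stand-in dicts, not the real module state, which is populated from the Lora directory):

available_networks = {"style": "network from style.safetensors"}
available_network_aliases = {"style": "a different network whose alias is 'style'"}
forbidden_network_aliases = {"style"}

def resolve(name):
    # Names colliding with a forbidden alias resolve by filename first,
    # so an alias can no longer shadow a file the user asked for by name.
    if name.lower() in forbidden_network_aliases:
        return available_networks.get(name, None)
    return available_network_aliases.get(name, None)

print(resolve("style"))  # -> "network from style.safetensors"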
5 changes: 3 additions & 2 deletions extensions-builtin/Lora/preload.py
@@ -1,7 +1,8 @@
import os
from modules import paths
from modules.paths_internal import normalized_filepath


def preload(parser):
parser.add_argument("--lora-dir", type=str, help="Path to directory with Lora networks.", default=os.path.join(paths.models_path, 'Lora'))
parser.add_argument("--lyco-dir-backcompat", type=str, help="Path to directory with LyCORIS networks (for backawards compatibility; can also use --lyco-dir).", default=os.path.join(paths.models_path, 'LyCORIS'))
parser.add_argument("--lora-dir", type=normalized_filepath, help="Path to directory with Lora networks.", default=os.path.join(paths.models_path, 'Lora'))
parser.add_argument("--lyco-dir-backcompat", type=normalized_filepath, help="Path to directory with LyCORIS networks (for backawards compatibility; can also use --lyco-dir).", default=os.path.join(paths.models_path, 'LyCORIS'))
5 changes: 4 additions & 1 deletion extensions-builtin/Lora/ui_extra_networks_lora.py
@@ -24,13 +24,16 @@ def create_item(self, name, index=None, enable_filter=True):

alias = lora_on_disk.get_alias()

search_terms = [self.search_terms_from_path(lora_on_disk.filename)]
if lora_on_disk.hash:
search_terms.append(lora_on_disk.hash)
item = {
"name": name,
"filename": lora_on_disk.filename,
"shorthash": lora_on_disk.shorthash,
"preview": self.find_preview(path),
"description": self.find_description(path),
"search_term": self.search_terms_from_path(lora_on_disk.filename) + " " + (lora_on_disk.hash or ""),
"search_terms": search_terms,
"local_preview": f"{path}.{shared.opts.samples_format}",
"metadata": lora_on_disk.metadata,
"sort_keys": {'default': index, **self.get_sort_keys(lora_on_disk.filename)},
12 changes: 9 additions & 3 deletions extensions-builtin/canvas-zoom-and-pan/javascript/zoom.js
@@ -218,6 +218,8 @@ onUiLoaded(async() => {
canvas_hotkey_fullscreen: "KeyS",
canvas_hotkey_move: "KeyF",
canvas_hotkey_overlap: "KeyO",
canvas_hotkey_shrink_brush: "KeyQ",
canvas_hotkey_grow_brush: "KeyW",
canvas_disabled_functions: [],
canvas_show_tooltip: true,
canvas_auto_expand: true,
@@ -227,6 +229,8 @@
const functionMap = {
"Zoom": "canvas_hotkey_zoom",
"Adjust brush size": "canvas_hotkey_adjust",
"Hotkey shrink brush": "canvas_hotkey_shrink_brush",
"Hotkey enlarge brush": "canvas_hotkey_grow_brush",
"Moving canvas": "canvas_hotkey_move",
"Fullscreen": "canvas_hotkey_fullscreen",
"Reset Zoom": "canvas_hotkey_reset",
@@ -288,7 +292,7 @@ onUiLoaded(async() => {

// Create tooltip
function createTooltip() {
const toolTipElemnt =
const toolTipElement =
targetElement.querySelector(".image-container");
const tooltip = document.createElement("div");
tooltip.className = "canvas-tooltip";
@@ -351,7 +355,7 @@ onUiLoaded(async() => {
tooltip.appendChild(tooltipContent);

// Add a hint element to the target element
toolTipElemnt.appendChild(tooltip);
toolTipElement.appendChild(tooltip);
}

// Show tooltip if the setting is enabled
@@ -686,7 +690,9 @@ onUiLoaded(async() => {
const hotkeyActions = {
[hotkeysConfig.canvas_hotkey_reset]: resetZoom,
[hotkeysConfig.canvas_hotkey_overlap]: toggleOverlap,
[hotkeysConfig.canvas_hotkey_fullscreen]: fitToScreen
[hotkeysConfig.canvas_hotkey_fullscreen]: fitToScreen,
[hotkeysConfig.canvas_hotkey_shrink_brush]: () => adjustBrushSize(elemId, 10),
[hotkeysConfig.canvas_hotkey_grow_brush]: () => adjustBrushSize(elemId, -10)
};

const action = hotkeyActions[event.code];
extensions-builtin/canvas-zoom-and-pan/scripts/hotkey_config.py
@@ -4,12 +4,14 @@
shared.options_templates.update(shared.options_section(('canvas_hotkey', "Canvas Hotkeys"), {
"canvas_hotkey_zoom": shared.OptionInfo("Alt", "Zoom canvas", gr.Radio, {"choices": ["Shift","Ctrl", "Alt"]}).info("If you choose 'Shift' you cannot scroll horizontally, 'Alt' can cause a little trouble in firefox"),
"canvas_hotkey_adjust": shared.OptionInfo("Ctrl", "Adjust brush size", gr.Radio, {"choices": ["Shift","Ctrl", "Alt"]}).info("If you choose 'Shift' you cannot scroll horizontally, 'Alt' can cause a little trouble in firefox"),
"canvas_hotkey_shrink_brush": shared.OptionInfo("Q", "Shrink the brush size"),
"canvas_hotkey_grow_brush": shared.OptionInfo("W", "Enlarge the brush size"),
"canvas_hotkey_move": shared.OptionInfo("F", "Moving the canvas").info("To work correctly in firefox, turn off 'Automatically search the page text when typing' in the browser settings"),
"canvas_hotkey_fullscreen": shared.OptionInfo("S", "Fullscreen Mode, maximizes the picture so that it fits into the screen and stretches it to its full width "),
"canvas_hotkey_reset": shared.OptionInfo("R", "Reset zoom and canvas positon"),
"canvas_hotkey_overlap": shared.OptionInfo("O", "Toggle overlap").info("Technical button, neededs for testing"),
"canvas_hotkey_reset": shared.OptionInfo("R", "Reset zoom and canvas position"),
"canvas_hotkey_overlap": shared.OptionInfo("O", "Toggle overlap").info("Technical button, needed for testing"),
"canvas_show_tooltip": shared.OptionInfo(True, "Enable tooltip on the canvas"),
"canvas_auto_expand": shared.OptionInfo(True, "Automatically expands an image that does not fit completely in the canvas area, similar to manually pressing the S and R buttons"),
"canvas_blur_prompt": shared.OptionInfo(False, "Take the focus off the prompt when working with a canvas"),
"canvas_disabled_functions": shared.OptionInfo(["Overlap"], "Disable function that you don't use", gr.CheckboxGroup, {"choices": ["Zoom","Adjust brush size", "Moving canvas","Fullscreen","Reset Zoom","Overlap"]}),
"canvas_disabled_functions": shared.OptionInfo(["Overlap"], "Disable function that you don't use", gr.CheckboxGroup, {"choices": ["Zoom","Adjust brush size","Hotkey enlarge brush","Hotkey shrink brush","Moving canvas","Fullscreen","Reset Zoom","Overlap"]}),
}))
extensions-builtin/extra-options-section/scripts/extra_options_section.py
@@ -1,7 +1,7 @@
import math

import gradio as gr
from modules import scripts, shared, ui_components, ui_settings, infotext_utils
from modules import scripts, shared, ui_components, ui_settings, infotext_utils, errors
from modules.ui_components import FormColumn


@@ -42,7 +42,11 @@ def ui(self, is_img2img):
setting_name = extra_options[index]

with FormColumn():
comp = ui_settings.create_setting_component(setting_name)
try:
comp = ui_settings.create_setting_component(setting_name)
except KeyError:
errors.report(f"Can't add extra options for {setting_name} in ui")
continue

self.comps.append(comp)
self.setting_names.append(setting_name)
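The try/except added above lets the extra-options UI skip a setting that no longer exists instead of failing to build the whole tab. The same report-and-continue pattern in isolation (stand-in callables, not the webui API):

def build_components(setting_names, create_component, report):
    comps = []
    for name in setting_names:
        try:
            comps.append(create_component(name))
        except KeyError:
            # Stale or unknown setting name: report it and keep going.
            report(f"Can't add extra options for {name} in ui")
    return comps

settings = {"setting_a": "A"}
comps = build_components(
    ["setting_a", "missing_setting"],
    create_component=lambda name: settings[name],  # raises KeyError if absent
    report=print,
)
print(comps)  # ['A']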
@@ -104,7 +104,7 @@ def latent_blend(settings, a, b, t):

def get_modified_nmask(settings, nmask, sigma):
"""
Converts a negative mask representing the transparency of the original latent vectors being overlayed
Converts a negative mask representing the transparency of the original latent vectors being overlaid
to a mask that is scaled according to the denoising strength for this step.
Where:
17 changes: 6 additions & 11 deletions html/extra-networks-card.html
@@ -1,14 +1,9 @@
<div class='card' style={style} onclick={card_clicked} data-name="{name}" {sort_keys}>
<div class="card" style="{style}" onclick="{card_clicked}" data-name="{name}" {sort_keys}>
{background_image}
<div class="button-row">
{metadata_button}
{edit_button}
</div>
<div class='actions'>
<div class='additional'>
<span style="display:none" class='search_term{search_only}'>{search_term}</span>
</div>
<span class='name'>{name}</span>
<span class='description'>{description}</span>
<div class="button-row">{copy_path_button}{metadata_button}{edit_button}</div>
<div class="actions">
<div class="additional">{search_terms}</div>
<span class="name">{name}</span>
<span class="description">{description}</span>
</div>
</div>
5 changes: 5 additions & 0 deletions html/extra-networks-copy-path-button.html
@@ -0,0 +1,5 @@
<div class="copy-path-button card-button"
title="Copy path to clipboard"
onclick="extraNetworksCopyCardPath(event, '{filename}')"
data-clipboard-text="{filename}">
</div>
4 changes: 4 additions & 0 deletions html/extra-networks-edit-item-button.html
@@ -0,0 +1,4 @@
<div class="edit-button card-button"
title="Edit metadata"
onclick="extraNetworksEditUserMetadata(event, '{tabname}', '{extra_networks_tabname}', '{name}')">
</div>
4 changes: 4 additions & 0 deletions html/extra-networks-metadata-button.html
@@ -0,0 +1,4 @@
<div class="metadata-button card-button"
title="Show internal metadata"
onclick="extraNetworksRequestMetadata(event, '{extra_networks_tabname}', '{name}')">
</div>