From 8091ef2fca9bac0271ce72fb5fb19bb0f7df945e Mon Sep 17 00:00:00 2001 From: Vladimir Mandic Date: Wed, 24 May 2023 12:48:08 -0400 Subject: [PATCH] update hiresfix --- CHANGELOG.md | 334 ++++++++++++++++++++++++++++++++++++++++ README.md | 218 ++++++++++---------------- javascript/hires_fix.js | 4 +- modules/ui.py | 14 +- requirements.txt | 2 +- 5 files changed, 420 insertions(+), 152 deletions(-) create mode 100644 CHANGELOG.md diff --git a/CHANGELOG.md b/CHANGELOG.md new file mode 100644 index 000000000..ef556b7c3 --- /dev/null +++ b/CHANGELOG.md @@ -0,0 +1,334 @@ +# Change Log for SD.Next + +## Update for 05/24/2023 + +Mostly cosmetic... + +- Updated README.md +- Created CHANGELOG.md + +## Update for 05/23/2023 + +Major internal work with perhaps not that much user-facing to show for it ;) + +- update core repos: **stability-ai**, **taming-transformers**, **k-diffusion, blip**, **codeformer** + note: to avoid disruptions, this is applicable for new installs only +- tested with **torch 2.1**, **cuda 12.1**, **cudnn 8.9** + (production remains on torch2.0.1+cuda11.8+cudnn8.8) +- fully extend support of `--data-dir` + allows multiple installations to share pretty much everything, not just models + especially useful if you want to run in a stateless container or cloud instance +- redo api authentication + now api authentication will use same user/pwd (if specified) for ui and strictly enforce it using httpbasicauth + new authentication is also fully supported in combination with ssl for both sync and async calls + if you want to use api programatically, see examples in `cli/sdapi.py` +- add dark/light theme mode toggle +- redo some `clip-skip` functionality +- better matching for vae vs model +- update to `xyz grid` to allow creation of large number of images without creating grid itself +- update `gradio` (again) +- more prompt parser optimizations +- better error handling when importing image settings which are not compatible with current install + for example, 
when upscaler or sampler originally used is not available +- fixes...amazing how many issues were introduced by porting a1111 v1.20 code without adding almost no new functionality + next one is v1.30 (still in dev) which does bring a lot of new features + +## Update for 05/17/2023 + +This is a massive one due to huge number of changes, +but hopefully it will go ok... + +- new **prompt parsers** + select in UI -> Settings -> Stable Diffusion + - **Full**: my new implementation + - **A1111**: for backward compatibility + - **Compel**: as used in ComfyUI and InvokeAI (a.k.a *Temporal Weighting*) + - **Fixed**: for really old backward compatibility +- monitor **extensions** install/startup and + log if they modify any packages/requirements + this is a *deep-experimental* python hack, but i think its worth it as extensions modifying requirements + is one of most common causes of issues +- added `--safe` command line flag mode which skips loading user extensions + please try to use it before opening new issue +- reintroduce `--api-only` mode to start server without ui +- port *all* upstream changes from [A1111](https://github.com/AUTOMATIC1111/stable-diffusion-webui) + up to today - commit hash `89f9faa` + +## Update for 05/15/2023 + +- major work on **prompt parsing** + this can cause some differences in results compared to what you're used to, but its all about fixes & improvements + - prompt parser was adding commas and spaces as separate words and tokens and/or prefixes + - negative prompt weight using `[word:weight]` was ignored, it was always `0.909` + - bracket matching was anything but correct. complex nested attention brackets are now working. 
+ - btw, if you run with `--debug` flag, you'll now actually see parsed prompt & schedule +- updated all scripts in `/cli` +- add option in settings to force different **latent sampler** instead of using primary only +- add **interrupt/skip** capabilities to process images + +## Update for 05/13/2023 + +This is mostly about optimizations... + +- improved `torch-directml` support + especially interesting for **amd** users on **windows** where **torch+rocm** is not yet available + dont forget to run using `--use-directml` or default is **cpu** +- improved compatibility with **nvidia** rtx 1xxx/2xxx series gpus +- fully working `torch.compile` with **torch 2.0.1** + using `inductor` compile takes a while on first run, but does result in 5-10% performance increase +- improved memory handling + for highest performance, you can also disable aggressive **gc** in settings +- improved performance + especially *after* generate as image handling has been moved to separate thread +- allow per-extension updates in extension manager +- option to reset configuration in settings + +## Update for 05/11/2023 + +- brand new **extension manager** + this is pretty much a complete rewrite, so new issues are possible +- support for `torch` 2.0.1 + note that if you are experiencing frequent hangs, this may be a worth a try +- updated `gradio` to 3.29.0 +- added `--reinstall` flag to force reinstall of all packages +- auto-recover & re-attempt when `--upgrade` is requested but fails +- check for duplicate extensions + +## Update for 05/08/2023 + +Back online with few updates: + +- bugfixes. yup, quite a lot of those +- auto-detect some cpu/gpu capabilities on startup + this should reduce need to tweak and tune settings like no-half, no-half-vae, fp16 vs fp32, etc +- configurable order of top level tabs +- configurable order of scripts in txt2img and img2img + for both, see sections in ui-> settings -> user interface + +## Update for 05/04/2023 + +Again, few days later... 
+ +- reviewed/ported **all** commits from **A1111** upstream + some a few are not applicable as i already have alternative implementations + and very few i choose not to implement (save/restore last-known-good-config is a bad hack) + otherwise, we're fully up to date (its doesn't show on fork status as code merges were mostly manual due to conflicts) + but...due to sheer size of the updates, this may introduce some temporary issues +- redesigned server restart function + now available and working in ui + actually, since server restart is now a true restart and not ui restart, it can be used much more flexibly +- faster model load + plus support for slower devices via stream-load function (in ui settings) +- better logging + this includes new `--debug` flag for more verbose logging when troubleshooting + +## Update for 05/01/2023 + +Been a bit quieter for last few days as changes were quite significant, but finally here we are... + +- Updated core libraries: Gradio, Diffusers, Transformers +- Added support for **Intel ARC** GPUs via Intel OneAPI IPEX (auto-detected) +- Added support for **TorchML** (set by default when running on non-compatible GPU or on CPU) +- Enhanced support for AMD GPUs with **ROCm** +- Enhanced support for Apple **M1/M2** +- Redesigned command params: run `webui --help` for details +- Redesigned API and script processing +- Experimental support for multiple **Torch compile** options +- Improved sampler support +- Google Colab: + Maintained by +- Fixes, fixes, fixes... + +To take advantage of new out-of-the-box tunings, its recommended to delete your `config.json` so new defaults are applied. Its not necessary, but otherwise you may need to play with UI Settings to get the best of Intel ARC, TorchML, ROCm or Apple M1/M2. + +## Update for 04/27/2023 + +a bit shorter list as: + +- i've been busy with buxfixing + there are a lot of them, not going to list each here. 
+ but seems like critical issues backlog is quieting down and soon i can focus on new features development. +- i've started collaboration with couple of major projects, + hopefully this will accelerate future development. + +what's new: + +- ability to view/add/edit model description shown in extra networks cards +- add option to specify fallback sampler if primary sampler is not compatible with desired operation +- make clip skip a local parameter +- remove obsolete items from UI settings +- set defaults for AMD ROCm + if you have issues, you may want to start with a fresh install so configuration can be created from scratch +- set defaults for Apple M1/M2 + if you have issues, you may want to start with a fresh install so configuration can be created from scratch + +## Update for 04/25/2023 + +- update process image -> info +- add VAE info to metadata +- update GPU utility search paths for better GPU type detection +- update git flags for wider compatibility +- update environment tuning +- update ti training defaults +- update VAE search paths +- add compatibility opts for some old extensions +- validate script args for always-on scripts + fixes: deforum with controlnet + +## Update for 04/24/2023 + +- identify race condition where generate locks up while fetching preview +- add pulldowns to x/y/z script +- add VAE rollback feature in case of NaNs +- use samples format for live preview +- add token merging +- use **Approx NN** for live preview +- create default `styles.csv` +- fix setup not installing `tensorflow` dependencies +- update default git flags to reduce number of warnings + +## Update for 04/23/2023 + +- fix VAE dtype + should fix most issues with NaN or black images +- add built-in Gradio themes +- reduce requirements +- more AMD specific work +- initial work on Apple platform support +- additional PR merges +- handle torch cuda crashing in setup +- fix setup race conditions +- fix ui lightbox +- mark tensorflow as optional +- add additional image 
name templates + +## Update for 04/22/2023 + +- autodetect which system libs should be installed + this is a first pass of autoconfig for **nVidia** vs **AMD** environments +- fix parse cmd line args from extensions +- only install `xformers` if actually selected as desired cross-attention method +- do not attempt to use `xformers` or `sdp` if running on cpu +- merge tomesd token merging +- merge 23 PRs pending from a1111 backlog (!!) + +*expect shorter updates for the next few days as i'll be partically ooo* + +## Update for 04/20/2023 + +- full CUDA tuning section in UI Settings +- improve exif/pnginfo metadata parsing + it can now handle 3rd party images or images edited in external software +- optimized setup performance and logging +- improve compatibility with some 3rd party extensions + for example handle extensions that install packages directly from github urls +- fix initial model download if no models found +- fix vae not found issues +- fix multiple git issues + +note: if you previously had command line optimizations such as --no-half, those are now ignored and moved to ui settings + +## Update for 04/19/2023 + +- fix live preview +- fix model merge +- fix handling of user-defined temp folders +- fix submit benchmark +- option to override `torch` and `xformers` installer +- separate benchmark data for system-info extension +- minor css fixes +- created initial merge backlog from pending prs on a1111 repo + see #258 for details + +## Update for 04/18/2023 + +- reconnect ui to active session on browser restart + this is one of most frequently asked for items, finally figured it out + works for text and image generation, but not for process as there is no progress bar reported there to start with +- force unload `xformers` when not used + improves compatibility with AMD/M1 platforms +- add `styles.csv` to UI settings to allow customizing path +- add `--skip-git` to cmd flags for power users that want + to skip all git checks and operations and perform 
manual updates +- add `--disable-queue` to cmd flags that disables Gradio queues (experimental) + this forces it to use HTTP instead of WebSockets and can help on unreliable network connections +- set scripts & extensions loading priority and allow custom priorities + fixes random extension issues: + `ScuNet` upscaler dissapearing, `Additional Networks` not showing up on XYZ axis, etc. +- improve html loading order +- remove some `asserts` causing runtime errors and replace with user-friendly messages +- update README.md +- update TODO.md + +## Update for 04/17/2023 + +- **themes** are now dynamic and discovered from list of available gradio themes on huggingface + its quite a list of 30+ supported themes so far +- added option to see **theme preview** without the need to apply it or restart server +- integrated **image info** functionality into **process image** tab and removed separate **image info** tab +- more installer improvements +- fix urls +- updated github integration +- make model download as optional if no models found + +## Update for 04/16/2023 + +- support for ui themes! 
to to *settings* -> *user interface* -> "ui theme* + includes 12 predefined themes +- ability to restart server from ui +- updated requirements +- removed `styles.csv` from repo, its now fully under user control +- removed model-keyword extension as overly aggresive +- rewrite of the fastapi middleware handlers +- install bugfixes, hopefully new installer is now ok \ + i really want to focus on features and not troubleshooting installer + +## Update for 04/15/2023 + +- update default values +- remove `ui-config.json` from repo, its not fully under user control +- updated extensions mangager +- updated locon/lycoris plugin +- enable quick launch by default +- add multidiffusion upscaler extensions +- add model keyword extension +- enable strong linting +- fix circular imports +- fix extensions updated +- fix git update issues +- update github templates + +## Update for 04/14/2023 + +- handle duplicate extensions +- redo exception handler +- fix generate forever +- enable cmdflags compatibility +- change default css font +- fix ti previews on initial start +- enhance tracebacks +- pin transformers version to last known good version +- fix extension loader + +## Update for 04/12/2023 + +This has been pending for a while, but finally uploaded some massive changes + +- New launcher + - `webui.bat` and `webui.sh`: + Platform specific wrapper scripts that starts `launch.py` in Python virtual environment + *Note*: Server can run without virtual environment, but it is recommended to use it + This is carry-over from original repo + **If you're unsure which launcher to use, this is the one you want** + - `launch.py`: + Main startup script + Can be used directly to start server in manually activated `venv` or to run it without `venv` + - `installer.py`: + Main installer, used by `launch.py` + - `webui.py`: + Main server script +- New logger +- New exception handler +- Built-in performance profiler +- New requirements handling +- Move of most of command line flags into UI 
Settings diff --git a/README.md b/README.md index 5b98cde34..ff83c9235 100644 --- a/README.md +++ b/README.md @@ -3,163 +3,101 @@ ![License](https://img.shields.io/github/license/vladmandic/human?style=flat-square&svg=true) ![GitHub Status Checks](https://img.shields.io/github/checks-status/vladmandic/human/main?style=flat-square&svg=true) -# Stable Diffusion - Automatic -*Heavily opinionated custom fork of* +# SD.Next -Fork is as close as up-to-date with origin as time allows -All code changes are merged upstream whenever possible +**Stable Diffusion implementation with modern UI and advanced features** -The idea behind the fork is to enable latest technologies and advances in text-to-image generation -*Sometimes this is not the same as "as simple as possible to use"* -If you are looking an amazing simple-to-use Stable Diffusion tool, I'd suggest [InvokeAI](https://invoke-ai.github.io/InvokeAI/) specifically due to its automated installer and ease of use +This project started as a fork of [Automatic1111 WebUI](https://github.com/AUTOMATIC1111/stable-diffusion-webui/) and it grew significantly since then, but although it diverged significantly, any substantial features of the original work are ported to this repository as well -
+Individual features are not listed here, instead check [Changelog](CHANGELOG.md) for full list of changes -### Follow [Development updates](https://github.com/vladmandic/automatic/discussions/99) for daily updates on new features/fixes +## Platform support -
- -![screenshot](javascript/black-orange.jpg) - -
- -## Notes - -### Fork does differ in few things - -- New installer -- Advanced CUDA tuning - Available in UI Settings -- Advanced environment tuning -- Optimized startup and models lazy-loading -- Built-in performance profiler -- Updated libraries to latest known compatible versions -- Includes opinionated **System** and **Options** configuration -- Does not rely on `Accelerate` as it only affects distributed systems - Gradio web server will be initialized much earlier which model load is done in the background - Faster model loading plus ability to fallback on corrupt models -- Uses simplified folder structure - e.g. `/train`, `/outputs/*`, `/models/*`, etc. -- Enhanced training templates -- Built-in `LoRA`, `LyCORIS`, `Custom Diffusion`, `Dreambooth` training -- Majority of settings configurable via UI without the need for command line flags - e.g, cross-optimization methods, system folders, etc. -- New logger -- New error and exception handlers - -### Optimizations - -- Optimized for `Torch` 2.0 -- Runs with `SDP` memory attention enabled by default if supported by system - *Note*: `xFormers` and other cross-optimization methods are still available -- Auto-adjust parameters when running on **CPU** or **CUDA** - *Note:* AMD and M1 platforms are supported, but without out-of-the-box optimizations - -### Integrated Extensions - -Hand-picked list of extensions that are deeply integrated into core workflows: - -- [System Info](https://github.com/vladmandic/sd-extension-system-info) -- [ControlNet](https://github.com/Mikubill/sd-webui-controlnet) -- [Image Browser](https://github.com/AlUlkesh/stable-diffusion-webui-images-browser) -- [LORA](https://github.com/kohya-ss/sd-scripts) *(both training and inference)* -- [LyCORIS](https://github.com/KohakuBlueleaf/LyCORIS) *(both training and inference)* -- [Model Converter](https://github.com/Akegarasu/sd-webui-model-converter) -- [CLiP Interrogator](https://github.com/pharmapsychotic/clip-interrogator-ext) -- [Dynamic 
Thresholding](https://github.com/mcmonkeyprojects/sd-dynamic-thresholding) -- [Steps Animation](https://github.com/vladmandic/sd-extension-steps-animation) -- [Seed Travel](https://github.com/yownas/seed_travel) -- [Multi-Diffusion Upscaler](https://github.com/pkuliyi2015/multidiffusion-upscaler-for-automatic1111) - -### User Interface - -- Includes updated **UI**: reskinned and reorganized - Black and orange dark theme with fixed width options panels and larger previews -- Includes support for **Gradio themes** - *Settings* -> *User interface* -> *UI theme* - Link to themes list & previews: - -### Removed - -- Drops compatibility with older versions of `python` and requires **3.9** or **3.10** -- Drops localizations - -### Integrated CLI/API tools - -Fork adds extra functionality: - -- New skin and UI layout -- Ships with set of **CLI** tools that rely on *SD API* for execution: - e.g. `generate`, `train`, `bench`, etc. - [Full list]() - -
+- **nVidia** GPUs using **CUDA** libraries on both *Windows and Linux* +- **AMD** GPUs using **ROCm** libraries on *Linux* + Support will be extended to *Windows* once AMD releases ROCm for Windows +- Any GPU compatibile with **DirectX** on *Windows* using **DirectML** libraries + This includes support for AMD GPUs that are not supported by native ROCm libraries +- **Intel Arc** GPUs using Intel OneAPI **Ipex/XPU** libraries +- **Apple M1/M2** on *OSX* using built-in support in Torch with some platform optimizations ## Install 1. Install first: **Python** & **Git** -2. If you have nVidia GPU, install nVidia CUDA toolkit: - -3. Clone repository +2. Clone repository `git clone https://github.com/vladmandic/automatic` +3. Run launcher + `webui.bat` or `webui.sh`: + - Platform specific wrapper scripts For Windows, Linux and OSX + - Starts `launch.py` in a Python virtual environment (`venv`) + - Uses `install.py` to handle all actual requirements and dependencies + - *Note*: Server can run without virtual environment, but it is recommended to use it to avoid library version conflicts with other applications -## Run - -Run desired startup script to install dependencies and extensions and start server: - -- `webui.bat` and `webui.sh`: - Platform specific wrapper scripts For Windows, Linux and OSX - Starts `launch.py` in a Python virtual environment (venv) - *Note*: Server can run without virtual environment, but it is recommended to use it to avoid library version conflicts with other applications - **If you're unsure which launcher to use, this is the one you want** -- `launch.py`: - Main startup script - Can be used directly to start server in a manually activated `venv` or to run server without `venv` -- `setup.py`: - Main installer, used by `launch.py` - Can also be used directly to update repository or extensions - If running manually, make sure to activate `venv` first (if used) -- `webui.py`: - Main server script - -Any of the above scripts can be used with 
`--help` to display detailed usage information and available parameters -For example: -> webui.bat --help +*Note*: **nVidia/CUDA** and **AMD/ROCm** are auto-detected is present and available, but for any other use case specify required parameter explicitly or wrong packages may be installed as installer will assume CPU-only environment Full startup sequence is logged in `setup.log`, so if you encounter any issues, please check it first -## Update +Below is partial list of all available parameters, run `webui --help` for the full list: -The launcher can perform automatic update of main repository, requirements, extensions and submodules: + Setup options: + --use-ipex Use Intel OneAPI XPU backend, default: False + --use-directml Use DirectML if no compatible GPU is detected, default: False + --use-cuda Force use nVidia CUDA backend, default: False + --use-rocm Force use AMD ROCm backend, default: False + --skip-update Skip update of extensions and submodules, default: False + --skip-requirements Skips checking and installing requirements, default: False + --skip-extensions Skips running individual extension installers, default: False + --skip-git Skips running all GIT operations, default: False + --skip-torch Skips running Torch checks, default: False + --reinstall Force reinstallation of all requirements, default: False + --debug Run installer with debug logging, default: False + --reset Reset main repository to latest version, default: False + --upgrade Upgrade main repository to latest version, default: False + --safe Run in safe mode with no user extensions -- **Main repository**: - Update is *not* performed by default, enable with `--upgrade` flag -- **Requirements**: - Check is performed on each startup and missing requirements are auto-installed - Can be skipped with `--skip-requirements` flag -- **Extensions and submodules**: - Update is performed on each startup and installer for each extension is started - Can be skipped with `--skip-extensions` flag -- 
**Quick mode**: Automatically enabled if timestamp of last sucessful setup is newer than actual repository version or version of newest extension - -
- -## Other - -### Scripts - -This repository comes with a large collection of scripts that can be used to process inputs, train, generate, and benchmark models -As well as number of auxiliary scripts that do not rely on **WebUI**, but can be used for end-to-end solutions such as extract frames from videos, etc. -For full details see [Docs](cli/README.md) - -
- -### Docs - -- Scripts are in [Scripts](cli/README.md) -- Everything else is in [Wiki](https://github.com/vladmandic/automatic/wiki) -- Except my current [TODO](TODO.md) +
![screenshot](javascript/black-orange.jpg)
+ + +## Notes + +### **Collab** + +- To avoid having this repo rely just on me, I'd love to have additional maintainers with full admin rights. If you're interested, ping me! +- In addition to general cross-platform code, desire is to have a lead for each of the main platforms +This should be fully cross-platform, but I would really love to have additional contributors and/or maintainers to join and help lead the efforts on different platforms + +### **Goals** + +The idea behind the fork is to enable latest technologies and advances in text-to-image generation +*Sometimes this is not the same as "as simple as possible to use"* +If you are looking for an amazing simple-to-use Stable Diffusion tool, I'd suggest [InvokeAI](https://invoke-ai.github.io/InvokeAI/) specifically due to its automated installer and ease of use + +General goals: + +- Cross-platform + - Create uniform experience while automatically managing any platform specific differences +- Performance + - Enable best possible performance on all platforms +- Ease-of-Use + - Automatically handle all requirements, dependencies, flags regardless of platform + - Integrate all best options for uniform out-of-the-box experience without the need to tweak anything manually +- Look-and-Feel + - Create modern, intuitive and clean UI +- Up-to-Date + - Keep code up to date with latest advances in text-to-image generation + +## Credits + +- Main credit goes to [Automatic1111 WebUI](https://github.com/AUTOMATIC1111/stable-diffusion-webui) +- Additional credits are listed in [Credits](https://github.com/AUTOMATIC1111/stable-diffusion-webui/#credits) +- Licenses for modules are listed in [Licenses](html/licenses.html) + +### **Docs** + +- [Readme](README.md) +- [ToDo](TODO.md) +- [Changelog](CHANGELOG.md) +- [CLI Tools](cli/README.md)
diff --git a/javascript/hires_fix.js b/javascript/hires_fix.js index b71f4ddbb..d4ae4586e 100644 --- a/javascript/hires_fix.js +++ b/javascript/hires_fix.js @@ -1,5 +1,5 @@ /* global gradioApp, opts */ -function onCalcResolutionHires(enable, width, height, hr_scale, hr_resize_x, hr_resize_y) { +function onCalcResolutionHires(enable_hr, width, height, hr_scale, hr_resize_x, hr_resize_y) { function setInactive(elem, inactive) { elem.classList.toggle('inactive', !!inactive); } @@ -10,5 +10,5 @@ function onCalcResolutionHires(enable, width, height, hr_scale, hr_resize_x, hr_ setInactive(hrUpscaleBy, opts.use_old_hires_fix_width_height || hr_resize_x > 0 || hr_resize_y > 0); setInactive(hrResizeX, opts.use_old_hires_fix_width_height || hr_resize_x == 0); setInactive(hrResizeY, opts.use_old_hires_fix_width_height || hr_resize_y == 0); - return [enable, width, height, hr_scale, hr_resize_x, hr_resize_y]; + return [enable_hr, width, height, hr_scale, hr_resize_x, hr_resize_y]; } diff --git a/modules/ui.py b/modules/ui.py index 044b3d957..6f2a762d8 100644 --- a/modules/ui.py +++ b/modules/ui.py @@ -39,6 +39,7 @@ if not cmd_opts.share and not cmd_opts.listen: def gr_show(visible=True): + print('HERE1') return {"visible": visible, "__type__": "update"} @@ -378,10 +379,11 @@ def create_ui(): with FormGroup(visible=False, elem_id="txt2img_hires_fix") as hr_options: with FormRow(elem_id="txt2img_hires_fix_row1", variant="compact"): hr_upscaler = gr.Dropdown(label="Upscaler", elem_id="txt2img_hr_upscaler", choices=[*modules.shared.latent_upscale_modes, *[x.name for x in modules.shared.sd_upscalers]], value=modules.shared.latent_upscale_default_mode) - hr_second_pass_steps = gr.Slider(minimum=0, maximum=150, step=1, label='Hires steps', value=0, elem_id="txt2img_hires_steps") - denoising_strength = gr.Slider(minimum=0.0, maximum=1.0, step=0.01, label='Denoising strength', value=0.7, elem_id="txt2img_denoising_strength") + hr_second_pass_steps = gr.Slider(minimum=0, maximum=99, 
step=1, label='Hires steps', value=0, elem_id="txt2img_hires_steps") with FormRow(elem_id="txt2img_hires_fix_row2", variant="compact"): + denoising_strength = gr.Slider(minimum=0.0, maximum=1.0, step=0.01, label='Denoising strength', value=0.7, elem_id="txt2img_denoising_strength") hr_scale = gr.Slider(minimum=1.0, maximum=4.0, step=0.05, label="Upscale by", value=2.0, elem_id="txt2img_hr_scale") + with FormRow(elem_id="txt2img_hires_fix_row3", variant="compact"): hr_resize_x = gr.Slider(minimum=0, maximum=2048, step=8, label="Resize width to", value=0, elem_id="txt2img_hr_resize_x") hr_resize_y = gr.Slider(minimum=0, maximum=2048, step=8, label="Resize height to", value=0, elem_id="txt2img_hr_resize_y") elif category == "override_settings": @@ -394,15 +396,9 @@ def create_ui(): for preview_input in hr_resolution_preview_inputs: preview_input.change( fn=calc_resolution_hires, - inputs=hr_resolution_preview_inputs, - outputs=[hr_final_resolution], - show_progress=False, - ) - preview_input.change( - None, _js="onCalcResolutionHires", inputs=hr_resolution_preview_inputs, - outputs=[], + outputs=[hr_final_resolution], show_progress=False, ) diff --git a/requirements.txt b/requirements.txt index 6b0763cbd..be364a4ce 100644 --- a/requirements.txt +++ b/requirements.txt @@ -53,7 +53,7 @@ diffusers==0.16.1 einops==0.4.1 gradio==3.32.0 numexpr==2.8.4 -numpy==1.24.3 +numpy==1.23.5 numba==0.57.0 pandas==1.5.3 protobuf==3.20.3