Creating images in batches with ControlNet 1.1 in a Stable Diffusion (Diffusers) / Google Colab environment #1
Installing the libraries and creating the folders
#!pip install -q diffusers transformers xformers git+https://github.com/huggingface/accelerate.git
#
!nvidia-smi
!pip install omegaconf
!pip install compel
!pip install -q diffusers transformers accelerate
!pip install -q opencv-contrib-python
!pip install -q controlnet_aux
!pip install natsort
#Output, input, and model folders (txt2img output first)
!mkdir -p /content/output/txt2img
!mkdir -p /content/output/img2img
!mkdir -p /content/output/controlnet
!mkdir -p /content/input/img2img
!mkdir -p /content/input/controlnet
!mkdir -p /content/model
Setting the parameters
#@title Mode { display-mode: "form" }
#@markdown **Settings**
device = "cuda" #@param ["cpu", "cuda"]
#Model data
#model_id = "/content/drive/MyDrive/StableDiffusion/Model/chilledremix"#@param {type:"string"}
#model_id = "/content/model/anythingV5PrtRE"#@param {type:"string"}
#
model_id = "/content/model/Brav6.safetensors"#@param {type:"string"}
#model_id = "AIARTCHAN/AbyssHellVer3"#@param {type:"string"}
#model_id = "andite/anything-v4.0"#@param {type:"string"}
#model_id = "naclbit/trinart_stable_diffusion_v2"#@param {type:"string"}
#model_id = "stabilityai/stable-diffusion-2-1"#@param {type:"string"}
#model_id = "BanKaiPls/AsianModel"#@param {type:"string"}
#model_id = "runwayml/stable-diffusion-v1-5"#@param {type:"string"}
#VAE
#vae = "stabilityai/sd-vae-ft-ema"#@param {type:"string"}
#
vae = "/content/model/vae-ft-mse-840000-ema-pruned.safetensors"#@param {type:"string"}
#
#
textual_inversion = "embed/EasyNegative"#@param {type:"string"}
#
token = "EasyNegative"#@param {type:"string"}
#
embed_weight_name = "EasyNegative.safetensors"#@param {type:"string"}
#textual_inversion = "/content/model/negative_hand-neg.pt"#@param {type:"string"}
#token = "negative_hand-neg"#@param {type:"string"}
#embed_weight_name = "negative_hand-neg.pt"#@param {type:"string"}
lora_model_id = "/content/model/JapaneseDollLikeness_v15.safetensors"#@param {type:"string"}
lora_weight_name = "JapaneseDollLikeness_v15.safetensors"#@param {type:"string"}
#lora_model_id = "/content/model/flat2.safetensor"#@param {type:"string"}
#lora_weight_name = "flat2.safetensors"#@param {type:"string"}
#Positive prompt
#
#
prompt = "1girl, bangs, Masterpiece, Detailed eyes, Double eyelid,,"#@param {type:"string"}
#Negative prompt
#
#
negative_prompt = "low quality, worst quality,"#@param {type:"string"}
#strength
strength = 0.7#@param {type:"number"}
#CFG Scale
CFG_scale = 9#@param {type:"number"}
#Number of steps
Steps = 20#@param {type:"number"}
#Seed value(s), comma separated
seed="167657154,14705899"#@param {type:"string"}
#seed="-1"#@param {type:"string"}
#seed="-2"#@param {type:"string"}
#Number of images to generate
batch_count = 4#@param {type:"number"}
#Output image width
width = 512#@param {type:"number"}
#Output image height
height = 512#@param {type:"number"}
#Image folders (input, ControlNet conditioning, output)
load_path = "/content/input/img2img"#@param {type:"string"}
controlnet_path = "/content/input/controlnet"#@param {type:"string"}
save_path = "/content/output/img2img"#@param {type:"string"}
#controlnet_processor_id = "Nacholmo/controlnet-qr-pattern-v2"#@param {type:"string"}
#controlnet_processor_id = "monster-labs/control_v1p_sd15_qrcode_monster"#@param {type:"string"}
#controlnet_processor_id = "DionTimmer/controlnet_qrcode-control_v11p_sd21"#@param {type:"string"}
#
controlnet_processor_id = "lllyasviel/control_v11p_sd15_openpose"#@param ["","lllyasviel/control_v11p_sd15_canny","lllyasviel/control_v11p_sd15_mlsd","lllyasviel/control_v11p_sd15_depth","lllyasviel/control_v11p_sd15_normalbae","lllyasviel/control_v11p_sd15_seg","lllyasviel/control_v11p_sd15_inpaint","lllyasviel/control_v11p_sd15_lineart","lllyasviel/control_v11p_sd15s2_lineart_anime","lllyasviel/control_v11p_sd15_openpose","lllyasviel/control_v11p_sd15_scribble","lllyasviel/control_v11p_sd15_softedge","lllyasviel/control_v11e_sd15_shuffle","lllyasviel/control_v11e_sd15_ip2p","lllyasviel/control_v11u_sd15_tile"]{allow-input: true}
controlnet_preprocessor_id = "" #@param ["","canny", "depth_leres", "depth_leres++", "depth_midas", "depth_zoe", "lineart_anime", "lineart_coarse", "lineart_realistic", "mediapipe_face", "mlsd", "normal_bae", "normal_midas", "openpose", "openpose_face", "openpose_faceonly", "openpose_full", "openpose_hand", "scribble_hed", "scribble_pidinet", "shuffle", "softedge_hed", "softedge_hedsafe", "softedge_pidinet", "softedge_pidsafe"] {allow-input: true}
controlnet_conditioning_scale = 1.0#@param {type:"number"}
controlnet_guidance_start = 0#@param {type:"number"}
controlnet_guidance_end = 1.0#@param {type:"number"}
guess_mode=True #@param {type:"boolean"}
controlnet_first_image=False #@param {type:"boolean"}
controlnet_image_loop=True #@param {type:"boolean"}
controlnet_image_resize=False #@param {type:"boolean"}
from_single_file=True #@param {type:"boolean"}
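The values offered for controlnet_preprocessor_id are the annotator names understood by the controlnet_aux package installed above. As a small, hedged sketch (the file name pose.png and the output path are placeholders I chose, not part of the article's script), this is roughly how an input image from controlnet_path would be turned into a conditioning image:
import os
from PIL import Image
from controlnet_aux.processor import Processor

# Build the annotator that matches the form value, e.g. "openpose_full" or "canny".
# (Skip this entirely when controlnet_preprocessor_id is left empty.)
processor = Processor(controlnet_preprocessor_id or "openpose_full")

# "pose.png" is only a placeholder file name inside /content/input/controlnet
source = Image.open(os.path.join(controlnet_path, "pose.png")).convert("RGB")

# to_pil=True returns a PIL image that can be passed straight to the ControlNet pipeline
condition = processor(source, to_pil=True)
condition.save(os.path.join(controlnet_path, "pose_processed.png"))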
Downloading the model file and converting it to Diffusers format
This step is unnecessary when the model is already provided in Diffusers format; use it when you start from a single checkpoint file.
!apt-get -y install -qq aria2
#
model_url = "https://civitai.com/api/download/models/90854"#@param {type:"string"}
#
model_name = "anythingV5PrtRE.safetensors"#@param {type:"string"}
#model_url = "https://huggingface.co/BanKaiPls/AsianModel/resolve/main/Brav6.safetensors"#@param {type:"string"}
#model_name = "Brav6.safetensors"#@param {type:"string"}
#
vae_url = "https://huggingface.co/stabilityai/sd-vae-ft-mse-original/resolve/main/vae-ft-mse-840000-ema-pruned.safetensors"#@param {type:"string"}
#
vae_name = "vae-ft-mse-840000-ema-pruned.safetensors"#@param {type:"string"}
#lora_url = "https://huggingface.co/2vXpSwA7/iroiro-lora/resolve/main/release/flat2.safetensors"#@param {type:"string"}
#lora_name = "flat2.safetensors"#@param {type:"string"}
#
lora_url = "https://civitai.com/api/download/models/34562"#@param {type:"string"}
lora_name = "JapaneseDollLikeness_v15.safetensors"#@param {type:"string"}
#lora_url = "https://civitai.com/api/download/models/44566"#@param {type:"string"}
#lora_name = "hipoly_3dcg_v7-epoch-000012.safetensors"#@param {type:"string"}
#
textual_inversion_url = "https://civitai.com/models/56519/negativehand-negative-embedding"#@param {type:"string"}
#
textual_inversion_name = "negative_hand-neg.pt"#@param {type:"string"}
download_only=True #@param {type:"boolean"}
!aria2c --console-log-level=error -c -x 16 -s 16 -k 1M $model_url -d /content/model -o $model_name
!aria2c --console-log-level=error -c -x 16 -s 16 -k 1M $vae_url -d /content/model -o $vae_name
!aria2c --console-log-level=error -c -x 16 -s 16 -k 1M $lora_url -d /content/model -o $lora_name
#!aria2c --console-log-level=error -c -x 16 -s 16 -k 1M $textual_inversion_url -d /content/model -o $textual_inversion_name
#model_dir = ""#@param {type:"string"}
if not download_only:
    import os
    import diffusers, torch
    from diffusers import UniPCMultistepScheduler
    from diffusers.pipelines.stable_diffusion.convert_from_ckpt import download_from_original_stable_diffusion_ckpt
    #
    device = "cuda"
    #device = "cpu"
    # Convert the single-file checkpoint into a Diffusers pipeline object
    pipe = download_from_original_stable_diffusion_ckpt(
        "/content/model/" + model_name, from_safetensors=True
    )
    pipe.scheduler = UniPCMultistepScheduler.from_config(pipe.scheduler.config)
    pipe.to(device)
    # Save the pipeline as a Diffusers-format folder next to the checkpoint
    pipe.save_pretrained("/content/model/" + os.path.splitext(model_name)[0], safe_serialization=True)
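As a rough, hedged sketch of how the converted folder and the other downloads above could be wired together (loading the converted folder with from_pretrained rather than the single-file route toggled by from_single_file is my assumption, and AutoencoderKL.from_single_file needs a reasonably recent diffusers release; this is not the script from the pages linked in the next section):
import os
import torch
from diffusers import AutoencoderKL, ControlNetModel, StableDiffusionControlNetPipeline, UniPCMultistepScheduler

# ControlNet weights selected in the parameter form
controlnet = ControlNetModel.from_pretrained(controlnet_processor_id, torch_dtype=torch.float16)

# Load the Diffusers-format folder produced by the conversion cell above
converted_dir = "/content/model/" + os.path.splitext(model_name)[0]
pipe = StableDiffusionControlNetPipeline.from_pretrained(
    converted_dir, controlnet=controlnet, torch_dtype=torch.float16, safety_checker=None
)

# Optional extras from the form: external VAE, EasyNegative embedding, LoRA
pipe.vae = AutoencoderKL.from_single_file(vae, torch_dtype=torch.float16)
pipe.load_textual_inversion(textual_inversion, weight_name=embed_weight_name, token=token)
pipe.load_lora_weights("/content/model", weight_name=lora_weight_name)

pipe.scheduler = UniPCMultistepScheduler.from_config(pipe.scheduler.config)
pipe = pipe.to(device)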
Image generation
The generation code itself is explained on the following pages:
txt2img
https://memo.eightban.com/stable-diffusion/stable-diffusion-diffusers2
img2img
https://memo.eightban.com/stable-diffusion/stable-diffusion-diffusers3
Multi-function
https://memo.eightban.com/stable-diffusion/stable-diffusion-diffusers-controlnet-4
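For orientation only, here is a rough sketch of what a batch loop over the settings above could look like. It assumes the pipe object from the sketch in the previous section and a recent diffusers release, parses the comma-separated seed string, and deliberately ignores the special -1/-2 seed values and the controlnet_first_image / controlnet_image_loop options, which the linked pages handle; none of this is the author's exact code.
import os
import glob
import random
import torch
from PIL import Image
from natsort import natsorted

# Conditioning images prepared in /content/input/controlnet; only the first is used here
cond_files = natsorted(glob.glob(os.path.join(controlnet_path, "*.png")))
cond_image = Image.open(cond_files[0]).convert("RGB")
if controlnet_image_resize:
    cond_image = cond_image.resize((width, height))

# Fixed seeds from the form; extra images fall back to random seeds (my assumption)
seeds = [int(s) for s in seed.split(",")]

for i in range(batch_count):
    s = seeds[i] if i < len(seeds) else random.randrange(2**32)
    generator = torch.Generator(device=device).manual_seed(s)
    image = pipe(
        prompt=prompt,
        negative_prompt=negative_prompt,
        image=cond_image,
        width=width,
        height=height,
        num_inference_steps=Steps,
        guidance_scale=CFG_scale,
        generator=generator,
        controlnet_conditioning_scale=controlnet_conditioning_scale,
        control_guidance_start=controlnet_guidance_start,
        control_guidance_end=controlnet_guidance_end,
        guess_mode=guess_mode,
    ).images[0]
    image.save(os.path.join("/content/output/controlnet", f"{i:05}_{s}.png"))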
Displaying the images
You can open them one by one from the Files panel on the left, but use this cell when you want to display them all at once.
import os
import random
import glob
from PIL import Image
from natsort import natsorted
#
import numpy as np
import math

# Collect every generated image in the save folder, sorted by file name
file_list2 = glob.glob(os.path.join(save_path, "*.png"))
file_list2.extend(glob.glob(os.path.join(save_path, "*.jpg")))
images2 = []
for img_path2 in natsorted(file_list2):
    open_img2 = Image.open(img_path2)
    images2.append(open_img2)

def image_grid(imgs, rows, cols):
    # assert len(imgs) == rows * cols
    w, h = imgs[0].size
    grid = Image.new("RGB", size=(cols * w, rows * h))
    for i, img in enumerate(imgs):
        grid.paste(img, box=(i % cols * w, i // cols * h))
    return grid

# Lay the images out in a grid: four rows, ceil(n / 4) + 1 columns
yy = math.ceil(len(images2) / 4)
image_grid(images2, 4, yy + 1)
Connect to Google Drive
from google.colab import drive
drive.mount('/content/drive')
Zip the output to Google Drive
fileName="test" #@param {type:"string"}
!zip -r /content/drive/MyDrive/$fileName /content/output
Delete the output files
!rm -r /content/output
!mkdir -p /content/output/txt2img
!mkdir -p /content/output/img2img
!mkdir -p /content/output/controlnet