codeShare committed (verified)
Commit 6e48c15
1 parent: c151afd

Upload Joycaption_Alpha_One.ipynb

Files changed (1)
  1. Joycaption_Alpha_One.ipynb +1 -1
Joycaption_Alpha_One.ipynb CHANGED
@@ -1 +1 @@
- {"cells":[{"cell_type":"code","execution_count":null,"metadata":{"id":"Dwr7gk5OwuGC"},"outputs":[],"source":["from google.colab import drive\n","drive.mount('/content/drive')"]},{"cell_type":"code","execution_count":null,"metadata":{"id":"1X-s_s971qB7"},"outputs":[],"source":["!apt -y install -qq aria2\n","!aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co/camenduru/joy-caption-alpha-one/raw/main/text_model/adapter_config.json -d /content/joy/text_model -o adapter_config.json\n","!aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co/camenduru/joy-caption-alpha-one/resolve/main/text_model/adapter_model.safetensors -d /content/joy/text_model -o adapter_model.safetensors\n","!aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co/camenduru/joy-caption-alpha-one/resolve/main/clip_model.pt -d /content/joy -o clip_model.pt\n","!aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co/camenduru/joy-caption-alpha-one/raw/main/config.yaml -d /content/joy -o config.yaml\n","!aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co/camenduru/joy-caption-alpha-one/resolve/main/image_adapter.pt -d /content/joy -o image_adapter.pt\n","\n","!apt -y install -qq aria2\n","!aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co/camenduru/joy-caption-alpha-one/raw/main/text_model/adapter_config.json -d /content/joy/text_model -o adapter_config.json\n","!aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co/camenduru/joy-caption-alpha-one/resolve/main/text_model/adapter_model.safetensors -d /content/joy/text_model -o adapter_model.safetensors\n","!aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co/camenduru/joy-caption-alpha-one/resolve/main/clip_model.pt -d /content/joy -o clip_model.pt\n","!aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co/camenduru/joy-caption-alpha-one/raw/main/config.yaml -d /content/joy -o config.yaml\n","!aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co/camenduru/joy-caption-alpha-one/resolve/main/image_adapter.pt -d /content/joy -o image_adapter.pt\n","\n","# @markdown Use a custom prompt to instruct the image captioning model\n","custom_prompt = '' # @param {type:'string'}\n","enable_custom_prompt = False # @param {type:'boolean'}\n","if not enable_custom_prompt: custom_prompt = 'Describe the image in 400 words'\n","!pip install peft bitsandbytes\n","!pip install hf_xet\n","from huggingface_hub import InferenceClient\n","from torch import nn\n","from transformers import AutoModel, BitsAndBytesConfig, AutoProcessor, AutoTokenizer, PreTrainedTokenizer, PreTrainedTokenizerFast, AutoModelForCausalLM\n","import torch\n","import torch.amp.autocast_mode\n","from PIL import Image\n","import os\n","import torchvision.transforms.functional as TVF\n","\n","CLIP_PATH = \"google/siglip-so400m-patch14-384\"\n","MODEL_PATH = \"unsloth/Meta-Llama-3.1-8B-bnb-4bit\"\n","CAPTION_TYPE_MAP = {\n"," (\"descriptive\", \"formal\", False, False): [f\"{custom_prompt}\"],\n"," (\"descriptive\", \"formal\", False, True): [\"Write a descriptive caption for this image in a formal tone within {word_count} words.\"],\n"," (\"descriptive\", \"formal\", True, False): [\"Write a {length} descriptive caption for this image in a formal tone.\"],\n"," (\"descriptive\", \"informal\", False, False): [\"Write a descriptive caption for this image in a casual tone.\"],\n"," 
(\"descriptive\", \"informal\", False, True): [\"Write a descriptive caption for this image in a casual tone within {word_count} words.\"],\n"," (\"descriptive\", \"informal\", True, False): [\"Write a {length} descriptive caption for this image in a casual tone.\"],\n"," (\"training_prompt\", \"formal\", False, False): [\"Write a stable diffusion prompt for this image.\"],\n"," (\"training_prompt\", \"formal\", False, True): [\"Write a stable diffusion prompt for this image within {word_count} words.\"],\n"," (\"training_prompt\", \"formal\", True, False): [\"Write a {length} stable diffusion prompt for this image.\"],\n"," (\"rng-tags\", \"formal\", False, False): [\"Write a list of Booru tags for this image.\"],\n"," (\"rng-tags\", \"formal\", False, True): [\"Write a list of Booru tags for this image within {word_count} words.\"],\n"," (\"rng-tags\", \"formal\", True, False): [\"Write a {length} list of Booru tags for this image.\"],\n","}\n","\n","class ImageAdapter(nn.Module):\n","\tdef __init__(self, input_features: int, output_features: int, ln1: bool, pos_emb: bool, num_image_tokens: int, deep_extract: bool):\n","\t\tsuper().__init__()\n","\t\tself.deep_extract = deep_extract\n","\t\tif self.deep_extract:\n","\t\t\tinput_features = input_features * 5\n","\t\tself.linear1 = nn.Linear(input_features, output_features)\n","\t\tself.activation = nn.GELU()\n","\t\tself.linear2 = nn.Linear(output_features, output_features)\n","\t\tself.ln1 = nn.Identity() if not ln1 else nn.LayerNorm(input_features)\n","\t\tself.pos_emb = None if not pos_emb else nn.Parameter(torch.zeros(num_image_tokens, input_features))\n","\t\tself.other_tokens = nn.Embedding(3, output_features)\n","\t\tself.other_tokens.weight.data.normal_(mean=0.0, std=0.02) # Matches HF's implementation of llama3\n","\tdef forward(self, vision_outputs: torch.Tensor):\n","\t\tif self.deep_extract:\n","\t\t\tx = torch.concat((\n","\t\t\t\tvision_outputs[-2],\n","\t\t\t\tvision_outputs[3],\n","\t\t\t\tvision_outputs[7],\n","\t\t\t\tvision_outputs[13],\n","\t\t\t\tvision_outputs[20],\n","\t\t\t), dim=-1)\n","\t\t\tassert len(x.shape) == 3, f\"Expected 3, got {len(x.shape)}\" # batch, tokens, features\n","\t\t\tassert x.shape[-1] == vision_outputs[-2].shape[-1] * 5, f\"Expected {vision_outputs[-2].shape[-1] * 5}, got {x.shape[-1]}\"\n","\t\telse:\n","\t\t\tx = vision_outputs[-2]\n","\t\tx = self.ln1(x)\n","\t\tif self.pos_emb is not None:\n","\t\t\tassert x.shape[-2:] == self.pos_emb.shape, f\"Expected {self.pos_emb.shape}, got {x.shape[-2:]}\"\n","\t\t\tx = x + self.pos_emb\n","\t\tx = self.linear1(x)\n","\t\tx = self.activation(x)\n","\t\tx = self.linear2(x)\n","\t\tother_tokens = self.other_tokens(torch.tensor([0, 1], device=self.other_tokens.weight.device).expand(x.shape[0], -1))\n","\t\tassert other_tokens.shape == (x.shape[0], 2, x.shape[2]), f\"Expected {(x.shape[0], 2, x.shape[2])}, got {other_tokens.shape}\"\n","\t\tx = torch.cat((other_tokens[:, 0:1], x, other_tokens[:, 1:2]), dim=1)\n","\t\treturn x\n","\tdef get_eot_embedding(self):\n","\t\treturn self.other_tokens(torch.tensor([2], device=self.other_tokens.weight.device)).squeeze(0)\n","\n","clip_processor = AutoProcessor.from_pretrained(CLIP_PATH)\n","clip_model = AutoModel.from_pretrained(CLIP_PATH)\n","clip_model = clip_model.vision_model\n","checkpoint = torch.load(\"/content/joy/clip_model.pt\", map_location='cpu')\n","checkpoint = {k.replace(\"_orig_mod.module.\", \"\"): v for k, v in checkpoint.items()}\n","clip_model.load_state_dict(checkpoint)\n","# del 
checkpoint\n","clip_model.eval()\n","clip_model.requires_grad_(False)\n","clip_model.to(\"cuda\")\n","tokenizer = AutoTokenizer.from_pretrained(f'{MODEL_PATH}')\n","#tokenizer = AutoTokenizer.from_pretrained(\"unsloth/Meta-Llama-3.1-8B-bnb-4bit\")\n","assert isinstance(tokenizer, PreTrainedTokenizer) or isinstance(tokenizer, PreTrainedTokenizerFast), f\"Tokenizer is of type {type(tokenizer)}\"\n","text_model = AutoModelForCausalLM.from_pretrained(MODEL_PATH, quantization_config = BitsAndBytesConfig(load_in_8bit=True), device_map=\"auto\", torch_dtype=torch.bfloat16)\n","text_model.load_adapter(\"/content/joy/text_model\")\n","text_model.eval()\n","image_adapter = ImageAdapter(clip_model.config.hidden_size, text_model.config.hidden_size, False, False, 38, False)\n","image_adapter.load_state_dict(torch.load(\"/content/joy/image_adapter.pt\", map_location=\"cpu\"))\n","image_adapter.eval()\n","image_adapter.to(\"cuda\")"]},{"cell_type":"code","source":["# @markdown Use a custom prompt to instruct the image captioning model\n","custom_prompt = '' # @param {type:'string'}\n","enable_custom_prompt = True # @param {type:'boolean'}\n","if not enable_custom_prompt: custom_prompt = 'Describe the image in 400 words'\n","\n","CAPTION_TYPE_MAP = {\n"," (\"descriptive\", \"formal\", False, False): [f\"{custom_prompt}\"],\n"," (\"descriptive\", \"formal\", False, True): [\"Write a descriptive caption for this image in a formal tone within {word_count} words.\"],\n"," (\"descriptive\", \"formal\", True, False): [\"Write a {length} descriptive caption for this image in a formal tone.\"],\n"," (\"descriptive\", \"informal\", False, False): [\"Write a descriptive caption for this image in a casual tone.\"],\n"," (\"descriptive\", \"informal\", False, True): [\"Write a descriptive caption for this image in a casual tone within {word_count} words.\"],\n"," (\"descriptive\", \"informal\", True, False): [\"Write a {length} descriptive caption for this image in a casual tone.\"],\n"," (\"training_prompt\", \"formal\", False, False): [\"Write a stable diffusion prompt for this image.\"],\n"," (\"training_prompt\", \"formal\", False, True): [\"Write a stable diffusion prompt for this image within {word_count} words.\"],\n"," (\"training_prompt\", \"formal\", True, False): [\"Write a {length} stable diffusion prompt for this image.\"],\n"," (\"rng-tags\", \"formal\", False, False): [\"Write a list of Booru tags for this image.\"],\n"," (\"rng-tags\", \"formal\", False, True): [\"Write a list of Booru tags for this image within {word_count} words.\"],\n"," (\"rng-tags\", \"formal\", True, False): [\"Write a {length} list of Booru tags for this image.\"],\n","}"],"metadata":{"id":"_qrUZ7jRIxIf"},"execution_count":null,"outputs":[]},{"cell_type":"code","execution_count":null,"metadata":{"id":"PjO3Wc4kzR08"},"outputs":[],"source":["# @markdown higher temperature = prompt creativity (default 0.6) <br> higher top_p = higher noise reduction in latent embedding (default 0.9)\n","temperature = 1.75 # @param {type:'slider',min:0.5,max:4.0,step:0.05}\n","top_p = 0.75 # @param {type:'slider',min:0.1,max:0.95,step:0.05}\n","temperature = float(temperature)\n","top_p = float(top_p)\n","prompt_str = 'invent words you think about when seeing this image'\n","#-----#\n","num=1\n","\n","@torch.no_grad()\n","def stream_chat(input_image: Image.Image, caption_type: str, caption_tone: str, caption_length: str | int) -> str:\n"," torch.cuda.empty_cache()\n"," length = None if caption_length == \"any\" else caption_length\n"," if 
isinstance(length, str):\n"," try:\n"," length = int(length)\n"," except ValueError:\n"," pass\n"," if caption_type == \"rng-tags\" or caption_type == \"training_prompt\":\n"," caption_tone = \"formal\"\n"," prompt_key = (caption_type, caption_tone, isinstance(length, str), isinstance(length, int))\n"," if prompt_key not in CAPTION_TYPE_MAP:\n"," raise ValueError(f\"Invalid caption type: {prompt_key}\")\n"," prompt_str = CAPTION_TYPE_MAP[prompt_key][0].format(length=length, word_count=length)\n"," print(f\"Prompt: {prompt_str}\")\n"," image = input_image.resize((384, 384), Image.LANCZOS)\n"," pixel_values = TVF.pil_to_tensor(image).unsqueeze(0) / 255.0\n"," pixel_values = TVF.normalize(pixel_values, [0.5], [0.5])\n"," pixel_values = pixel_values.to('cuda')\n"," prompt = tokenizer.encode(prompt_str, return_tensors='pt', padding=False, truncation=False, add_special_tokens=False)\n"," with torch.amp.autocast_mode.autocast('cuda', enabled=True):\n"," vision_outputs = clip_model(pixel_values=pixel_values, output_hidden_states=True)\n"," image_features = vision_outputs.hidden_states\n"," embedded_images = image_adapter(image_features)\n"," embedded_images = embedded_images.to('cuda')\n"," prompt_embeds = text_model.model.embed_tokens(prompt.to('cuda'))\n"," assert prompt_embeds.shape == (1, prompt.shape[1], text_model.config.hidden_size), f\"Prompt shape is {prompt_embeds.shape}, expected {(1, prompt.shape[1], text_model.config.hidden_size)}\"\n"," embedded_bos = text_model.model.embed_tokens(torch.tensor([[tokenizer.bos_token_id]], device=text_model.device, dtype=torch.int64))\n"," eot_embed = image_adapter.get_eot_embedding().unsqueeze(0).to(dtype=text_model.dtype)\n"," inputs_embeds = torch.cat([\n"," embedded_bos.expand(embedded_images.shape[0], -1, -1),\n"," embedded_images.to(dtype=embedded_bos.dtype),\n"," prompt_embeds.expand(embedded_images.shape[0], -1, -1),\n"," eot_embed.expand(embedded_images.shape[0], -1, -1),\n"," ], dim=1)\n"," input_ids = torch.cat([\n"," torch.tensor([[tokenizer.bos_token_id]], dtype=torch.long),\n"," torch.zeros((1, embedded_images.shape[1]), dtype=torch.long),\n"," prompt,\n"," torch.tensor([[tokenizer.convert_tokens_to_ids(\"<|eot_id|>\")]], dtype=torch.long),\n"," ], dim=1).to('cuda')\n"," attention_mask = torch.ones_like(input_ids)\n"," generate_ids = text_model.generate(input_ids, top_p = top_p , temperature=temperature, inputs_embeds=inputs_embeds, attention_mask=attention_mask, max_new_tokens=3000, do_sample=True, suppress_tokens=None) # Uses the default which is temp=0.6, top_p=0.9\n"," generate_ids = generate_ids[:, input_ids.shape[1]:]\n"," if generate_ids[0][-1] == tokenizer.eos_token_id or generate_ids[0][-1] == tokenizer.convert_tokens_to_ids(\"<|eot_id|>\"):\n"," generate_ids = generate_ids[:, :-1]\n"," caption = tokenizer.batch_decode(generate_ids, skip_special_tokens=False, clean_up_tokenization_spaces=False)[0]\n"," caption = f'{caption.strip()}'.replace('Prompt: Describe the image in 400 words','')\n"," return caption"]},{"cell_type":"code","execution_count":null,"metadata":{"id":"mhccTDyzirVn"},"outputs":[],"source":["# @markdown Split the image into 20 parts prior to running\n","no_parts = 20 # @param {type:'slider', min:1,max:30,step:1}\n","print(f'Splitting all images found under /content/... 
\\n into {no_parts} along x-axis')\n","import os,math\n","from PIL import Image\n","home_directory = '/content/'\n","using_Kaggle = os.environ.get('KAGGLE_URL_BASE','')\n","if using_Kaggle : home_directory = '/kaggle/working/'\n","%cd {home_directory}\n","\n","def my_mkdirs(folder):\n"," if os.path.exists(folder)==False:\n"," os.makedirs(folder)\n","\n","\n","tgt_folder = f'/content/tmp/'\n","split_folder = f'/content/split/'\n","my_mkdirs(f'{split_folder}')\n","\n","\n","src_folder = '/content/'\n","suffixes = ['.gif','.png', '.jpeg' , '.webp' , '.jpg']\n","#num = 1\n","for filename in os.listdir(src_folder):\n"," for suffix in suffixes:\n"," if not filename.find(suffix)>-1: continue\n"," while os.path.exists(f'{tgt_folder}{num}.txt'):num = num+1\n"," print(filename)\n"," %cd {src_folder}\n"," #os.remove(f'{filename}')\n"," #continue\n"," image = Image.open(f\"{filename}\").convert('RGB')\n"," w,h=image.size\n"," #grid = product(range(0, h-h%d, d), range(0, w-w%d, d))\n"," divs=no_parts\n"," step=math.floor(w/divs)\n"," %cd {split_folder}\n"," for index in range(divs):\n"," box = (step*index, 0 ,step*(index+1),math.floor(0.8*h))\n"," image.crop(box).save(f'{num}_{index}.jpeg','JPEG')\n"," num = num+1\n"," #caption = stream_chat(input_image, \"descriptive\", \"formal\", \"any\")\n"," #print(f\"...\\n\\n...caption for {filename}\\n\\n...\")\n"," #print(caption)\n"," #---------#\n"," #f = open(f\"{num}.txt\", \"w\")\n"," #f.write(f'{caption}')\n"," #f.close()\n"," #input_image.save(f'{num}.jpeg', \"JPEG\")\n"," os.remove(f\"{src_folder}{filename}\")"]},{"cell_type":"code","execution_count":null,"metadata":{"id":"J811UZU6xZEo"},"outputs":[],"source":["\n","import os\n","from PIL import Image\n","home_directory = '/content/'\n","using_Kaggle = os.environ.get('KAGGLE_URL_BASE','')\n","if using_Kaggle : home_directory = '/kaggle/working/'\n","%cd {home_directory}\n","\n","def my_mkdirs(folder):\n"," if os.path.exists(folder)==False:\n"," os.makedirs(folder)\n","\n","\n","tgt_folder = f'/content/tmp/'\n","my_mkdirs(f'{tgt_folder}')\n","\n","src_folder = '/content'\n","if os.path.exists(f'{split_folder}'): src_folder = f'{split_folder}'\n","suffixes = ['.gif','.png', '.jpeg' , '.webp' , '.jpg']\n","num = 1\n","for filename in os.listdir(src_folder):\n"," for suffix in suffixes:\n"," if not filename.find(suffix)>-1: continue\n"," while os.path.exists(f'{tgt_folder}{num}.txt'):num = num+1\n"," print(filename)\n"," %cd {src_folder}\n"," input_image = Image.open(f\"{filename}\").convert('RGB')\n"," caption = stream_chat(input_image, \"descriptive\", \"formal\", \"any\")\n"," print(f\"...\\n\\n...caption for {filename}\\n\\n...\")\n"," print(caption)\n"," #---------#\n"," %cd {tgt_folder}\n"," f = open(f\"{num}.txt\", \"w\")\n"," f.write(f'{caption}')\n"," f.close()\n"," input_image.save(f'{num}.jpeg', \"JPEG\")\n"," os.remove(f\"{src_folder}{filename}\")\n"," num = num+1"]},{"cell_type":"code","execution_count":null,"metadata":{"id":"5EztLCjkPq4U"},"outputs":[],"source":["import shutil\n","%cd /content/\n","shutil.make_archive('/content/tmp', 'zip', '/content/tmp')"]},{"cell_type":"code","execution_count":null,"metadata":{"id":"kM4TpfdB1amt"},"outputs":[],"source":["\n","from google.colab import runtime\n","#runtime.unassign() #Disconnect from 
runtime"]}],"metadata":{"accelerator":"GPU","colab":{"gpuType":"T4","provenance":[{"file_id":"https://huggingface.co/datasets/codeShare/lora-training-data/blob/main/Joycaption_Alpha_One.ipynb","timestamp":1752593897385},{"file_id":"https://huggingface.co/datasets/codeShare/lora-training-data/blob/main/Joycaption_Alpha_One.ipynb","timestamp":1752405756026},{"file_id":"https://huggingface.co/datasets/codeShare/lora-training-data/blob/main/Joycaption_Alpha_One.ipynb","timestamp":1748859170548},{"file_id":"https://huggingface.co/datasets/codeShare/lora-training-data/blob/main/Joycaption_Alpha_One.ipynb","timestamp":1747227021653},{"file_id":"https://huggingface.co/datasets/codeShare/lora-training-data/blob/main/Joycaption_Alpha_One.ipynb","timestamp":1747225778912},{"file_id":"https://huggingface.co/datasets/codeShare/lora-training-data/blob/main/Joycaption_Alpha_One.ipynb","timestamp":1747224652750},{"file_id":"https://huggingface.co/datasets/codeShare/lora-training-data/blob/main/Joycaption_Alpha_One.ipynb","timestamp":1746209168116},{"file_id":"https://huggingface.co/datasets/codeShare/lora-training-data/blob/main/Joycaption_Alpha_One.ipynb","timestamp":1746181687155},{"file_id":"https://huggingface.co/datasets/codeShare/lora-training-data/blob/main/Joycaption_Alpha_One.ipynb","timestamp":1742303655056},{"file_id":"https://huggingface.co/datasets/codeShare/lora-training-data/blob/main/Joycaption_Alpha_One.ipynb","timestamp":1740768524003},{"file_id":"https://huggingface.co/datasets/codeShare/lora-training-data/blob/main/Joycaption_Alpha_One.ipynb","timestamp":1740657473013},{"file_id":"https://huggingface.co/datasets/codeShare/lora-training-data/blob/main/Joycaption_Alpha_One.ipynb","timestamp":1739796923572},{"file_id":"https://huggingface.co/datasets/codeShare/lora-training-data/blob/main/Joycaption_Alpha_One.ipynb","timestamp":1739735627072}]},"kernelspec":{"display_name":"Python 3","name":"python3"},"language_info":{"name":"python"}},"nbformat":4,"nbformat_minor":0}
 
+ {"cells":[{"cell_type":"code","execution_count":null,"metadata":{"id":"Dwr7gk5OwuGC"},"outputs":[],"source":["from google.colab import drive\n","drive.mount('/content/drive')"]},{"cell_type":"code","execution_count":null,"metadata":{"id":"1X-s_s971qB7"},"outputs":[],"source":["!apt -y install -qq aria2\n","!aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co/camenduru/joy-caption-alpha-one/raw/main/text_model/adapter_config.json -d /content/joy/text_model -o adapter_config.json\n","!aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co/camenduru/joy-caption-alpha-one/resolve/main/text_model/adapter_model.safetensors -d /content/joy/text_model -o adapter_model.safetensors\n","!aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co/camenduru/joy-caption-alpha-one/resolve/main/clip_model.pt -d /content/joy -o clip_model.pt\n","!aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co/camenduru/joy-caption-alpha-one/raw/main/config.yaml -d /content/joy -o config.yaml\n","!aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co/camenduru/joy-caption-alpha-one/resolve/main/image_adapter.pt -d /content/joy -o image_adapter.pt\n","\n","!apt -y install -qq aria2\n","!aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co/camenduru/joy-caption-alpha-one/raw/main/text_model/adapter_config.json -d /content/joy/text_model -o adapter_config.json\n","!aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co/camenduru/joy-caption-alpha-one/resolve/main/text_model/adapter_model.safetensors -d /content/joy/text_model -o adapter_model.safetensors\n","!aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co/camenduru/joy-caption-alpha-one/resolve/main/clip_model.pt -d /content/joy -o clip_model.pt\n","!aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co/camenduru/joy-caption-alpha-one/raw/main/config.yaml -d /content/joy -o config.yaml\n","!aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co/camenduru/joy-caption-alpha-one/resolve/main/image_adapter.pt -d /content/joy -o image_adapter.pt\n","\n","# @markdown Use a custom prompt to instruct the image captioning model\n","custom_prompt = 'Write a natural language prompt using e621 and/or danbooru tags in multiple sentences of 400 words or tags' # @param {type:'string'}\n","enable_custom_prompt = False # @param {type:'boolean'}\n","if not enable_custom_prompt: custom_prompt = 'Describe the image in 400 words'\n","!pip install peft bitsandbytes\n","!pip install hf_xet\n","from huggingface_hub import InferenceClient\n","from torch import nn\n","from transformers import AutoModel, BitsAndBytesConfig, AutoProcessor, AutoTokenizer, PreTrainedTokenizer, PreTrainedTokenizerFast, AutoModelForCausalLM\n","import torch\n","import torch.amp.autocast_mode\n","from PIL import Image\n","import os\n","import torchvision.transforms.functional as TVF\n","\n","CLIP_PATH = \"google/siglip-so400m-patch14-384\"\n","MODEL_PATH = \"unsloth/Meta-Llama-3.1-8B-bnb-4bit\"\n","CAPTION_TYPE_MAP = {\n"," (\"descriptive\", \"formal\", False, False): [f\"{custom_prompt}\"],\n"," (\"descriptive\", \"formal\", False, True): [\"Write a descriptive caption for this image in a formal tone within {word_count} words.\"],\n"," (\"descriptive\", \"formal\", True, False): [\"Write a {length} descriptive caption for this image in a formal tone.\"],\n"," (\"descriptive\", 
\"informal\", False, False): [\"Write a descriptive caption for this image in a casual tone.\"],\n"," (\"descriptive\", \"informal\", False, True): [\"Write a descriptive caption for this image in a casual tone within {word_count} words.\"],\n"," (\"descriptive\", \"informal\", True, False): [\"Write a {length} descriptive caption for this image in a casual tone.\"],\n"," (\"training_prompt\", \"formal\", False, False): [\"Write a stable diffusion prompt for this image.\"],\n"," (\"training_prompt\", \"formal\", False, True): [\"Write a stable diffusion prompt for this image within {word_count} words.\"],\n"," (\"training_prompt\", \"formal\", True, False): [\"Write a {length} stable diffusion prompt for this image.\"],\n"," (\"rng-tags\", \"formal\", False, False): [\"Write a list of Booru tags for this image.\"],\n"," (\"rng-tags\", \"formal\", False, True): [\"Write a list of Booru tags for this image within {word_count} words.\"],\n"," (\"rng-tags\", \"formal\", True, False): [\"Write a {length} list of Booru tags for this image.\"],\n","}\n","\n","class ImageAdapter(nn.Module):\n","\tdef __init__(self, input_features: int, output_features: int, ln1: bool, pos_emb: bool, num_image_tokens: int, deep_extract: bool):\n","\t\tsuper().__init__()\n","\t\tself.deep_extract = deep_extract\n","\t\tif self.deep_extract:\n","\t\t\tinput_features = input_features * 5\n","\t\tself.linear1 = nn.Linear(input_features, output_features)\n","\t\tself.activation = nn.GELU()\n","\t\tself.linear2 = nn.Linear(output_features, output_features)\n","\t\tself.ln1 = nn.Identity() if not ln1 else nn.LayerNorm(input_features)\n","\t\tself.pos_emb = None if not pos_emb else nn.Parameter(torch.zeros(num_image_tokens, input_features))\n","\t\tself.other_tokens = nn.Embedding(3, output_features)\n","\t\tself.other_tokens.weight.data.normal_(mean=0.0, std=0.02) # Matches HF's implementation of llama3\n","\tdef forward(self, vision_outputs: torch.Tensor):\n","\t\tif self.deep_extract:\n","\t\t\tx = torch.concat((\n","\t\t\t\tvision_outputs[-2],\n","\t\t\t\tvision_outputs[3],\n","\t\t\t\tvision_outputs[7],\n","\t\t\t\tvision_outputs[13],\n","\t\t\t\tvision_outputs[20],\n","\t\t\t), dim=-1)\n","\t\t\tassert len(x.shape) == 3, f\"Expected 3, got {len(x.shape)}\" # batch, tokens, features\n","\t\t\tassert x.shape[-1] == vision_outputs[-2].shape[-1] * 5, f\"Expected {vision_outputs[-2].shape[-1] * 5}, got {x.shape[-1]}\"\n","\t\telse:\n","\t\t\tx = vision_outputs[-2]\n","\t\tx = self.ln1(x)\n","\t\tif self.pos_emb is not None:\n","\t\t\tassert x.shape[-2:] == self.pos_emb.shape, f\"Expected {self.pos_emb.shape}, got {x.shape[-2:]}\"\n","\t\t\tx = x + self.pos_emb\n","\t\tx = self.linear1(x)\n","\t\tx = self.activation(x)\n","\t\tx = self.linear2(x)\n","\t\tother_tokens = self.other_tokens(torch.tensor([0, 1], device=self.other_tokens.weight.device).expand(x.shape[0], -1))\n","\t\tassert other_tokens.shape == (x.shape[0], 2, x.shape[2]), f\"Expected {(x.shape[0], 2, x.shape[2])}, got {other_tokens.shape}\"\n","\t\tx = torch.cat((other_tokens[:, 0:1], x, other_tokens[:, 1:2]), dim=1)\n","\t\treturn x\n","\tdef get_eot_embedding(self):\n","\t\treturn self.other_tokens(torch.tensor([2], device=self.other_tokens.weight.device)).squeeze(0)\n","\n","clip_processor = AutoProcessor.from_pretrained(CLIP_PATH)\n","clip_model = AutoModel.from_pretrained(CLIP_PATH)\n","clip_model = clip_model.vision_model\n","checkpoint = torch.load(\"/content/joy/clip_model.pt\", map_location='cpu')\n","checkpoint = {k.replace(\"_orig_mod.module.\", 
\"\"): v for k, v in checkpoint.items()}\n","clip_model.load_state_dict(checkpoint)\n","# del checkpoint\n","clip_model.eval()\n","clip_model.requires_grad_(False)\n","clip_model.to(\"cuda\")\n","tokenizer = AutoTokenizer.from_pretrained(f'{MODEL_PATH}')\n","#tokenizer = AutoTokenizer.from_pretrained(\"unsloth/Meta-Llama-3.1-8B-bnb-4bit\")\n","assert isinstance(tokenizer, PreTrainedTokenizer) or isinstance(tokenizer, PreTrainedTokenizerFast), f\"Tokenizer is of type {type(tokenizer)}\"\n","text_model = AutoModelForCausalLM.from_pretrained(MODEL_PATH, quantization_config = BitsAndBytesConfig(load_in_8bit=True), device_map=\"auto\", torch_dtype=torch.bfloat16)\n","text_model.load_adapter(\"/content/joy/text_model\")\n","text_model.eval()\n","image_adapter = ImageAdapter(clip_model.config.hidden_size, text_model.config.hidden_size, False, False, 38, False)\n","image_adapter.load_state_dict(torch.load(\"/content/joy/image_adapter.pt\", map_location=\"cpu\"))\n","image_adapter.eval()\n","image_adapter.to(\"cuda\")"]},{"cell_type":"code","source":["#urls=[\"https://imx.to/i/67yoyb\",\"https://imx.to/i/67yovt\",\"https://imx.to/i/67yozl\",\"https://imx.to/i/67yows\",\"https://imx.to/i/67youk\",\"https://imx.to/i/67yoxy\",\"https://imx.to/i/67yoz9\",\"https://imx.to/i/67yown\",\"https://imx.to/i/67youc\",\"https://imx.to/i/67yovj\",\"https://imx.to/i/67yoyw\",\"https://imx.to/i/67yowf\",\"https://imx.to/i/67yp0d\",\"https://imx.to/i/67yoxb\",\"https://imx.to/i/67yov6\",\"https://imx.to/i/67yoym\",\"https://imx.to/i/67yozz\",\"https://imx.to/i/67yox4\",\"https://imx.to/i/67yow1\",\"https://imx.to/i/67yowz\",\"https://imx.to/i/67youn\",\"https://imx.to/i/67yovy\",\"https://imx.to/i/67yozm\",\"https://imx.to/i/67yowt\",\"https://imx.to/i/67yoxz\",\"https://imx.to/i/67yovn\",\"https://imx.to/i/67yozb\",\"https://imx.to/i/67you7\",\"https://imx.to/i/67yoxc\",\"https://imx.to/i/67yov4\",\"https://imx.to/i/67yow6\",\"https://imx.to/i/67yozv\",\"https://imx.to/i/67yovv\",\"https://imx.to/i/67yozo\",\"https://imx.to/i/67youh\",\"https://imx.to/i/67yowm\",\"https://imx.to/i/67youd\",\"https://imx.to/i/67yoxr\",\"https://imx.to/i/67yovf\",\"https://imx.to/i/67yoyx\",\"https://imx.to/i/67yowg\",\"https://imx.to/i/67yp0e\",\"https://imx.to/i/67yoxh\",\"https://imx.to/i/67yov7\",\"https://imx.to/i/67yoyl\",\"https://imx.to/i/67yp02\",\"https://imx.to/i/67yox3\",\"https://imx.to/i/67youw\",\"https://imx.to/i/67yoyh\",\"https://imx.to/i/67yow2\",\"https://imx.to/i/67yozu\",\"https://imx.to/i/67yox0\",\"https://imx.to/i/67xj9p\",\"https://imx.to/i/67yowo\",\"https://imx.to/i/67yoxk\",\"https://imx.to/i/67yovi\",\"https://imx.to/i/67xjad\",\"https://imx.to/i/67yoyo\",\"https://imx.to/i/67yow7\",\"https://imx.to/i/67youx\",\"https://imx.to/i/67xjar\",\"https://imx.to/i/67xja5\",\"https://imx.to/i/67you6\",\"https://imx.to/i/67yoxt\",\"https://imx.to/i/67yoyy\",\"https://imx.to/i/67yp0f\",\"https://imx.to/i/67yoxl\",\"https://imx.to/i/67yow8\",\"https://imx.to/i/67xja3\",\"https://imx.to/i/67yp04\",\"https://imx.to/i/67youv\",\"https://imx.to/i/67yozw\",\"https://imx.to/i/67xjaj\",\"https://imx.to/i/67yoye\",\"https://imx.to/i/67yowy\",\"https://imx.to/i/67youl\",\"https://imx.to/i/67xjac\",\"https://imx.to/i/67yoy4\",\"https://imx.to/i/67yovq\",\"https://imx.to/i/67yozf\",\"https://imx.to/i/67yowq\",\"https://imx.to/i/67youb\",\"https://imx.to/i/67yove\",\"https://imx.to/i/67yoz3\",\"https://imx.to/i/67xja1\",\"https://imx.to/i/67yowi\",\"https://imx.to/i/67yoxm\",\"https://imx.to/i/67yov9\",\"https://imx.to/i/67yoyq
\",\"https://imx.to/i/67yp05\",\"https://imx.to/i/67youz\",\"https://imx.to/i/67yozx\",\"https://imx.to/i/67yoyf\",\"https://imx.to/i/67xjaa\",\"https://imx.to/i/67yovx\",\"https://imx.to/i/67yozq\",\"https://imx.to/i/67yovr\",\"https://imx.to/i/67xja6\",\"https://imx.to/i/67yoxn\",\"https://imx.to/i/67yovc\",\"https://imx.to/i/67yow9\",\"https://imx.to/i/67yox8\",\"https://imx.to/i/67yoyj\",\"https://imx.to/i/67yow5\",\"https://imx.to/i/67xjao\",\"https://imx.to/i/67yox1\",\"https://imx.to/i/67youq\",\"https://imx.to/i/67yoyg\",\"https://imx.to/i/67youi\",\"https://imx.to/i/67yovs\",\"https://imx.to/i/67yoxv\",\"https://imx.to/i/67yoz5\",\"https://imx.to/i/67yp0g\",\"https://imx.to/i/67yoxo\",\"https://imx.to/i/67yoys\",\"https://imx.to/i/67yowa\",\"https://imx.to/i/67yp09\",\"https://imx.to/i/67yox7\",\"https://imx.to/i/67youy\",\"https://imx.to/i/67yoyk\",\"https://imx.to/i/67yp00\",\"https://imx.to/i/67yozr\",\"https://imx.to/i/67yowx\",\"https://imx.to/i/67youo\",\"https://imx.to/i/67yoy5\",\"https://imx.to/i/67xja9\",\"https://imx.to/i/67yoxp\",\"https://imx.to/i/67yoyt\",\"https://imx.to/i/67yp08\",\"https://imx.to/i/67yoyn\",\"https://imx.to/i/67xja7\",\"https://imx.to/i/67yozs\",\"https://imx.to/i/67yoy9\",\"https://imx.to/i/67yoze\",\"https://imx.to/i/67youg\",\"https://imx.to/i/67yous\",\"https://imx.to/i/67xjal\",\"https://imx.to/i/67yoy8\",\"https://imx.to/i/67yovu\",\"https://imx.to/i/67yoxx\",\"https://imx.to/i/67yoz7\",\"https://imx.to/i/67xj9s\",\"https://imx.to/i/67yoyv\",\"https://imx.to/i/67yovp\",\"https://imx.to/i/67yoww\",\"https://imx.to/i/67yoy1\",\"https://imx.to/i/67you8\",\"https://imx.to/i/67yoyr\",\"https://imx.to/i/67yp0b\",\"https://imx.to/i/67xjaq\",\"https://imx.to/i/67yovh\",\"https://imx.to/i/67yoyu\",\"https://imx.to/i/67yowc\",\"https://imx.to/i/67yp0c\",\"https://imx.to/i/67yov3\",\"https://imx.to/i/67yox2\",\"https://imx.to/i/67yout\",\"https://imx.to/i/67xjaf\",\"https://imx.to/i/67yoya\",\"https://imx.to/i/67yowk\",\"https://imx.to/i/67you9\",\"https://imx.to/i/67yoxq\",\"https://imx.to/i/67yov2\",\"https://imx.to/i/67youp\",\"https://imx.to/i/67yovz\",\"https://imx.to/i/67yowr\",\"https://imx.to/i/67youf\",\"https://imx.to/i/67yoxw\",\"https://imx.to/i/67yovk\",\"https://imx.to/i/67xjab\",\"https://imx.to/i/67yow0\",\"https://imx.to/i/67yozt\",\"https://imx.to/i/67yoyc\",\"https://imx.to/i/67yozk\",\"https://imx.to/i/67yowl\",\"https://imx.to/i/67yov1\",\"https://imx.to/i/67yoy2\",\"https://imx.to/i/67yovm\",\"https://imx.to/i/67yozd\",\"https://imx.to/i/67yowp\",\"https://imx.to/i/67youe\",\"https://imx.to/i/67xj9t\",\"https://imx.to/i/67xja2\",\"https://imx.to/i/67xjai\",\"https://imx.to/i/67xjah\",\"https://imx.to/i/67xjas\",\"https://imx.to/i/67xja8\",\"https://imx.to/i/67xjae\",\"https://imx.to/i/67xja4\",\"https://imx.to/i/67xjap\",\"https://imx.to/i/67xj9n\",\"https://imx.to/i/67xjat\",\"https://imx.to/i/67xj9z\",\"https://imx.to/i/67xj9r\",\"https://imx.to/i/67xj9w\",\"https://imx.to/i/67xj9x\",\"https://imx.to/i/67xja0\",\"https://imx.to/i/67xj9o\",\"https://imx.to/i/67xj9q\",\"https://imx.to/i/67xj9u\",\"https://imx.to/i/67xjak\",\"https://imx.to/i/67xj9v\",\"https://imx.to/i/67xjam\",\"https://imx.to/i/67xj9y\",\"https://imx.to/i/67xjan\",\"https://imx.to/i/67xjag\",\"https://imx.to/i/67yovd\",\"https://imx.to/i/67yowd\",\"https://imx.to/i/67yp0a\",\"https://imx.to/i/67yox9\",\"https://imx.to/i/67yoy6\",\"https://imx.to/i/67yp01\",\"https://imx.to/i/67yovg\",\"https://imx.to/i/67yowe\",\"https://imx.to/i/67your\",\"https://imx.to/i/67yoy
d\",\"https://imx.to/i/67yoy0\",\"https://imx.to/i/67yoza\",\"https://imx.to/i/67you5\",\"https://imx.to/i/67yoxs\",\"https://imx.to/i/67yp0h\",\"https://imx.to/i/67yoxi\",\"https://imx.to/i/67yov5\",\"https://imx.to/i/67yoyp\",\"https://imx.to/i/67yp03\",\"https://imx.to/i/67yox5\",\"https://imx.to/i/67yov0\",\"https://imx.to/i/67yoyi\",\"https://imx.to/i/67yozn\",\"https://imx.to/i/67yowv\",\"https://imx.to/i/67yovo\",\"https://imx.to/i/67yozc\",\"https://imx.to/i/67youa\",\"https://imx.to/i/67yoxu\",\"https://imx.to/i/67yoz4\",\"https://imx.to/i/67yowh\",\"https://imx.to/i/67yoxj\",\"https://imx.to/i/67yov8\",\"https://imx.to/i/67yp06\",\"https://imx.to/i/67youu\",\"https://imx.to/i/67yow3\",\"https://imx.to/i/67yozy\",\"https://imx.to/i/67yozp\",\"https://imx.to/i/67yoy3\",\"https://imx.to/i/67yovw\",\"https://imx.to/i/67yoz6\",\"https://imx.to/i/67yova\",\"https://imx.to/i/67yox6\",\"https://imx.to/i/67yowj\",\"https://imx.to/i/67yp0i\",\"https://imx.to/i/67yowb\",\"https://imx.to/i/67yovl\",\"https://imx.to/i/67yoz8\",\"https://imx.to/i/67yoxa\",\"https://imx.to/i/67youm\"]\n","#!pip install wget\n","%cd /content/\n","for url in urls:\n"," # Extract the image ID from the URL\n"," image_id = url.split('/')[-1]\n"," # Construct the wget command\n"," !wget {url} -O '{image_id}.jpg'\n"," #print(command)\n","\n","\n","\n","\n"],"metadata":{"id":"sysTcSuu5roy"},"execution_count":null,"outputs":[]},{"cell_type":"code","source":["\n","\n","\n","# @markdown Use a custom prompt to instruct the image captioning model\n","custom_prompt = '' # @param {type:'string'}\n","enable_custom_prompt = True # @param {type:'boolean'}\n","if not enable_custom_prompt: custom_prompt = 'Describe the image in 400 words'\n","\n","CAPTION_TYPE_MAP = {\n"," (\"descriptive\", \"formal\", False, False): [f\"{custom_prompt}\"],\n"," (\"descriptive\", \"formal\", False, True): [\"Write a descriptive caption for this image in a formal tone within {word_count} words.\"],\n"," (\"descriptive\", \"formal\", True, False): [\"Write a {length} descriptive caption for this image in a formal tone.\"],\n"," (\"descriptive\", \"informal\", False, False): [\"Write a descriptive caption for this image in a casual tone.\"],\n"," (\"descriptive\", \"informal\", False, True): [\"Write a descriptive caption for this image in a casual tone within {word_count} words.\"],\n"," (\"descriptive\", \"informal\", True, False): [\"Write a {length} descriptive caption for this image in a casual tone.\"],\n"," (\"training_prompt\", \"formal\", False, False): [\"Write a stable diffusion prompt for this image.\"],\n"," (\"training_prompt\", \"formal\", False, True): [\"Write a stable diffusion prompt for this image within {word_count} words.\"],\n"," (\"training_prompt\", \"formal\", True, False): [\"Write a {length} stable diffusion prompt for this image.\"],\n"," (\"rng-tags\", \"formal\", False, False): [\"Write a list of Booru tags for this image.\"],\n"," (\"rng-tags\", \"formal\", False, True): [\"Write a list of Booru tags for this image within {word_count} words.\"],\n"," (\"rng-tags\", \"formal\", True, False): [\"Write a {length} list of Booru tags for this image.\"],\n","}"],"metadata":{"id":"_qrUZ7jRIxIf"},"execution_count":null,"outputs":[]},{"cell_type":"code","execution_count":6,"metadata":{"id":"PjO3Wc4kzR08","executionInfo":{"status":"ok","timestamp":1753119103713,"user_tz":-120,"elapsed":1,"user":{"displayName":"","userId":""}}},"outputs":[],"source":["# @markdown higher temperature = prompt creativity (default 0.6) <br> higher 
top_p = higher noise reduction in latent embedding (default 0.9)\n","temperature = 1.75 # @param {type:'slider',min:0.5,max:4.0,step:0.05}\n","top_p = 0.75 # @param {type:'slider',min:0.1,max:0.95,step:0.05}\n","temperature = float(temperature)\n","top_p = float(top_p)\n","prompt_str = 'invent words you think about when seeing this image'\n","#-----#\n","num=1\n","\n","@torch.no_grad()\n","def stream_chat(input_image: Image.Image, caption_type: str, caption_tone: str, caption_length: str | int) -> str:\n"," torch.cuda.empty_cache()\n"," length = None if caption_length == \"any\" else caption_length\n"," if isinstance(length, str):\n"," try:\n"," length = int(length)\n"," except ValueError:\n"," pass\n"," if caption_type == \"rng-tags\" or caption_type == \"training_prompt\":\n"," caption_tone = \"formal\"\n"," prompt_key = (caption_type, caption_tone, isinstance(length, str), isinstance(length, int))\n"," if prompt_key not in CAPTION_TYPE_MAP:\n"," raise ValueError(f\"Invalid caption type: {prompt_key}\")\n"," prompt_str = CAPTION_TYPE_MAP[prompt_key][0].format(length=length, word_count=length)\n"," print(f\"Prompt: {prompt_str}\")\n"," image = input_image.resize((384, 384), Image.LANCZOS)\n"," pixel_values = TVF.pil_to_tensor(image).unsqueeze(0) / 255.0\n"," pixel_values = TVF.normalize(pixel_values, [0.5], [0.5])\n"," pixel_values = pixel_values.to('cuda')\n"," prompt = tokenizer.encode(prompt_str, return_tensors='pt', padding=False, truncation=False, add_special_tokens=False)\n"," with torch.amp.autocast_mode.autocast('cuda', enabled=True):\n"," vision_outputs = clip_model(pixel_values=pixel_values, output_hidden_states=True)\n"," image_features = vision_outputs.hidden_states\n"," embedded_images = image_adapter(image_features)\n"," embedded_images = embedded_images.to('cuda')\n"," prompt_embeds = text_model.model.embed_tokens(prompt.to('cuda'))\n"," assert prompt_embeds.shape == (1, prompt.shape[1], text_model.config.hidden_size), f\"Prompt shape is {prompt_embeds.shape}, expected {(1, prompt.shape[1], text_model.config.hidden_size)}\"\n"," embedded_bos = text_model.model.embed_tokens(torch.tensor([[tokenizer.bos_token_id]], device=text_model.device, dtype=torch.int64))\n"," eot_embed = image_adapter.get_eot_embedding().unsqueeze(0).to(dtype=text_model.dtype)\n"," inputs_embeds = torch.cat([\n"," embedded_bos.expand(embedded_images.shape[0], -1, -1),\n"," embedded_images.to(dtype=embedded_bos.dtype),\n"," prompt_embeds.expand(embedded_images.shape[0], -1, -1),\n"," eot_embed.expand(embedded_images.shape[0], -1, -1),\n"," ], dim=1)\n"," input_ids = torch.cat([\n"," torch.tensor([[tokenizer.bos_token_id]], dtype=torch.long),\n"," torch.zeros((1, embedded_images.shape[1]), dtype=torch.long),\n"," prompt,\n"," torch.tensor([[tokenizer.convert_tokens_to_ids(\"<|eot_id|>\")]], dtype=torch.long),\n"," ], dim=1).to('cuda')\n"," attention_mask = torch.ones_like(input_ids)\n"," generate_ids = text_model.generate(input_ids, top_p = top_p , temperature=temperature, inputs_embeds=inputs_embeds, attention_mask=attention_mask, max_new_tokens=3000, do_sample=True, suppress_tokens=None) # Uses the default which is temp=0.6, top_p=0.9\n"," generate_ids = generate_ids[:, input_ids.shape[1]:]\n"," if generate_ids[0][-1] == tokenizer.eos_token_id or generate_ids[0][-1] == tokenizer.convert_tokens_to_ids(\"<|eot_id|>\"):\n"," generate_ids = generate_ids[:, :-1]\n"," caption = tokenizer.batch_decode(generate_ids, skip_special_tokens=False, clean_up_tokenization_spaces=False)[0]\n"," caption = 
f'{caption.strip()}'.replace('Prompt: Describe the image in 400 words','')\n"," return caption"]},{"cell_type":"code","execution_count":null,"metadata":{"id":"mhccTDyzirVn"},"outputs":[],"source":["# @markdown Split the image into 20 parts prior to running\n","no_parts = 1 # @param {type:'slider', min:1,max:30,step:1}\n","print(f'Splitting all images found under /content/... \\n into {no_parts} along x-axis')\n","import os,math\n","from PIL import Image\n","home_directory = '/content/'\n","using_Kaggle = os.environ.get('KAGGLE_URL_BASE','')\n","if using_Kaggle : home_directory = '/kaggle/working/'\n","%cd {home_directory}\n","\n","def my_mkdirs(folder):\n"," if os.path.exists(folder)==False:\n"," os.makedirs(folder)\n","\n","\n","tgt_folder = f'/content/tmp/'\n","split_folder = f'/content/split/'\n","my_mkdirs(f'{split_folder}')\n","\n","\n","src_folder = '/content/'\n","suffixes = ['.gif','.png', '.jpeg' , '.webp' , '.jpg']\n","#num = 1\n","for filename in os.listdir(src_folder):\n"," os.remove(filename)\n"," continue\n"," for suffix in suffixes:\n"," if not filename.find(suffix)>-1: continue\n"," while os.path.exists(f'{tgt_folder}{num}.txt'):num = num+1\n"," print(filename)\n"," %cd {src_folder}\n"," #os.remove(f'{filename}')\n"," #continue\n"," image = Image.open(f\"{filename}\").convert('RGB')\n"," w,h=image.size\n"," #grid = product(range(0, h-h%d, d), range(0, w-w%d, d))\n"," divs=no_parts\n"," step=math.floor(w/divs)\n"," %cd {split_folder}\n"," for index in range(divs):\n"," box = (step*index, 0 ,step*(index+1),math.floor(0.8*h))\n"," image.crop(box).save(f'{num}_{index}.jpeg','JPEG')\n"," num = num+1\n"," #caption = stream_chat(input_image, \"descriptive\", \"formal\", \"any\")\n"," #print(f\"...\\n\\n...caption for {filename}\\n\\n...\")\n"," #print(caption)\n"," #---------#\n"," #f = open(f\"{num}.txt\", \"w\")\n"," #f.write(f'{caption}')\n"," #f.close()\n"," #input_image.save(f'{num}.jpeg', \"JPEG\")\n"," os.remove(f\"{src_folder}{filename}\")"]},{"cell_type":"code","execution_count":null,"metadata":{"id":"J811UZU6xZEo"},"outputs":[],"source":["\n","import os\n","from PIL import Image\n","home_directory = '/content/'\n","using_Kaggle = os.environ.get('KAGGLE_URL_BASE','')\n","if using_Kaggle : home_directory = '/kaggle/working/'\n","%cd {home_directory}\n","\n","def my_mkdirs(folder):\n"," if os.path.exists(folder)==False:\n"," os.makedirs(folder)\n","\n","\n","tgt_folder = f'/content/tmp/'\n","my_mkdirs(f'{tgt_folder}')\n","split_folder = '/content/splits/'\n","src_folder = '/content'\n","if os.path.exists(f'{split_folder}'): src_folder = f'{split_folder}'\n","suffixes = ['.gif','.png', '.jpeg' , '.webp' , '.jpg']\n","num = 1\n","for filename in os.listdir(src_folder):\n"," for suffix in suffixes:\n"," if not filename.find(suffix)>-1: continue\n"," while os.path.exists(f'{tgt_folder}{num}.txt'):num = num+1\n"," print(filename)\n"," %cd {src_folder}\n"," input_image = Image.open(f\"{filename}\").convert('RGB')\n"," caption = stream_chat(input_image, \"descriptive\", \"formal\", \"any\")\n"," print(f\"...\\n\\n...caption for {filename}\\n\\n...\")\n"," print(caption)\n"," #---------#\n"," %cd {tgt_folder}\n"," f = open(f\"{num}.txt\", \"w\")\n"," f.write(f'{caption}')\n"," f.close()\n"," input_image.save(f'{num}.jpeg', \"JPEG\")\n"," os.remove(f\"{src_folder}{filename}\")\n"," num = num+1"]},{"cell_type":"code","execution_count":null,"metadata":{"id":"5EztLCjkPq4U"},"outputs":[],"source":["import shutil\n","%cd /content/\n","shutil.make_archive('/content/tmp', 'zip', 
'/content/tmp')"]},{"cell_type":"code","source":["# @markdown Save images of all urls found in image_urls.txt to workspace\n","\n","!wget -i image_urls.txt -P ./splits\n","\n"],"metadata":{"id":"v9UMCh3h_mNj"},"execution_count":null,"outputs":[]},{"cell_type":"code","execution_count":null,"metadata":{"id":"kM4TpfdB1amt"},"outputs":[],"source":["# @markdown Auto-disconnect from Google Colab upon running this cell\n","from google.colab import runtime\n","#runtime.unassign() #Disconnect from runtime"]}],"metadata":{"accelerator":"GPU","colab":{"gpuType":"T4","provenance":[{"file_id":"https://huggingface.co/datasets/codeShare/lora-training-data/blob/main/Joycaption_Alpha_One.ipynb","timestamp":1753120703402},{"file_id":"https://huggingface.co/datasets/codeShare/lora-training-data/blob/main/Joycaption_Alpha_One.ipynb","timestamp":1752593897385},{"file_id":"https://huggingface.co/datasets/codeShare/lora-training-data/blob/main/Joycaption_Alpha_One.ipynb","timestamp":1752405756026},{"file_id":"https://huggingface.co/datasets/codeShare/lora-training-data/blob/main/Joycaption_Alpha_One.ipynb","timestamp":1748859170548},{"file_id":"https://huggingface.co/datasets/codeShare/lora-training-data/blob/main/Joycaption_Alpha_One.ipynb","timestamp":1747227021653},{"file_id":"https://huggingface.co/datasets/codeShare/lora-training-data/blob/main/Joycaption_Alpha_One.ipynb","timestamp":1747225778912},{"file_id":"https://huggingface.co/datasets/codeShare/lora-training-data/blob/main/Joycaption_Alpha_One.ipynb","timestamp":1747224652750},{"file_id":"https://huggingface.co/datasets/codeShare/lora-training-data/blob/main/Joycaption_Alpha_One.ipynb","timestamp":1746209168116},{"file_id":"https://huggingface.co/datasets/codeShare/lora-training-data/blob/main/Joycaption_Alpha_One.ipynb","timestamp":1746181687155},{"file_id":"https://huggingface.co/datasets/codeShare/lora-training-data/blob/main/Joycaption_Alpha_One.ipynb","timestamp":1742303655056},{"file_id":"https://huggingface.co/datasets/codeShare/lora-training-data/blob/main/Joycaption_Alpha_One.ipynb","timestamp":1740768524003},{"file_id":"https://huggingface.co/datasets/codeShare/lora-training-data/blob/main/Joycaption_Alpha_One.ipynb","timestamp":1740657473013},{"file_id":"https://huggingface.co/datasets/codeShare/lora-training-data/blob/main/Joycaption_Alpha_One.ipynb","timestamp":1739796923572},{"file_id":"https://huggingface.co/datasets/codeShare/lora-training-data/blob/main/Joycaption_Alpha_One.ipynb","timestamp":1739735627072}]},"kernelspec":{"display_name":"Python 3","name":"python3"},"language_info":{"name":"python"}},"nbformat":4,"nbformat_minor":0}