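# Custom inference handler for the ibm-granite/granite-vision-3.2-2b vision-language
# model. The EndpointHandler class below follows the handler.py convention used by
# Hugging Face Inference Endpoints: the model is loaded once in __init__ and each
# request is served through __call__.
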
import base64
from subprocess import run

import torch

# Install/upgrade runtime dependencies when the handler module is loaded. Note that
# torch is already imported above, so a changed torch version would only take effect
# on the next process start.
# run("pip install flash-attn --no-build-isolation", shell=True, check=True)
run("pip install --upgrade pip", shell=True, check=True)
run(
    "pip install torch torchvision torchaudio --extra-index-url https://download.pytorch.org/whl/cu124",
    shell=True,
    check=True,
)

from transformers import AutoModelForVision2Seq, AutoProcessor, BitsAndBytesConfig
model_id = "ibm-granite/granite-vision-3.2-2b"
#model_id = "Portx/granite-vision-3.1-2b-preview-do-extractor"

bnb_config = BitsAndBytesConfig(
    load_in_4bit=True,
    bnb_4bit_use_double_quant=True,
    bnb_4bit_quant_type="nf4",
    bnb_4bit_compute_dtype=torch.bfloat16,
    llm_int8_skip_modules=["vision_tower", "lm_head"],
    llm_int8_enable_fp32_cpu_offload=True,
)

# Check for a GPU; generation inputs are moved to this device before inference.
device = "cuda" if torch.cuda.is_available() else "cpu"


class Utils:
    @staticmethod
    def convert_base64_to_jpg(base64_string):
        """Decode a base64-encoded image, write it to ./do_img.jpg, and return the path."""
        image_data = base64.b64decode(base64_string)
        image_path = "./do_img.jpg"
        with open(image_path, "wb") as f:
            f.write(image_data)
        return image_path


class PromptSet:
    system_message = "You are an expert in analyzing and extracting information from freight, shipment, or delivery orders. Please carefully read the provided order file and extract the following 10 key pieces of information. Ensure that the key names are exactly as listed below. Do not create any additional key names other than these. If any information is missing or unavailable, output '-'."

    main_order_information_prompt = """#Key names and their descriptions:
1. container_number: The container number/no of the shipment (e.g., TRKU2038448, MSDU8549321). This should be an 11-character container number, with no additional format. If not available, output '-'.
2. bill_of_lading: The Bill of Lading number, which could include formats such as B/L No., AWS No., BL No., or ocean Bill of Lading (e.g., AXVJMER000008166, TRKU-10152009, HLCU ALY241000275). If not available, output '-'.
3. importing_carrier: The importing or ocean carrier, which may include SCAC codes, carrier's local agents, or sea line codes. If not available, output '-'.
4. origin_address: The address for picking up the container, such as the origin address, pickup location, terminal, or port of discharge. Exclude loading location information. (e.g., "PORT LIBERTY NY CONTAINER TERMINAL 300 WESTERN AVE"). If not available, output '-'.
5. destination_address: The address where the container is to be delivered, typically a company name or a specific delivery location (e.g., "AERO RECEIVING EAST, 2 BRICK PLANT ROAD, SOUTH RIVER, NJ"). If not available, output '-'.
6. container_weight: The weight of the container (in numeric format, e.g., 58,201.44). If there are multiple weights, output the highest value. If not available, output '-'.
7. container_weight_unit: The unit of measurement for the container's weight (e.g., LBS, KGS, KG, LB). If not available, output '-'.
8. container_type: The type/size of the container (e.g., 40HC, 20GP FCL). If not available, output '-'.
9. po_number: The purchase order number or customer’s PO (e.g., PO Number, customer’s PO, consol). If not available, output '-'.
10. reference_number: The reference number, file number, or any internal reference (e.g., reference number, our ref no.). If not available, output '-'.
#Output:
{container_number: ...,
bill_of_lading: ..,
importing_carrier: ...,
origin_address: ...,
destination_address: ...,
container_weight: ...,
container_weight_unit: ...,
container_type: ...,
po_number: ...,
reference_number: ...
}
Guidelines:
- Very important: do not make up anything. If the information of a required field is not available, output '-' for it.
- Output in JSON format. The JSON should contain the above 10 keys.
"""

    order_list_prompt = "How many containers are there? Return all the container numbers, and only the container numbers, as a JSON array."

    multiple_container_information_prompt = "Return the container weight, container weight unit, and the container size (with type) of {query} on the same line as container_number: {query}. You must respond only in JSON format. Example output: 'container_number': 'OOCU6979480', 'container_type': '40HC or DV', 'weight': '46,737.52', 'weight_unit': 'LB'"
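

# Expected request payload for __call__ (a sketch, inferred from the fields popped below):
# {
#     "inputs":     optional free-form prompt text, used when prompt_id is not 1/2/3,
#     "parameters": optional generation parameters (currently unused),
#     "prompt_id":  1 = full order extraction, 2 = container list, 3 = per-container details,
#     "image":      base64-encoded document image
# }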
class EndpointHandler():
    def __init__(self, path=""):
        self.model = AutoModelForVision2Seq.from_pretrained(
            model_id,
            device_map="auto",
            torch_dtype=torch.bfloat16,
            quantization_config=bnb_config,
        )
        # self.model.load_adapter("Portx/granite-vision-3.2-2b-20252802")
        self.processor = AutoProcessor.from_pretrained(model_id, use_fast=True)

    def __call__(self, data):
        # Deserialize the incoming request.
        inputs = data.pop("inputs", data)
        parameters = data.pop("parameters", None)
        prompt_id = data.pop("prompt_id", None)
        base64_image = data.pop("image", None)
        image_path = Utils.convert_base64_to_jpg(base64_image)

        # Select the prompt template; fall back to the raw "inputs" text.
        if prompt_id == 1:
            final_prompt = PromptSet.main_order_information_prompt
        elif prompt_id == 2:
            final_prompt = PromptSet.order_list_prompt
        elif prompt_id == 3:
            final_prompt = PromptSet.multiple_container_information_prompt
        else:
            final_prompt = inputs

        conversation = [
            {
                "role": "system",
                "content": [
                    {"type": "text", "text": PromptSet.system_message},
                ],
            },
            {
                "role": "user",
                "content": [
                    {"type": "image", "url": image_path},
                    {"type": "text", "text": final_prompt},
                ],
            },
        ]

        model_inputs = self.processor.apply_chat_template(
            conversation,
            add_generation_prompt=True,
            tokenize=True,
            return_dict=True,
            return_tensors="pt",
        ).to(device)
        output = self.model.generate(**model_inputs, max_new_tokens=512)
        # Decode only the newly generated tokens, skipping the echoed prompt.
        prediction = self.processor.decode(
            output[0][model_inputs["input_ids"].shape[1]:], skip_special_tokens=True
        )
        return prediction
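

# A minimal local smoke test (a sketch, not part of the deployed handler). It assumes a
# sample document image exists at ./sample_do.jpg; the handler and payload keys are taken
# from the code above.
if __name__ == "__main__":
    handler = EndpointHandler()
    with open("./sample_do.jpg", "rb") as f:
        encoded = base64.b64encode(f.read()).decode("utf-8")
    payload = {"inputs": "", "prompt_id": 1, "image": encoded}
    print(handler(payload))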