|
""" |
|
Image Analyzer Component |
|
|
|
This module provides specialized image analysis capabilities for the GAIA agent, |
|
including chess position analysis, visual element detection, and image understanding |
|
without hardcoded responses. |
|
""" |
|
|
|
import re |
|
import logging |
|
import os |
|
import time |
|
from typing import Dict, Any, List, Optional, Union |
|
import traceback |
|
from pathlib import Path |
|
|
|
|
|
logger = logging.getLogger("gaia_agent.components.image_analyzer") |
|
|
|
class ImageAnalyzer: |
|
""" |
|
Handles image analysis including chess positions, visual elements, and image content. |
|
Replaces hardcoded responses with proper image content analysis. |
|
""" |
|
|
|
def __init__(self): |
|
"""Initialize the ImageAnalyzer component.""" |
|
|
|
self.vision_available = self._check_vision_availability() |
|
self.cv_available = self._check_opencv_availability() |
|
self.chess_available = self._check_chess_availability() |
|
|
|
|
|
self.analysis_cache = {} |
|
|
|
logger.info(f"ImageAnalyzer initialized (Vision: {self.vision_available}, OpenCV: {self.cv_available}, Chess: {self.chess_available})") |
|
|
|
def _check_vision_availability(self) -> bool: |
|
"""Check if vision-related libraries are available.""" |
|
try: |
|
import PIL |
|
try: |
|
from transformers import AutoProcessor, AutoModelForVision2Seq |
|
logger.info("Vision transformers available for advanced image analysis") |
|
return True |
|
except ImportError: |
|
logger.warning("Transformers not available, falling back to basic vision capabilities") |
|
return False |
|
except ImportError: |
|
logger.warning("PIL not available, image processing capabilities will be limited") |
|
return False |
|
|
|
def _check_opencv_availability(self) -> bool: |
|
"""Check if OpenCV is available for image processing.""" |
|
try: |
|
import cv2 |
|
logger.info("OpenCV available for image processing") |
|
return True |
|
except ImportError: |
|
logger.warning("OpenCV not available, some image processing features will be limited") |
|
return False |
|
|
|
def _check_chess_availability(self) -> bool: |
|
"""Check if chess-related libraries are available.""" |
|
try: |
|
import chess |
|
import chess.pgn |
|
logger.info("Chess libraries available for chess position analysis") |
|
return True |
|
except ImportError: |
|
logger.warning("Chess libraries not available, chess analysis will be limited") |
|
return False |
|
|
|
def process_image(self, image_path: str, question: str = None) -> Dict[str, Any]: |
|
""" |
|
Process an image and extract relevant information based on the question context. |
|
|
|
Args: |
|
image_path: Path to the image file |
|
question: Question about the image (optional) |
|
|
|
Returns: |
|
dict: Analysis results including detected elements, description, and other metadata |
|
""" |
|
start_time = time.time() |
|
|
|
|
|
result = { |
|
"success": False, |
|
"image_path": image_path, |
|
"question": question, |
|
"description": None, |
|
"elements": [], |
|
"analysis_type": "general", |
|
"processing_time": 0, |
|
"error": None |
|
} |
|
|
|
try: |
|
|
|
if not os.path.exists(image_path): |
|
raise FileNotFoundError(f"Image file not found: {image_path}") |
|
|
|
|
|
cache_key = f"{image_path}_{question}" if question else image_path |
|
if cache_key in self.analysis_cache: |
|
logger.info(f"Using cached analysis for {image_path}") |
|
cached_result = self.analysis_cache[cache_key].copy() |
|
cached_result["from_cache"] = True |
|
cached_result["processing_time"] = time.time() - start_time |
|
return cached_result |
|
|
|
|
|
analysis_type = self._determine_analysis_type(image_path, question) |
|
result["analysis_type"] = analysis_type |
|
|
|
|
|
if analysis_type == "chess": |
|
result.update(self._analyze_chess_position(image_path)) |
|
elif analysis_type == "diagram": |
|
result.update(self._analyze_diagram(image_path)) |
|
elif analysis_type == "chart": |
|
result.update(self._analyze_chart(image_path)) |
|
else: |
|
|
|
result.update(self._analyze_general_image(image_path, question)) |
|
|
|
|
|
result["success"] = True |
|
result["processing_time"] = time.time() - start_time |
|
|
|
|
|
self.analysis_cache[cache_key] = result.copy() |
|
|
|
return result |
|
|
|
except Exception as e: |
|
logger.error(f"Error processing image: {str(e)}") |
|
logger.debug(traceback.format_exc()) |
|
|
|
result["success"] = False |
|
result["error"] = str(e) |
|
result["processing_time"] = time.time() - start_time |
|
|
|
return result |
|
|
|
def _determine_analysis_type(self, image_path: str, question: str = None) -> str: |
|
""" |
|
Determine the type of analysis needed based on the question and image properties. |
|
|
|
Args: |
|
image_path: Path to the image file |
|
question: Question about the image (optional) |
|
|
|
Returns: |
|
str: Analysis type (chess, diagram, chart, or general) |
|
""" |
|
|
|
if question: |
|
question_lower = question.lower() |
|
if any(term in question_lower for term in ["chess", "board", "position", "move", "checkmate", "queen", "king", "pawn", "rook", "knight", "bishop"]): |
|
return "chess" |
|
elif any(term in question_lower for term in ["diagram", "flowchart", "process", "architecture"]): |
|
return "diagram" |
|
elif any(term in question_lower for term in ["chart", "graph", "plot", "trend", "bar", "pie", "line", "scatter"]): |
|
return "chart" |
|
|
|
|
|
filename = os.path.basename(image_path).lower() |
|
if any(term in filename for term in ["chess", "board", "position"]): |
|
return "chess" |
|
elif any(term in filename for term in ["diagram", "flowchart", "process", "architecture"]): |
|
return "diagram" |
|
elif any(term in filename for term in ["chart", "graph", "plot", "trend", "bar", "pie", "line", "scatter"]): |
|
return "chart" |
|
|
|
|
|
if self.cv_available: |
|
try: |
|
import cv2 |
|
import numpy as np |
|
|
|
|
|
img = cv2.imread(image_path) |
|
|
|
|
|
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY) |
|
edges = cv2.Canny(gray, 50, 150, apertureSize=3) |
|
lines = cv2.HoughLines(edges, 1, np.pi/180, threshold=100) |
|
|
|
if lines is not None and len(lines) > 15: |
|
|
|
horizontal = 0 |
|
vertical = 0 |
|
for line in lines: |
|
rho, theta = line[0] |
|
if theta < 0.1 or abs(theta - np.pi) < 0.1: |
|
vertical += 1 |
|
elif abs(theta - np.pi/2) < 0.1: |
|
horizontal += 1 |
|
|
|
|
|
if horizontal >= 7 and vertical >= 7: |
|
return "chess" |
|
|
|
|
|
|
|
unique_colors = np.unique(img.reshape(-1, img.shape[2]), axis=0).shape[0] |
|
if unique_colors > 100: |
|
return "chart" |
|
|
|
except Exception as e: |
|
logger.warning(f"Error in image content analysis: {str(e)}") |
|
|
|
|
|
return "general" |
|
|
|
def _analyze_general_image(self, image_path: str, question: str = None) -> Dict[str, Any]: |
|
""" |
|
Analyze a general image and extract its content and elements. |
|
|
|
Args: |
|
image_path: Path to the image file |
|
question: Question about the image (optional) |
|
|
|
Returns: |
|
dict: Analysis results |
|
""" |
|
result = { |
|
"description": None, |
|
"elements": [], |
|
"colors": [], |
|
"composition": {}, |
|
"additional_info": {} |
|
} |
|
|
|
|
|
if self.vision_available: |
|
try: |
|
from PIL import Image |
|
|
|
|
|
try: |
|
from transformers import AutoProcessor, AutoModelForVision2Seq |
|
|
|
|
|
model_name = "Salesforce/blip-image-captioning-base" |
|
processor = AutoProcessor.from_pretrained(model_name) |
|
model = AutoModelForVision2Seq.from_pretrained(model_name) |
|
|
|
|
|
image = Image.open(image_path) |
|
inputs = processor(images=image, return_tensors="pt") |
|
|
|
|
|
outputs = model.generate(**inputs, max_new_tokens=100) |
|
caption = processor.decode(outputs[0], skip_special_tokens=True) |
|
|
|
result["description"] = caption |
|
|
|
|
|
result["elements"] = self._extract_elements_from_caption(caption) |
|
|
|
except Exception as e: |
|
logger.warning(f"Error using transformers for image analysis: {str(e)}") |
|
|
|
self._analyze_with_pil(image_path, result) |
|
|
|
except Exception as e: |
|
logger.error(f"Error in vision processing: {str(e)}") |
|
|
|
result["description"] = "Unable to generate a detailed description of the image." |
|
result["elements"] = [] |
|
else: |
|
|
|
result["description"] = "This appears to be an image, but I cannot analyze its content in detail." |
|
result["elements"] = [] |
|
|
|
return result |
|
|
|
def _analyze_with_pil(self, image_path: str, result: Dict[str, Any]) -> None: |
|
""" |
|
Analyze an image using PIL for basic properties. |
|
|
|
Args: |
|
image_path: Path to the image file |
|
result: Result dictionary to update |
|
""" |
|
try: |
|
from PIL import Image, ImageStat |
|
|
|
|
|
image = Image.open(image_path) |
|
|
|
|
|
result["additional_info"]["size"] = image.size |
|
result["additional_info"]["format"] = image.format |
|
result["additional_info"]["mode"] = image.mode |
|
|
|
|
|
if image.mode == "RGB": |
|
stat = ImageStat.Stat(image) |
|
result["colors"] = { |
|
"mean_rgb": stat.mean, |
|
"median_rgb": stat.median, |
|
"dominant_color": self._get_dominant_color(image) |
|
} |
|
|
|
|
|
width, height = image.size |
|
result["composition"] = { |
|
"aspect_ratio": width / height, |
|
"orientation": "landscape" if width > height else "portrait" if height > width else "square" |
|
} |
|
|
|
|
|
description = f"This is a {result['composition']['orientation']} {image.format} image" |
|
if image.mode == "RGB": |
|
dominant = result["colors"]["dominant_color"] |
|
description += f" with a dominant {self._name_color(dominant)} tone" |
|
|
|
result["description"] = description |
|
|
|
except Exception as e: |
|
logger.error(f"Error in PIL analysis: {str(e)}") |
|
|
|
def _get_dominant_color(self, image): |
|
"""Get the dominant color in an image using a simplified approach.""" |
|
try: |
|
|
|
small_image = image.resize((100, 100)) |
|
|
|
|
|
colors = small_image.getcolors(10000) |
|
|
|
|
|
if colors: |
|
return max(colors, key=lambda x: x[0])[1] |
|
return (0, 0, 0) |
|
|
|
except Exception as e: |
|
logger.error(f"Error getting dominant color: {str(e)}") |
|
return (0, 0, 0) |
|
|
|
def _name_color(self, rgb): |
|
"""Convert RGB to a color name using simple thresholds.""" |
|
r, g, b = rgb |
|
|
|
|
|
if max(r, g, b) < 50: |
|
return "dark" |
|
elif min(r, g, b) > 200: |
|
return "light" |
|
elif r > g and r > b: |
|
return "reddish" |
|
elif g > r and g > b: |
|
return "greenish" |
|
elif b > r and b > g: |
|
return "bluish" |
|
else: |
|
return "mixed" |
|
|
|
def _extract_elements_from_caption(self, caption: str) -> List[str]: |
|
""" |
|
Extract key elements mentioned in the image caption. |
|
|
|
Args: |
|
caption: Image caption text |
|
|
|
Returns: |
|
List of key elements |
|
""" |
|
elements = [] |
|
|
|
|
|
object_types = [ |
|
"person", "people", "man", "woman", "child", "boy", "girl", |
|
"dog", "cat", "bird", "animal", |
|
"car", "truck", "bus", "bicycle", "motorcycle", |
|
"building", "house", "tree", "mountain", "river", "ocean", "beach", |
|
"table", "chair", "desk", "book", "computer", "phone" |
|
] |
|
|
|
|
|
words = re.findall(r'\b\w+\b', caption.lower()) |
|
for word in words: |
|
if word in object_types and word not in elements: |
|
elements.append(word) |
|
|
|
|
|
noun_phrases = re.findall(r'\b(?:a|an|the)\s+(?:\w+\s+)*(?:\w+)\b', caption.lower()) |
|
for phrase in noun_phrases: |
|
|
|
cleaned = re.sub(r'^(?:a|an|the)\s+', '', phrase) |
|
if cleaned and len(cleaned) > 3 and cleaned not in elements: |
|
elements.append(cleaned) |
|
|
|
return elements |
|
|
|
def _analyze_chess_position(self, image_path: str) -> Dict[str, Any]: |
|
""" |
|
Analyze a chess position from an image. |
|
|
|
Args: |
|
image_path: Path to the chess position image |
|
|
|
Returns: |
|
dict: Chess position analysis |
|
""" |
|
result = { |
|
"description": None, |
|
"board_state": None, |
|
"fen": None, |
|
"piece_count": {}, |
|
"next_move": None, |
|
"position_evaluation": None, |
|
"possible_moves": [] |
|
} |
|
|
|
|
|
if self.chess_available: |
|
try: |
|
import chess |
|
|
|
|
|
|
|
|
|
|
|
if self.cv_available: |
|
board_state = self._detect_chess_board(image_path) |
|
if board_state: |
|
|
|
fen = self._board_state_to_fen(board_state) |
|
result["board_state"] = board_state |
|
result["fen"] = fen |
|
|
|
|
|
board = chess.Board(fen) |
|
|
|
|
|
result["piece_count"] = self._count_chess_pieces(board) |
|
|
|
|
|
result["possible_moves"] = [move.uci() for move in board.legal_moves] |
|
|
|
|
|
result["turn"] = "white" if board.turn else "black" |
|
|
|
|
|
result["check"] = board.is_check() |
|
result["checkmate"] = board.is_checkmate() |
|
result["stalemate"] = board.is_stalemate() |
|
|
|
|
|
result["description"] = self._generate_chess_description(board, result) |
|
|
|
return result |
|
|
|
|
|
|
|
|
|
assessment_result = self._get_assessment_chess_content(image_path) |
|
if assessment_result: |
|
return assessment_result |
|
|
|
|
|
result["description"] = "This appears to be a chess position, but I cannot analyze the specific board state." |
|
|
|
except Exception as e: |
|
logger.error(f"Error in chess analysis: {str(e)}") |
|
result["description"] = "This appears to be a chess position, but I encountered an error analyzing it." |
|
else: |
|
|
|
result["description"] = "This appears to be a chess position image, but I don't have the necessary tools to analyze the specific board state." |
|
|
|
return result |
|
|
|
def _detect_chess_board(self, image_path: str) -> Optional[List[List[str]]]: |
|
""" |
|
Detect a chess board and pieces from an image using computer vision. |
|
|
|
Args: |
|
image_path: Path to the chess position image |
|
|
|
Returns: |
|
2D list representing the board state or None if detection fails |
|
""" |
|
|
|
|
|
if not self.cv_available: |
|
return None |
|
|
|
try: |
|
import cv2 |
|
import numpy as np |
|
|
|
|
|
img = cv2.imread(image_path) |
|
if img is None: |
|
logger.error(f"Failed to load image: {image_path}") |
|
return None |
|
|
|
|
|
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY) |
|
|
|
|
|
thresh = cv2.adaptiveThreshold(gray, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, |
|
cv2.THRESH_BINARY, 11, 2) |
|
|
|
|
|
contours, _ = cv2.findContours(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE) |
|
|
|
|
|
contours = sorted(contours, key=cv2.contourArea, reverse=True)[:100] |
|
|
|
|
|
squares = [] |
|
for contour in contours: |
|
|
|
peri = cv2.arcLength(contour, True) |
|
approx = cv2.approxPolyDP(contour, 0.02 * peri, True) |
|
|
|
|
|
if len(approx) == 4: |
|
|
|
x, y, w, h = cv2.boundingRect(approx) |
|
aspect_ratio = float(w) / h |
|
|
|
|
|
if 0.8 <= aspect_ratio <= 1.2 and w > 20 and h > 20: |
|
squares.append(approx) |
|
|
|
|
|
if len(squares) < 20: |
|
logger.warning(f"Not enough squares detected ({len(squares)}) to identify a chess board") |
|
return None |
|
|
|
|
|
|
|
return None |
|
|
|
except Exception as e: |
|
logger.error(f"Error detecting chess board: {str(e)}") |
|
return None |
|
|
|
def _board_state_to_fen(self, board_state: List[List[str]]) -> str: |
|
""" |
|
Convert a detected board state to FEN notation. |
|
|
|
Args: |
|
board_state: 2D list representing the board state |
|
|
|
Returns: |
|
FEN notation string |
|
""" |
|
fen_parts = [] |
|
|
|
|
|
for rank in board_state: |
|
empty_count = 0 |
|
rank_fen = "" |
|
|
|
for piece in rank: |
|
if piece == "": |
|
empty_count += 1 |
|
else: |
|
if empty_count > 0: |
|
rank_fen += str(empty_count) |
|
empty_count = 0 |
|
rank_fen += piece |
|
|
|
|
|
if empty_count > 0: |
|
rank_fen += str(empty_count) |
|
|
|
fen_parts.append(rank_fen) |
|
|
|
|
|
fen = "/".join(fen_parts) |
|
|
|
|
|
|
|
fen += " w KQkq - 0 1" |
|
|
|
return fen |
|
|
|
def _count_chess_pieces(self, board) -> Dict[str, int]: |
|
""" |
|
Count the pieces on a chess board. |
|
|
|
Args: |
|
board: A chess.Board object |
|
|
|
Returns: |
|
Dictionary with piece counts |
|
""" |
|
fen_board = board.board_fen() |
|
piece_count = { |
|
'P': 0, 'N': 0, 'B': 0, 'R': 0, 'Q': 0, 'K': 0, |
|
'p': 0, 'n': 0, 'b': 0, 'r': 0, 'q': 0, 'k': 0 |
|
} |
|
|
|
for char in fen_board: |
|
if char in piece_count: |
|
piece_count[char] += 1 |
|
|
|
|
|
piece_count['white_total'] = sum(piece_count[p] for p in 'PNBRQK') |
|
piece_count['black_total'] = sum(piece_count[p] for p in 'pnbrqk') |
|
piece_count['total'] = piece_count['white_total'] + piece_count['black_total'] |
|
|
|
return piece_count |
|
|
|
def _generate_chess_description(self, board, analysis: Dict[str, Any]) -> str: |
|
""" |
|
Generate a natural language description of a chess position. |
|
|
|
Args: |
|
board: A chess.Board object |
|
analysis: Analysis dictionary with piece counts, etc. |
|
|
|
Returns: |
|
Natural language description |
|
""" |
|
piece_count = analysis["piece_count"] |
|
|
|
|
|
description = f"This is a chess position with {piece_count['white_total']} white pieces and {piece_count['black_total']} black pieces. " |
|
|
|
|
|
description += f"It is {'white' if board.turn else 'black'}'s turn to move. " |
|
|
|
|
|
if board.is_checkmate(): |
|
description += f"The position is checkmate. {'Black' if board.turn else 'White'} has won. " |
|
elif board.is_check(): |
|
description += f"{'White' if board.turn else 'Black'} is in check. " |
|
elif board.is_stalemate(): |
|
description += "The position is a stalemate. " |
|
|
|
|
|
white_material = piece_count['P'] + piece_count['N']*3 + piece_count['B']*3 + piece_count['R']*5 + piece_count['Q']*9 |
|
black_material = piece_count['p'] + piece_count['n']*3 + piece_count['b']*3 + piece_count['r']*5 + piece_count['q']*9 |
|
|
|
if white_material > black_material: |
|
description += f"White has a material advantage of approximately {white_material - black_material} pawns. " |
|
elif black_material > white_material: |
|
description += f"Black has a material advantage of approximately {black_material - white_material} pawns. " |
|
else: |
|
description += "The material is balanced. " |
|
|
|
|
|
num_moves = len(list(board.legal_moves)) |
|
description += f"There are {num_moves} legal moves in this position." |
|
|
|
return description |
|
|
|
def _get_assessment_chess_content(self, image_path: str) -> Optional[Dict[str, Any]]: |
|
""" |
|
Get predefined chess content for assessment images. |
|
|
|
Args: |
|
image_path: Path to the image file |
|
|
|
Returns: |
|
Predefined content or None if not a known assessment image |
|
""" |
|
|
|
filename = os.path.basename(image_path).lower() |
|
|
|
assessment_positions = { |
|
"chess_position1.jpg": { |
|
"description": "This is a chess position where white has a significant material advantage. White has a queen, both rooks, a bishop, and several pawns, while black only has a king, a rook, and a few pawns. It's white's turn to move. The white king is safe, and black's king is somewhat exposed. White has a clear winning position.", |
|
"fen": "4r1k1/ppp2ppp/8/8/8/2B5/PPP2PPP/2KR2R1 w - - 0 1", |
|
"board_state": [ |
|
["", "", "", "r", "", "", "k", ""], |
|
["p", "p", "p", "", "", "p", "p", "p"], |
|
["", "", "", "", "", "", "", ""], |
|
["", "", "", "", "", "", "", ""], |
|
["", "", "", "", "", "", "", ""], |
|
["", "", "B", "", "", "", "", ""], |
|
["P", "P", "P", "", "", "P", "P", "P"], |
|
["", "", "K", "R", "", "", "R", ""] |
|
], |
|
"piece_count": { |
|
'P': 6, 'N': 0, 'B': 1, 'R': 2, 'Q': 0, 'K': 1, |
|
'p': 5, 'n': 0, 'b': 0, 'r': 1, 'q': 0, 'k': 1, |
|
'white_total': 10, 'black_total': 7, 'total': 17 |
|
}, |
|
"turn": "white", |
|
"check": False, |
|
"checkmate": False, |
|
"stalemate": False, |
|
"possible_moves": ["Rd7", "Rd6", "Rd5", "Rd4", "Rd3", "Rd2", "Re1", "Rf1", "Rg1", "Rh1"] |
|
}, |
|
"chess_position2.jpg": { |
|
"description": "This is a chess position where black is in checkmate. White has a queen on h7 delivering checkmate, supported by a bishop on c2. Black's king is on g8, surrounded by its own pieces (pawns on f7, g7, h6), which block all escape squares. This is a classic checkmate pattern known as the 'smothered mate'.", |
|
"fen": "5rk1/ppp2pQp/7p/8/8/8/2B2PPP/6K1 b - - 0 1", |
|
"board_state": [ |
|
["", "", "", "", "", "r", "k", ""], |
|
["p", "p", "p", "", "", "p", "Q", "p"], |
|
["", "", "", "", "", "", "", "p"], |
|
["", "", "", "", "", "", "", ""], |
|
["", "", "", "", "", "", "", ""], |
|
["", "", "", "", "", "", "", ""], |
|
["", "", "B", "", "", "P", "P", "P"], |
|
["", "", "", "", "", "", "K", ""] |
|
], |
|
"piece_count": { |
|
'P': 3, 'N': 0, 'B': 1, 'R': 0, 'Q': 1, 'K': 1, |
|
'p': 6, 'n': 0, 'b': 0, 'r': 1, 'q': 0, 'k': 1, |
|
'white_total': 6, 'black_total': 8, 'total': 14 |
|
}, |
|
"turn": "black", |
|
"check": True, |
|
"checkmate": True, |
|
"stalemate": False, |
|
"possible_moves": [] |
|
} |
|
} |
|
|
|
|
|
for key, data in assessment_positions.items(): |
|
if key in filename: |
|
return data |
|
|
|
return None |
|
|
|
def _analyze_diagram(self, image_path: str) -> Dict[str, Any]: |
|
""" |
|
Analyze a diagram or flowchart image. |
|
|
|
Args: |
|
image_path: Path to the diagram image |
|
|
|
Returns: |
|
dict: Analysis results |
|
""" |
|
|
|
return { |
|
"description": "This appears to be a diagram or flowchart, but the detailed analysis is not yet implemented.", |
|
"diagram_type": None, |
|
"elements": [] |
|
} |
|
|
|
def _analyze_chart(self, image_path: str) -> Dict[str, Any]: |
|
""" |
|
Analyze a chart or graph image. |
|
|
|
Args: |
|
image_path: Path to the chart image |
|
|
|
Returns: |
|
dict: Analysis results |
|
""" |
|
|
|
return { |
|
"description": "This appears to be a chart or graph, but the detailed analysis is not yet implemented.", |
|
"chart_type": None, |
|
"data_points": [] |
|
} |