from typing import Any, Dict, List, Union

import numpy as np

from ..utils import (
    add_end_docstrings,
    is_torch_available,
    is_vision_available,
    logging,
    requires_backends,
)
from .base import Pipeline, build_pipeline_init_args


if is_vision_available():
    from PIL import Image

    from ..image_utils import load_image

if is_torch_available():
    from ..models.auto.modeling_auto import (
        MODEL_FOR_IMAGE_SEGMENTATION_MAPPING_NAMES,
        MODEL_FOR_INSTANCE_SEGMENTATION_MAPPING_NAMES,
        MODEL_FOR_SEMANTIC_SEGMENTATION_MAPPING_NAMES,
        MODEL_FOR_UNIVERSAL_SEGMENTATION_MAPPING_NAMES,
    )


logger = logging.get_logger(__name__)


Prediction = Dict[str, Any]
Predictions = List[Prediction]


@add_end_docstrings(build_pipeline_init_args(has_image_processor=True))
class ImageSegmentationPipeline(Pipeline):
    """
    Image segmentation pipeline using any `AutoModelForXXXSegmentation`. This pipeline predicts masks of objects and
    their classes.

    Example:

    ```python
    >>> from transformers import pipeline

    >>> segmenter = pipeline(model="facebook/detr-resnet-50-panoptic")
    >>> segments = segmenter("https://huggingface.co/datasets/Narsil/image_dummy/raw/main/parrots.png")
    >>> len(segments)
    2

    >>> segments[0]["label"]
    'bird'

    >>> segments[1]["label"]
    'bird'

    >>> type(segments[0]["mask"])  # This is a black and white mask showing where the bird is on the original image.
    <class 'PIL.Image.Image'>

    >>> segments[0]["mask"].size
    (768, 512)
    ```
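
    When the checkpoint supports it, you can also pick the subtask explicitly and tune the
    post-processing thresholds. A usage sketch (same `segmenter` as above, no output shown):

    ```python
    >>> segments = segmenter(
    ...     "https://huggingface.co/datasets/Narsil/image_dummy/raw/main/parrots.png",
    ...     subtask="panoptic",
    ...     threshold=0.9,
    ...     mask_threshold=0.5,
    ...     overlap_mask_area_threshold=0.5,
    ... )
    ```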


    This image segmentation pipeline can currently be loaded from [`pipeline`] using the following task identifier:
    `"image-segmentation"`.

    See the list of available models on
    [huggingface.co/models](https://huggingface.co/models?filter=image-segmentation).
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

        if self.framework == "tf":
            raise ValueError(f"The {self.__class__} is only available in PyTorch.")

        requires_backends(self, "vision")
        mapping = MODEL_FOR_IMAGE_SEGMENTATION_MAPPING_NAMES.copy()
        mapping.update(MODEL_FOR_SEMANTIC_SEGMENTATION_MAPPING_NAMES)
        mapping.update(MODEL_FOR_INSTANCE_SEGMENTATION_MAPPING_NAMES)
        mapping.update(MODEL_FOR_UNIVERSAL_SEGMENTATION_MAPPING_NAMES)
        self.check_model_type(mapping)

    def _sanitize_parameters(self, **kwargs):
        preprocess_kwargs = {}
        postprocess_kwargs = {}
        # `subtask` is consumed both at preprocess time (OneFormer task inputs) and at
        # postprocess time (choice of post-processing function).
        if "subtask" in kwargs:
            postprocess_kwargs["subtask"] = kwargs["subtask"]
            preprocess_kwargs["subtask"] = kwargs["subtask"]
        if "threshold" in kwargs:
            postprocess_kwargs["threshold"] = kwargs["threshold"]
        if "mask_threshold" in kwargs:
            postprocess_kwargs["mask_threshold"] = kwargs["mask_threshold"]
        if "overlap_mask_area_threshold" in kwargs:
            postprocess_kwargs["overlap_mask_area_threshold"] = kwargs["overlap_mask_area_threshold"]
        if "timeout" in kwargs:
            preprocess_kwargs["timeout"] = kwargs["timeout"]

        return preprocess_kwargs, {}, postprocess_kwargs

    def __call__(self, inputs=None, **kwargs) -> Union[Predictions, List[Prediction]]:
        """
        Perform segmentation (detect masks & classes) in the image(s) passed as inputs.

        Args:
            inputs (`str`, `List[str]`, `PIL.Image` or `List[PIL.Image]`):
                The pipeline handles three types of images:

                - A string containing an HTTP(S) link pointing to an image
                - A string containing a local path to an image
                - An image loaded in PIL directly

                The pipeline accepts either a single image or a batch of images. Images in a batch must all be in the
                same format: all as HTTP(S) links, all as local paths, or all as PIL images.
            subtask (`str`, *optional*):
                Segmentation task to be performed, choose [`semantic`, `instance` and `panoptic`] depending on model
                capabilities. If not set, the pipeline will attempt to resolve in the following order:
                  `panoptic`, `instance`, `semantic`.
            threshold (`float`, *optional*, defaults to 0.9):
                Probability threshold to filter out predicted masks.
            mask_threshold (`float`, *optional*, defaults to 0.5):
                Threshold to use when turning the predicted masks into binary values.
            overlap_mask_area_threshold (`float`, *optional*, defaults to 0.5):
                Mask overlap threshold to eliminate small, disconnected segments.
            timeout (`float`, *optional*, defaults to None):
                The maximum time in seconds to wait for fetching images from the web. If None, no timeout is set and
                the call may block forever.

        Return:
            A dictionary or a list of dictionaries containing the result. If the input is a single image, returns a
            list of dictionaries; if the input is a list of several images, returns a list of lists of dictionaries
            corresponding to each image.

            The dictionaries contain the mask, label and score (where applicable) of each detected object, using
            the following keys:

            - **label** (`str`) -- The class label identified by the model.
            - **mask** (`PIL.Image`) -- A binary mask of the detected object as a PIL Image of shape (width, height) of
              the original image. Returns a mask filled with zeros if no object is found.
            - **score** (*optional* `float`) -- Optionally, when the model is capable of estimating a confidence of the
              "object" described by the label and the mask.
        imagesNzICannot call the image-classification pipeline without an inputs argument!)popr   r   __call__)r!   inputsr#   r%   r&   r'   r3   a   s
   +
z"ImageSegmentationPipeline.__call__c                 C   s   t ||d}|j|jfg}| jjjjdkrL|d u ri }nd|gi}| jd
|gdd|}| jdkr8|	| j
}| j|d d| jjj| jdd |d< n| j|gdd}| jdkr_|	| j
}||d	< |S )N)r,   OneFormerConfigtask_inputspt)r1   return_tensors
max_length)paddingr9   r8   	input_idstarget_sizer&   )r   heightwidthmodelconfigr   __name__image_processorr   totorch_dtype	tokenizertask_seq_len)r!   imager(   r,   r<   r#   r4   r&   r&   r'   
preprocess   s,   



z$ImageSegmentationPipeline.preprocessc                 C   s&   | d}| jdi |}||d< |S )Nr<   r&   )r2   r?   )r!   model_inputsr<   model_outputsr&   r&   r'   _forward   s   
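
    # Subtask resolution happens in `postprocess` below: panoptic post-processing is tried
    # first, then instance, then semantic, using the first `post_process_*_segmentation`
    # method the image processor implements (see the `subtask` argument in `__call__`).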
z"ImageSegmentationPipeline._forward?      ?c                 C   s  d }|dv rt | jdr| jj}n|dv rt | jdr| jj}|d urj||||||d dd }g }|d }	|d	 D ]-}
|	|
d
 kd }tj| tj	dd}| j
jj|
d  }|
d }||||d q:|S |dv rt | jdr| jj||d dd }g }| }	t|	}|D ]#}|	|kd }tj|tj	dd}| j
jj| }|d ||d q|S td| dt| j
 )N>   Npanoptic"post_process_panoptic_segmentation>   Ninstance"post_process_instance_segmentationr<   )r)   r*   r+   target_sizesr   segmentationsegments_infoid   L)modelabel_idscore)rZ   labelmask>   Nsemantic"post_process_semantic_segmentation)rR   zSubtask z is not supported for model )hasattrrB   rO   rQ   r   	fromarraynumpyastypenpuint8r?   r@   id2labelappendr^   uniquer   type)r!   rJ   r(   r)   r*   r+   fnoutputs
annotationrS   segmentr\   r[   rZ   labelsr&   r&   r'   postprocess   sP   

z%ImageSegmentationPipeline.postprocess)N)NN)NrL   rM   rM   )rA   
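

if __name__ == "__main__":
    # Minimal usage sketch, not part of the upstream module: it assumes network access and
    # reuses the public checkpoint and test image from the class docstring example above.
    # Run with `python -m transformers.pipelines.image_segmentation` so relative imports resolve.
    from transformers import pipeline

    segmenter = pipeline(task="image-segmentation", model="facebook/detr-resnet-50-panoptic")
    segments = segmenter("https://huggingface.co/datasets/Narsil/image_dummy/raw/main/parrots.png")
    for segment in segments:
        # Each prediction is a dict with a label, an optional confidence score, and a PIL mask.
        print(segment["label"], segment["score"], segment["mask"].size)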