apply Black 25.11.0 style in fbcode/deeplearning/projects (21/92)

Summary:
Formats the covered files with pyfmt.

paintitblack

Reviewed By: itamaro

Differential Revision: D90476315

fbshipit-source-id: ee94c471788b8e7d067813d8b3e0311214d17f3f
This commit is contained in:
Bowie Chen
2026-01-11 23:16:49 -08:00
committed by meta-codesync[bot]
parent 7b89b8fc3f
commit 11dec2936d
69 changed files with 445 additions and 522 deletions

View File

@@ -142,9 +142,9 @@ class COCO_FROM_JSON:
self.prompts = {}
for loc_dict in prompts:
self.prompts[int(loc_dict["id"])] = loc_dict["name"]
- assert len(self.prompts) == len(
-     self._sorted_cat_ids
- ), "Number of prompts must match number of categories"
+ assert len(self.prompts) == len(self._sorted_cat_ids), (
+     "Number of prompts must match number of categories"
+ )
def getDatapointIds(self):
"""Return all datapoint indices for training."""

View File

@@ -6,7 +6,6 @@ from dataclasses import dataclass, field as field_ptr_behaviour, fields, is_data
from typing import Any, get_args, get_origin, List, Union
import torch
from sam3.model.data_misc import (
BatchedDatapoint,
BatchedFindTarget,
@@ -217,9 +216,9 @@ def collate_fn_api(
text_batch.append(q.query_text)
stages[stage_id].text_ids.append(text_batch.index(q.query_text))
- assert (
-     q.inference_metadata is not None
- ), "inference_metadata must be provided when FindQueryLoaded is created."
+ assert q.inference_metadata is not None, (
+     "inference_metadata must be provided when FindQueryLoaded is created."
+ )
for f in fields(q.inference_metadata):
getattr(find_metadatas[stage_id], f.name).append(
getattr(q.inference_metadata, f.name)

View File

@@ -19,10 +19,8 @@ import torch.utils.data
import torchvision
from decord import cpu, VideoReader
from iopath.common.file_io import g_pathmgr
from PIL import Image as PILImage
from PIL.Image import DecompressionBombError
from sam3.model.box_ops import box_xywh_to_xyxy
from torchvision.datasets.vision import VisionDataset
@@ -234,9 +232,9 @@ class CustomCocoDetectionAPI(VisionDataset):
if self.coco is not None:
return
- assert g_pathmgr.isfile(
-     self.annFile
- ), f"please provide valid annotation file. Missing: {self.annFile}"
+ assert g_pathmgr.isfile(self.annFile), (
+     f"please provide valid annotation file. Missing: {self.annFile}"
+ )
annFile = g_pathmgr.get_local_path(self.annFile)
if self.coco is not None:
@@ -326,9 +324,9 @@ class CustomCocoDetectionAPI(VisionDataset):
else:
num_queries_per_stage = stage2num_queries.most_common(1)[0][1]
for stage, num_queries in stage2num_queries.items():
- assert (
-     num_queries == num_queries_per_stage
- ), f"Number of queries in stage {stage} is {num_queries}, expected {num_queries_per_stage}"
+ assert num_queries == num_queries_per_stage, (
+     f"Number of queries in stage {stage} is {num_queries}, expected {num_queries_per_stage}"
+ )
for query_id, query in enumerate(queries):
h, w = id2imsize[query["image_id"]]

View File

@@ -3,7 +3,6 @@
# pyre-unsafe
import copy
import io
import json
import logging
@@ -16,7 +15,6 @@ from typing import Any, Dict, List, Optional, Set, Tuple, Union
import torch
import torchvision
# from decord import cpu, VideoReader
from iopath.common.file_io import PathManager
@@ -220,9 +218,9 @@ class VideoGroundingDataset(Sam3ImageDataset):
for query in filtered_queries:
ptr_x_is_empty = query["ptr_x_query_id"] in [None, -1]
ptr_y_is_empty = query["ptr_y_query_id"] in [None, -1]
- assert (
-     ptr_x_is_empty and ptr_y_is_empty
- ), "Remapping stage ids is not supported for queries with non-empty ptr_x or ptr_y pointers"
+ assert ptr_x_is_empty and ptr_y_is_empty, (
+     "Remapping stage ids is not supported for queries with non-empty ptr_x or ptr_y pointers"
+ )
query["query_processing_order"] = stage_id_old2new[
query["query_processing_order"]
]