This commit is contained in:
2026-02-15 12:25:24 +08:00
parent 2ec3c0711a
commit 8bb00ac928
2 changed files with 43 additions and 4 deletions

39
test.py Normal file
View File

@@ -0,0 +1,39 @@
"""Minimal SAM3 demo: prompt an image model with text and collect masks.

Runs text-prompted segmentation on a single image; an equivalent
video-predictor flow is sketched (commented out) below.
"""
import torch
#################################### For Image ####################################
from PIL import Image
from sam3.model_builder import build_sam3_image_model
from sam3.model.sam3_image_processor import Sam3Processor

# Load the model and wrap it in its processor (handles pre/post-processing).
model = build_sam3_image_model()
processor = Sam3Processor(model)

# Load an image.
# NOTE(review): this is a .gif — PIL.Image.open returns only the first frame
# of an animated GIF; confirm a single frame is intended here.
image = Image.open("/home/quant/data/dev/sam3-main/assets/player.gif")
inference_state = processor.set_image(image)

# Prompt the model with text.
# Fixed typo: "pepole" -> "people"; the misspelled prompt would ask the
# model to ground a non-word and likely return no/garbage detections.
output = processor.set_text_prompt(state=inference_state, prompt="people")

# Get the masks, bounding boxes, and scores for every detected instance.
masks, boxes, scores = output["masks"], output["boxes"], output["scores"]

#################################### For Video ####################################
# from sam3.model_builder import build_sam3_video_predictor
# video_predictor = build_sam3_video_predictor()
# video_path = "<YOUR_VIDEO_PATH>" # a JPEG folder or an MP4 video file
# # Start a session
# response = video_predictor.handle_request(
#     request=dict(
#         type="start_session",
#         resource_path=video_path,
#     )
# )
# response = video_predictor.handle_request(
#     request=dict(
#         type="add_prompt",
#         session_id=response["session_id"],
#         frame_index=0, # Arbitrary frame index
#         text="<YOUR_TEXT_PROMPT>",
#     )
# )
# output = response["outputs"]