Low FPS on video stream #484
-
Question
Hi there, I'm getting low FPS on my video stream. My requirements.txt and my app.py are below.
Thanks in advance.

Additional
No response
Replies: 4 comments 13 replies
-
Hi @gmijo47 👋🏻 Let me convert this question into a discussion first.
-
Could you run
-
Confirming the issue. Here are the reproduction steps.

Dependencies:
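The exact pins are collapsed in the thread; judging from the script's imports, the environment needs at least something like:

# assumption: unpinned package list inferred from the imports below
inference
supervision
ultralytics
opencv-python
numpy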
Running a small RTSP stream on a Mac camera:
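The exact command is collapsed above; a minimal sketch that would produce a matching udp://127.0.0.1:9090 stream from the built-in camera, assuming ffmpeg with the avfoundation input on macOS and device index 0 (both assumptions on my side):

ffmpeg -f avfoundation -framerate 30 -i "0" \
    -vcodec libx264 -preset ultrafast -tune zerolatency \
    -f mpegts udp://127.0.0.1:9090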
Code to view and annotate (no extra files needed):

import numpy as np
from inference import InferencePipeline, get_model
from inference.core.interfaces.camera.entities import VideoFrame
from ultralytics import YOLO
import cv2
import supervision as sv
from typing import Any, List
COLORS = sv.ColorPalette.from_hex(["#E6194B", "#3CB44B", "#FFE119", "#3C76D1"])
COLOR_ANNOTATOR = sv.ColorAnnotator(color=COLORS)
LABEL_ANNOTATOR = sv.LabelAnnotator(
    color=COLORS, text_color=sv.Color.from_hex("#000000")
)
def draw_fps(frame: np.ndarray, fps: float) -> np.ndarray:
    # color-code the FPS readout: red <= 5, yellow <= 12.5, green above
    match fps:
        case fps if fps <= 5.0:
            bg_color = "#D20103"
        case fps if fps <= 12.5:
            bg_color = "#FFDE59"
        case _:
            bg_color = "#7DDA58"
    frame = sv.draw_text(
        scene=frame,
        text=f"{fps:.2f}",
        text_anchor=sv.Point(40, 30),
        background_color=sv.Color.from_hex(bg_color),
        text_color=sv.Color.from_hex("#000000"),
    )
    return frame
def draw_detections(frame: np.ndarray, detections: sv.Detections) -> np.ndarray:
    frame = COLOR_ANNOTATOR.annotate(
        scene=frame,
        detections=detections,
    )
    labels = [f"#{class_id}" for class_id in detections.class_id]
    frame = LABEL_ANNOTATOR.annotate(
        scene=frame,
        detections=detections,
        labels=labels,
    )
    return frame
class DataProcessor:
    def __init__(self):
        self.fps_monitor = sv.FPSMonitor()

    def on_prediction(self, detections_response: Any, frame: VideoFrame) -> None:
        self.fps_monitor.tick()
        fps = self.fps_monitor.fps
        # inference_callback already returns sv.Detections
        detections = detections_response
        annotated_frame = frame.image.copy()
        annotated_frame = draw_fps(annotated_frame, fps)
        annotated_frame = draw_detections(annotated_frame, detections)
        cv2.imshow("Processed Video", annotated_frame)
        cv2.waitKey(1)
def main() -> None:
    # Option 1: Ultralytics
    # model = YOLO("yolov8n")
    # def inference_callback(frames: List[VideoFrame]) -> sv.Detections:
    #     frame = frames[0]
    #     results = model(frame.image, device="cpu", verbose=False)[0]
    #     detections = sv.Detections.from_ultralytics(results)
    #     return detections

    # Option 2: Inference
    model = get_model("yolov8n-640")

    def inference_callback(frames: List[VideoFrame]) -> sv.Detections:
        frame = frames[0]
        results = model.infer(frame.image)[0]
        detections = sv.Detections.from_inference(results)
        return detections

    processor = DataProcessor()
    pipeline = InferencePipeline.init_with_custom_logic(
        video_reference="udp://127.0.0.1:9090",
        on_video_frame=inference_callback,
        on_prediction=processor.on_prediction,
    )
    pipeline.start()
    try:
        pipeline.join()
    except KeyboardInterrupt:
        pipeline.terminate()


if __name__ == "__main__":
    main()

My guess would be that InferencePipeline can't keep up but doesn't drop any requests.
-
OK, I may have found the cause of the strange behaviour - please verify on your end. If that's the reason, I will provide an explanation.

from inference.core.interfaces.camera.video_source import (
    BufferConsumptionStrategy,
    BufferFillingStrategy,
)

pipeline = InferencePipeline.init_with_custom_logic(
    video_reference="udp://127.0.0.1:9090",
    on_video_frame=inference_callback,
    on_prediction=processor.on_prediction,
    source_buffer_filling_strategy=BufferFillingStrategy.DROP_OLDEST,  # <---
    source_buffer_consumption_strategy=BufferConsumptionStrategy.EAGER,  # <---
)
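For context, my understanding of these two options (treat this as an assumption, not official documentation): without them the pipeline buffers every decoded frame and processes them in order, so when inference is slower than the stream the backlog grows and the displayed FPS drops. BufferFillingStrategy.DROP_OLDEST discards the oldest buffered frames once the buffer is full, and BufferConsumptionStrategy.EAGER makes the consumer skip ahead to the most recent available frame, which keeps the display close to real time.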