1.1.0版本
This commit is contained in:
2
UniTAP/dev/modules/capturer/__init__.py
Normal file
2
UniTAP/dev/modules/capturer/__init__.py
Normal file
@@ -0,0 +1,2 @@
|
||||
from .capture import Capturer, CaptureConfig, CaptureStatus, CaptureError, AudioCaptureStatus,\
|
||||
EventCaptureStatus, VideoCaptureStatus
|
||||
550
UniTAP/dev/modules/capturer/capture.py
Normal file
550
UniTAP/dev/modules/capturer/capture.py
Normal file
@@ -0,0 +1,550 @@
|
||||
import time
|
||||
from typing import Union
|
||||
|
||||
from UniTAP.libs.lib_tsi.tsi_io import DeviceIO
|
||||
from UniTAP.libs.lib_tsi.tsi import *
|
||||
from UniTAP.dev.modules.capturer.statuses import CaptureStatus, AudioCaptureStatus, EventCaptureStatus, \
|
||||
VideoCaptureStatus, BulkCaptureStatus
|
||||
from UniTAP.common import AudioFrameData, VideoFrameDSC, create_from_pps
|
||||
from UniTAP.dev.ports.modules.capturer.event.event_types import EventData
|
||||
from threading import Lock
|
||||
from UniTAP.utils import function_scheduler
|
||||
from UniTAP.dev.ports.modules.capturer.bulk.private_bulk_types import *
|
||||
from UniTAP.dev.ports.modules.capturer.bulk.bulk_types import *
|
||||
from .types import *
|
||||
|
||||
|
||||
# Default timeout, in seconds, for the polling loops in this module.
TIMEOUT = 10
|
||||
|
||||
|
||||
class CaptureConfig:
    """Bit-packed capture configuration.

    Layout of the register value:
        bit 2      -- live mode flag (only meaningful when video/audio is on)
        bit 3      -- video capture enabled
        bit 4      -- audio capture enabled
        bit 5      -- event capture enabled
        bits 8..   -- buffered-mode frame count
    """

    class Type(IntEnum):
        NONE = -1
        LIVE = 0
        BUFFERED = 1

    def __init__(self):
        self.audio = False
        self.video = False
        self.event = False
        self.type = CaptureConfig.Type.NONE
        self.frame_count = 0

    def from_int(self, value: int):
        """Decode a raw register value into the individual fields."""
        self.video = bool(value & (1 << 3))
        self.audio = bool(value & (1 << 4))
        self.event = bool(value & (1 << 5))
        # Bit 2 set means live mode; otherwise buffered.
        self.type = CaptureConfig.Type.LIVE if value & (1 << 2) else CaptureConfig.Type.BUFFERED
        self.frame_count = value >> 8

    def to_int(self) -> int:
        """Encode the fields back into the raw register layout."""
        value = (1 << 3) if self.video else 0
        if self.audio:
            value |= 1 << 4
        if self.event:
            value |= 1 << 5

        # Mode bits only apply when a media stream is selected.
        if self.video or self.audio:
            if self.type == CaptureConfig.Type.LIVE:
                value |= 1 << 2
            else:
                value |= self.frame_count << 8

        return value
|
||||
|
||||
|
||||
class Capturer:
|
||||
|
||||
    def __init__(self, port_io: DeviceIO):
        """Wrap a DeviceIO channel and initialise local capture state."""
        self.__io = port_io
        self.__config = CaptureConfig()
        self.__status = CaptureStatus.Unknown
        # Serialises register command sequences issued by start/stop helpers.
        self.__mutex = Lock()

    @property
    def config(self):
        # Last configuration written by start_capture/stop_capture.
        return self.__config

    @property
    def status(self):
        # Soft status tracked by this wrapper (not read back from the device).
        return self.__status

    @status.setter
    def status(self, value: CaptureStatus):
        self.__status = value

    @property
    def device(self):
        # Underlying DeviceIO handle.
        return self.__io

    @property
    def video_capturer_status(self) -> VideoCaptureStatus:
        """Read the video capturer status register from the device."""
        return VideoCaptureStatus(self.__io.get(TSI_VIDCAP_CAPTURE_STATUS_R, c_int)[1])

    @property
    def audio_capturer_status(self) -> AudioCaptureStatus:
        """Read the audio capturer status register from the device."""
        return AudioCaptureStatus(self.__io.get(TSI_R_AUDCAP_STATUS, c_int)[1])

    @property
    def event_capturer_status(self) -> EventCaptureStatus:
        """Read the event capture control register; the status is its low byte."""
        return EventCaptureStatus(self.__io.get(TSI_EVCAP_CTRL, c_uint32)[1] & 0xFF)

    @property
    def bulk_capturer_status(self) -> BulkCaptureStatus:
        """Read the bulk capture status register; the status is its low two bits."""
        return BulkCaptureStatus(self.__io.get(TSI_BULK_CAPTURE_STATUS_R, c_uint32)[1] & 0x3)
|
||||
|
||||
    def start_capture(self, config: CaptureConfig):
        """Merge *config* into the device configuration and issue the start command.

        Sets ``self.status`` to Running on success, Unknown otherwise.
        """
        self.__mutex.acquire()

        # OR the requested flags into whatever is already configured on the device.
        self.__config.from_int(self.__current_config().to_int() | config.to_int())
        self.__io.set(TSI_CAP_CONFIG, self.__config.to_int())

        # Command 1 == start; a zero return code means the command was accepted.
        if self.__io.set(TSI_W_CAP_COMMAND, 1) == 0:
            self.__status = CaptureStatus.Running
        else:
            self.__status = CaptureStatus.Unknown

        self.__mutex.release()

    def stop_capture(self, config: CaptureConfig):
        """Clear *config*'s flags from the device configuration and issue the stop command.

        Sets ``self.status`` to Stop on success, Unknown otherwise.
        """
        self.__mutex.acquire()

        # Mask the requested flags out of the current device configuration.
        self.__config.from_int(self.__current_config().to_int() & ~config.to_int())
        self.__io.set(TSI_CAP_CONFIG, self.__config.to_int())

        # Command 2 == stop.
        if self.__io.set(TSI_W_CAP_COMMAND, 2) == 0:
            self.__status = CaptureStatus.Stop
        else:
            self.__status = CaptureStatus.Unknown

        self.__mutex.release()
|
||||
|
||||
    def __capture_events(self, event_count=0):
        """Read one event from the device event FIFO.

        Returns an ``EventData`` whose ``data`` stays empty when *event_count*
        is 0 or the device reports a zero-sized event.
        """
        event = EventData()

        if event_count > 0:
            # Zero-length probe: returns the size of the next pending event.
            event_size = self.__io.get(TSI_R_EVCAP_DATA, None, 0)[0]
            if event_size > 0:
                event.data = bytearray(self.__io.get(TSI_R_EVCAP_DATA, c_ubyte, event_size)[1])

        return event

    def get_buffer_capacity(self, stream_number: int = None):
        """Estimate how many frames of the current input fit in device memory.

        Args:
            stream_number: optional DP stream to select before reading geometry.

        Returns:
            int: number of whole frames that fit; 0 when the frame size is zero.
        """
        if stream_number is not None:
            self.__io.set(TSI_DPRX_STREAM_SELECT, stream_number, c_uint32)
            # NOTE(review): MSA command 2 presumably latches the MSA for the
            # selected stream -- confirm against the TSI documentation.
            self.__io.set(TSI_DPRX_MSA_COMMAND_W, 2, c_uint32)

        width = self.__io.get(TSI_R_INPUT_WIDTH , c_uint32)[1]
        height = self.__io.get(TSI_R_INPUT_HEIGHT , c_uint32)[1]
        total_memory_bytes = self.__io.get(TSI_MEMORY_SIZE_R, c_uint64)[1]
        bpc = self.__io.get(TSI_INPUT_COLOR_DEPTH_R , c_uint32)[1]
        color_format = self.__io.get(TSI_INPUT_COLOR_MODE_R , c_uint32)[1]

        bpp = self._get_bits_per_pixel(bpc, color_format)
        # Lines are padded to a 1024-byte boundary (mask = 1023).
        line_alignment = 1023
        pixel_size = self._get_pixel_size(color_format, bpp)
        line_size = width * pixel_size
        line_pitch = self._align(line_size, line_alignment)
        # NOTE(review): height + 1 looks like one extra guard line per frame -- confirm.
        one_frame_size_bytes = (height + 1) * line_pitch

        if one_frame_size_bytes == 0:
            return 0
        else:
            return int(total_memory_bytes / one_frame_size_bytes)
|
||||
|
||||
@staticmethod
|
||||
def _get_bits_per_pixel(bpc, color_format) -> int:
|
||||
if color_format in [0, 1, 6]:
|
||||
return 0
|
||||
elif color_format in [2, 4, 9]:
|
||||
return 3 * bpc
|
||||
elif color_format == 3:
|
||||
return 2 * bpc
|
||||
elif color_format == 5:
|
||||
return 3 * bpc / 2
|
||||
elif color_format == 7:
|
||||
return bpc
|
||||
elif color_format == 8:
|
||||
return bpc
|
||||
|
||||
@staticmethod
|
||||
def _get_pixel_size(color_format, bpp):
|
||||
if color_format == 5:
|
||||
return 4 if bpp >= 18 else 2
|
||||
elif color_format in [7, 8]:
|
||||
return 6 if bpp > 10 else 4
|
||||
else:
|
||||
return 6 if bpp > 32 else 4
|
||||
|
||||
@staticmethod
|
||||
def _align(value, alignment):
|
||||
return (value + alignment) & ~alignment
|
||||
|
||||
    def get_available_events_count(self) -> int:
        """Return how many captured events are waiting in the device FIFO."""
        return self.__io.get(TSI_R_EVCAP_COUNT, c_uint32)[1]

    def capture_n_events(self, events_count: int):
        """Wait until *events_count* events are available, then read them.

        Returns:
            list of EventData.

        Raises:
            ValueError: if *events_count* is not positive.
        """
        if events_count <= 0:
            raise ValueError(f"Events count must be more than 0.")

        buffer = []

        def is_enough_events():
            return self.get_available_events_count() >= events_count

        # Poll once per second until enough events arrive or TIMEOUT expires.
        function_scheduler(is_enough_events, interval=1, timeout=TIMEOUT)

        for i in range(events_count):
            buffer.append(self.__capture_events(self.get_available_events_count()))

        return buffer

    def read_all_events(self):
        """Drain the device event FIFO and return every pending event."""
        buffer = []
        while self.get_available_events_count() > 0:
            buffer.append(self.__capture_events(self.get_available_events_count()))

        return buffer
|
||||
|
||||
    def __capture_audio(self, m_sec=1000):
        """Read one audio frame plus its metadata from the device.

        Args:
            m_sec: remaining capture budget in milliseconds; decremented by the
                duration of the data just read.

        Returns:
            tuple (AudioFrameData, remaining m_sec).
        """

        audio_frame = AudioFrameData()

        audio_frame.channel_count = self.__io.get(TSI_R_AUDCAP_CHANNEL_COUNT, c_int)[1]
        # The device reports the sample size in bytes; store it in bits.
        audio_frame.sample_size = self.__io.get(TSI_R_AUDCAP_SAMPLE_SIZE, c_int)[1] * 8
        audio_frame.sample_rate = self.__io.get(TSI_R_AUDCAP_SAMPLE_RATE, c_int)[1]
        audio_frame.timestamp = self.__io.get(TSI_R_AUDCAP_TIMESTAMP, c_uint64)[1]
        audio_frame.samples = self.__io.get(TSI_R_AUDCAP_SAMPLE_COUNT, c_int)[1]
        audio_frame.frame_counter = self.__io.get(TSI_R_AUDCAP_FRAME_COUNTER, c_int)[1]
        audio_frame.sample_format = self.__io.get(TSI_R_AUDCAP_SAMPLE_FORMAT, c_int)[1]

        min_buff_size = self.__io.get(TSI_R_AUDCAP_MIN_BUFFER_SIZE, c_uint32)[1]
        audio_frame.data = self.__io.get(TSI_R_AUDCAP_SAMPLE_DATA, c_uint8, min_buff_size)[1]

        # Subtract the duration of the block just read from the budget.
        # NOTE(review): the /2 assumes 2-byte (16-bit) samples -- confirm.
        m_sec -= 1000 * len(audio_frame.data) / 2 / audio_frame.channel_count / audio_frame.sample_rate

        return audio_frame, m_sec

    def capture_audio_by_n_frames(self, frames_count: int, timeout: int = None):
        """Capture at least *frames_count* audio frames.

        Frames are read in bursts of up to 10, so the returned list can exceed
        *frames_count*; it may also be shorter if a burst times out.

        Args:
            frames_count: number of frames wanted; must be positive.
            timeout: per-burst timeout in seconds (defaults to TIMEOUT).

        Raises:
            ValueError: if *frames_count* is not positive.
        """
        if frames_count <= 0:
            raise ValueError(f"Frames count must be more than 0.")

        buffer = []
        time_break = False

        timeout = timeout if timeout is not None else TIMEOUT

        while not time_break and len(buffer) < frames_count:
            captured = 0
            start_time = time.time()
            # Inner loop collects up to 10 frames before re-checking the total.
            while captured < 10:
                status = self.audio_capturer_status
                current_time = time.time()
                if current_time - start_time > timeout:
                    time_break = True
                    break
                if status == AudioCaptureStatus.Stop:
                    # Capturer not running yet; keep polling until the timeout.
                    continue

                audio_frame, m_sec = self.__capture_audio()
                if len(audio_frame.data) > 0:
                    captured += 1
                    buffer.append(audio_frame)

        return buffer

    def capture_audio_by_m_sec(self, m_sec: int):
        """Capture audio until roughly *m_sec* milliseconds have been collected.

        Args:
            m_sec: capture duration budget in milliseconds; must be positive.

        Raises:
            ValueError: if *m_sec* is not positive.
        """
        if m_sec <= 0:
            raise ValueError(f"Seconds count must be more than 0.")

        buffer = []
        time_break = False

        while m_sec > 0 and not time_break:
            captured = 0
            start_time = time.time()
            while captured < 10 and m_sec > 0:
                status = self.audio_capturer_status
                current_time = time.time()
                if current_time - start_time > TIMEOUT:
                    time_break = True
                    break
                if status == AudioCaptureStatus.Stop:
                    continue

                # __capture_audio returns the budget reduced by the data just read.
                audio_frame, m_sec = self.__capture_audio(m_sec=m_sec)
                if len(audio_frame.data) > 0:
                    captured += 1
                    buffer.append(audio_frame)

        return buffer
|
||||
|
||||
    def get_available_video_frame_count(self):
        """Return the number of captured video frames ready on the device."""
        return self.__io.get(TSI_VIDCAP_AVAILABLE_FRAME_COUNT, c_int)[1]

    def __check_available_video(self, timeout) -> bool:
        """Poll (1s interval) until the video capturer reports LiveModeActive."""
        def is_video_available(capturer):
            return capturer.video_capturer_status == VideoCaptureStatus.LiveModeActive

        return function_scheduler(is_video_available, self, interval=1, timeout=timeout)

    def __check_available_buffered_video(self, timeout) -> bool:
        """Poll until the video capturer reports Transferring (buffered mode)."""
        def is_video_available(capturer):
            return capturer.video_capturer_status == VideoCaptureStatus.Transferring

        return function_scheduler(is_video_available, self, interval=1, timeout=timeout)

    def __check_available_bulk_data(self, timeout) -> bool:
        """Poll until the bulk capturer reports Transferring."""
        def is_bulk_available(capturer):
            return capturer.bulk_capturer_status == BulkCaptureStatus.Transferring

        return function_scheduler(is_bulk_available, self, interval=1, timeout=timeout)
|
||||
|
||||
    def __capture_video(self, timeout=TIMEOUT,
                        capture_type=CaptureConfig.Type.LIVE) -> Union[VideoFrame, VideoFrameDSC]:
        """Fetch one video frame from the device.

        Args:
            timeout: seconds to wait for the capturer to become ready.
            capture_type: LIVE waits for live mode; BUFFERED waits for transfer.

        Returns:
            VideoFrameDSC when the frame is DSC-compressed, otherwise VideoFrame.

        Raises:
            ValueError: if *timeout* is not positive.
            BufferedCaptureError: buffered frames did not become available in time.
            CaptureError: capturer not ready, HDCP protection, or device errors.
        """
        if timeout <= 0:
            raise ValueError(f"Timeout must be more than 0.")

        if capture_type == CaptureConfig.Type.BUFFERED:
            if not self.__check_available_buffered_video(timeout):
                raise BufferedCaptureError(
                    f"Cannot get frames from buffer. "
                    f"Current buffered capture status is {self.video_capturer_status.name}"
                )
        else:
            if not self.__check_available_video(timeout):
                raise CaptureError(
                    f"Cannot start to capture video. Current video capture status {self.video_capturer_status.name}")

        # Ask the device to latch the next frame for readout.
        try:
            result = self.__io.set(TSI_VIDCAP_CAPTURE_NEXT_W, 0)
            if result == TSI_ERROR_DATA_PROTECTION_ENABLED:
                raise CaptureError("Video data is HDCP protected. Capturing is not available.")
        except AssertionError as e:
            # The IO layer appears to signal failures via AssertionError -- re-wrap.
            raise CaptureError(f"Error: {e}")

        try:
            min_buffer_size = self.__io.get(TSI_R_VIDCAP_MIN_BUFFER_SIZE, c_int)[1]
            if min_buffer_size <= 0:
                raise ValueError("Minimum buffer size must be more than 0")
        except AssertionError as e:
            raise CaptureError(f"Error: {e}")

        try:
            frame_data = bytearray(self.__io.get(TSI_R_VIDCAP_FRAME_DATA, c_uint8, min_buffer_size)[1])
            if len(frame_data) <= 0:
                raise ValueError("Minimum length of captured data must be more than 0")
        except AssertionError as e:
            raise CaptureError(f"Error: {e}")

        frame_attributes = self.__io.get(TSI_VIDCAP_FRAME_HEADER_R, VideoFrameHeader)[1]

        # DSC frames carry a 128-byte PPS header ahead of the payload.
        if frame_attributes.is_dsc() and len(frame_data) > 128:
            vf = VideoFrameDSC()
            vf.compression_info = create_from_pps(frame_data[:128])
        else:
            vf = VideoFrame()

        vf.width = frame_attributes.width
        vf.height = frame_attributes.height
        vf.color_info.bpc = frame_attributes.bpc
        vf.color_info.dynamic_range = frame_attributes.dynamic_range
        vf.color_info.color_format = frame_attributes.color_format
        vf.color_info.colorimetry = frame_attributes.colorimetry
        vf.data_info.component_order = DataInfo.ComponentOrder.CO_UCDRX
        vf.data_info.alignment = DataInfo.Alignment.A_MSB
        vf.data_info.packing = DataInfo.Packing.P_PACKED
        vf.timestamp = frame_attributes.timestamp
        vf.data = frame_data

        return vf
|
||||
|
||||
    def capture_video_by_n_frames(self, frames_count: int, capture_type: CaptureConfig.Type = CaptureConfig.Type.LIVE):
        """Capture *frames_count* video frames.

        In BUFFERED mode the timeout scales with the request size and a
        BufferedCaptureError ends the capture early, returning what was read.

        Raises:
            ValueError: if *frames_count* is not positive.
        """
        if frames_count <= 0:
            raise ValueError(f"Frames count must be more than 0.")

        buffer = []

        if capture_type == CaptureConfig.Type.BUFFERED:
            # At least 10 s, plus ~6 ms of slack per requested frame beyond that.
            timeout = max(10, round(0.006 * frames_count))
            try:
                for i in range(frames_count):
                    buffer.append(self.__capture_video(timeout=timeout, capture_type=capture_type))
            except BufferedCaptureError as e:
                # Buffer exhausted early: return the frames collected so far.
                return buffer
        else:
            for i in range(frames_count):
                buffer.append(self.__capture_video())

        return buffer

    def capture_video_by_n_sec(self, sec: int):
        """Capture live video frames for *sec* seconds of wall-clock time.

        Raises:
            ValueError: if *sec* is not positive.
        """
        if sec <= 0:
            raise ValueError(f"Seconds count must be more than 0.")

        buffer = []

        time_start = time.time()
        while time.time() - time_start < sec:
            buffer.append(self.__capture_video())

        return buffer
|
||||
|
||||
    def set_video_stream_number(self, number: int):
        """Select which DP stream subsequent video operations apply to."""
        self.__mutex.acquire()
        self.__io.set(TSI_DPRX_STREAM_SELECT, number, c_uint32)
        self.__mutex.release()

    def __current_config(self) -> CaptureConfig:
        """Read back the capture configuration currently programmed on the device."""
        config = CaptureConfig()
        config.from_int(self.__io.get(TSI_CAP_CONFIG, c_uint)[1])
        return config
|
||||
|
||||
# Capture CRC
|
||||
def capture_crc(self, crc_frame_count: int = 1) -> List[tuple[int, int, int]]:
|
||||
if crc_frame_count <= 0:
|
||||
raise ValueError(f"Incorrect crc frame count: {crc_frame_count}")
|
||||
|
||||
crc_values = self.__io.get(TSI_VIDCAP_SIGNAL_CRC_R, CrcStruct, crc_frame_count)[1]
|
||||
crc_list = []
|
||||
if crc_frame_count == 1:
|
||||
crc_list = [(crc_values.r, crc_values.g, crc_values.b)]
|
||||
else:
|
||||
[crc_list.append((crc.r, crc.g, crc.b)) for crc in crc_values]
|
||||
return crc_list
|
||||
|
||||
    # Bulk Capturer
    def read_bulk_capture_caps(self) -> CaptureCaps:
        """Read the bulk capture capability structure from the device."""
        return self.__io.get(TSI_BULK_CAPTURE_CAPS_R, CaptureCaps)[1]

    def read_bulk_trigger_caps(self) -> int:
        """Read the bulk trigger capability bitmask."""
        return self.__io.get(TSI_BULK_TRIGGER_CAPS_R, c_uint32)[1]

    def write_bulk_trigger_settings(self, trigger_mask: int, trigger_config: list, trigger_config_ext: list):
        """Program the bulk trigger mask plus its base and extended config arrays."""
        self.__io.set(TSI_BULK_TRIGGER_MASK_W, trigger_mask, c_uint32)
        self.__io.set(TSI_BULK_TRIGGER_CONFIGURATION_W, trigger_config, c_uint32, data_count=len(trigger_config))
        self.__io.set(TSI_BULK_TRIGGER_CONFIGURATION_EXT_W, trigger_config_ext, c_uint32,
                      data_count=len(trigger_config_ext))

    def write_bulk_size(self, size: int):
        """Program the bulk capture size (block 0, offset 0)."""
        data = SBlock()
        data.BLOCK = 0
        data.OFFSET = 0
        data.SIZE = size
        self.__io.set(TSI_BULK_CAPTURE_BLOCK, data, SBlock)

    def write_encoding_type(self, value: EncodingTypeEnum):
        """Set the bulk capture encoding type."""
        self.__io.set(TSI_BULK_CAPTURE_TYPE, value.value, c_uint32)

    def read_encoding_type(self) -> EncodingTypeEnum:
        """Read back the bulk capture encoding type."""
        return EncodingTypeEnum(self.__io.get(TSI_BULK_CAPTURE_TYPE, c_uint32)[1])

    def write_lane_count(self, value: LaneCountEnum):
        """Set the bulk capture lane count."""
        self.__io.set(TSI_BULK_CAPTURE_LANE_COUNT, value.value, c_uint32)

    def read_lane_count(self) -> LaneCountEnum:
        """Read back the bulk capture lane count."""
        return LaneCountEnum(self.__io.get(TSI_BULK_CAPTURE_LANE_COUNT, c_uint32)[1])

    def write_bulk_gpio(self, gpio: bool):
        """Enable 5-bit GPIO capture when *gpio* is True, otherwise disable it."""
        self.__io.set(TSI_BULK_CAPTURE_GPIO_W, TSI_BULK_CAPTURE_GPIO_5BIT if gpio else TSI_BULK_CAPTURE_GPIO_OFF,
                      c_uint32)

    def write_bulk_trigger_position(self, position: int):
        """Set the trigger position within the bulk capture window."""
        self.__io.set(TSI_BULK_TRIGGER_POS, position)
|
||||
|
||||
    def start_bulk_capture(self):
        """Enable event capture and start the bulk capturer."""

        self.__mutex.acquire()

        # Bulk capture interleaves event records, so event capture goes on too.
        self.__io.set(TSI_EVCAP_CTRL, 1)
        self.__io.set(TSI_BULK_CAPTURE_CONTROL_W, TSI_BULK_CAPTURE_START)

        self.__mutex.release()

    def stop_bulk_capture(self):
        """Disable event capture and stop the bulk capturer."""
        self.__mutex.acquire()

        self.__io.set(TSI_EVCAP_CTRL, 0)
        self.__io.set(TSI_BULK_CAPTURE_CONTROL_W, TSI_BULK_CAPTURE_STOP)

        self.__mutex.release()

    def start_event_capture(self):
        """Enable event capture for all event sources."""
        self.__mutex.acquire()

        self.__io.set(TSI_EVCAP_CTRL, 1)
        self.__io.set(TSI_EVCAP_EVENT_SRC_EN, UCD_ALL_EVENTS)

        self.__mutex.release()

    def stop_event_capture(self):
        """Disable event capture."""
        self.__mutex.acquire()

        self.__io.set(TSI_EVCAP_CTRL, 0)

        self.__mutex.release()
|
||||
|
||||
    def clear_bulk_buffer(self):
        """Repeatedly issue the clear command until the bulk capturer is idle.

        Gives up after 5 seconds if the device never reports IDLE.
        """
        max_time_waiting = 5
        value = -1
        time_waited = time.time()

        while value != TSI_BULK_STATUS_IDLE and time.time() - time_waited < max_time_waiting:
            self.__io.set(TSI_BULK_CAPTURE_CLEAR_W, 0)
            value = self.__io.get(TSI_BULK_CAPTURE_STATUS_R)[1]
|
||||
|
||||
    def bulk_capture(self, all_size: int, trigger_enabled: Optional[TriggerVarType]) -> list:
        """Read up to *all_size* bytes of bulk data, interleaved with pending events.

        Args:
            all_size: total bytes wanted; data is read in 1 MiB chunks.
            trigger_enabled: when not None, a stall longer than TIMEOUT seconds
                between bulk chunks ends the capture early.

        Returns:
            list of CapturedData records (Event and Bulk), in arrival order.
        """
        buffer = []
        # One loop iteration per 1 MiB chunk.
        iterations = int(all_size / (1024 * 1024))
        prev_status = 0
        last_bulk_capture_time = time.time()
        for i in range(iterations):
            event_count = self.__io.get(TSI_EVCAP_COUNT_R, c_uint32)[1]
            if event_count > 0:
                event_number = 0
                prev_timestamp = 0
                events_captured = 0
                # Drain at most 500 events before returning to bulk reads.
                while event_count > 0 and events_captured < 500:
                    # Zero-length probe returns the size of the next event.
                    event_size = self.__io.get(TSI_R_EVCAP_DATA, None, 0)[0]
                    if event_size > 0:
                        cap_data = CapturedData()
                        cap_data.data = bytearray(self.__io.get(TSI_R_EVCAP_DATA, c_ubyte, event_size)[1])
                        # Strip the 3-byte record header.
                        cap_data.data = cap_data.data[3:]
                        cap_data.type = CapturedDataType.Event
                        cap_data.timestamp = int.from_bytes(bytes=cap_data.data[:8], byteorder='big')
                        # NOTE(review): event_number counts same-timestamp events
                        # but is never stored anywhere -- confirm intent.
                        if cap_data.timestamp == prev_timestamp:
                            event_number += 1
                        else:
                            event_number = 0
                        buffer.append(cap_data)
                        prev_timestamp = cap_data.timestamp
                        events_captured += 1
                    event_count -= 1

            bulk_status = self.__io.get(TSI_BULK_CAPTURE_STATUS_R, c_int32)[1]
            # IDLE immediately after TRANSFERRING means the device finished.
            if bulk_status == TSI_BULK_STATUS_IDLE and prev_status == TSI_BULK_STATUS_TRANSFERRING:
                return buffer
            prev_status = bulk_status

            now = time.time()
            if trigger_enabled is not None and now - last_bulk_capture_time >= TIMEOUT:
                return buffer

            if not self.__check_available_bulk_data(TIMEOUT):
                return buffer

            # Pause event capture while reading the bulk chunk.
            self.__io.set(TSI_EVCAP_CTRL, 0)
            cap_data = CapturedData()
            cap_data.type = CapturedDataType.Bulk
            result, data, _ = self.__io.get(TSI_BULK_CAPTURE_DATA_R, c_ubyte, 1024 * 1024)
            cap_data.data = bytearray(data)
            if result >= TSI_SUCCESS and len(cap_data.data) > 0:
                buffer.append(cap_data)
                last_bulk_capture_time = time.time()

        return buffer
|
||||
108
UniTAP/dev/modules/capturer/result_object.py
Normal file
108
UniTAP/dev/modules/capturer/result_object.py
Normal file
@@ -0,0 +1,108 @@
|
||||
from UniTAP.common.timestamp import Timestamp
|
||||
|
||||
|
||||
class ResultObject:
    """
    The base class of all capture results.

    Contains information about `start_capture_time`, `end_capture_time`,
    `timestamp` and `buffer` with captured data.
    """

    def __init__(self):
        self.__start_capture_time = 0  # wall-clock start; 0 until set
        self.__end_capture_time = 0  # wall-clock end; 0 until set
        self.__timestamp = Timestamp(0)
        self.__buffer = []  # captured items; extended via the buffer setter

    @property
    def start_capture_time(self) -> int:
        """Return the start capture time as an int."""
        return self.__start_capture_time

    @start_capture_time.setter
    def start_capture_time(self, start_capture_time: int):
        """
        Set start capture time.

        Args:
            start_capture_time (int) - must be more than 0.
        """
        if start_capture_time <= 0:
            # Message fixed: the guard also rejects 0, so "less than 0" was wrong.
            raise ValueError("Start capture time must be more than 0.")
        self.__start_capture_time = start_capture_time

    @property
    def end_capture_time(self) -> int:
        """Return the end capture time as an int."""
        return self.__end_capture_time

    @end_capture_time.setter
    def end_capture_time(self, end_capture_time: int):
        """
        Set end capture time.

        Args:
            end_capture_time (int) - must be more than 0.
        """
        if end_capture_time <= 0:
            raise ValueError("End capture time must be more than 0.")
        self.__end_capture_time = end_capture_time

    @property
    def timestamp(self) -> Timestamp:
        """Return the timestamp as a `Timestamp` object."""
        return self.__timestamp

    @timestamp.setter
    def timestamp(self, timestamp: int):
        """
        Set timestamp.

        Args:
            timestamp (int) - must be more than 0.
        """
        if timestamp <= 0:
            raise ValueError("Timestamp must be more than 0.")
        self.__timestamp.value = timestamp

    @property
    def buffer(self) -> list:
        """Return the buffer holding the captured data."""
        return self.__buffer

    @buffer.setter
    def buffer(self, value):
        """
        Append *value* to the buffer.

        NOTE: assignment APPENDS rather than replaces (`obj.buffer = x`
        adds x to the list). Kept for backward compatibility with callers.
        """
        self.__buffer.append(value)

    def clear(self):
        """Reset times, timestamp and buffer to their initial state."""
        self.__start_capture_time = 0
        self.__end_capture_time = 0
        self.__timestamp = Timestamp(0)
        self.__buffer = []
|
||||
100
UniTAP/dev/modules/capturer/statuses.py
Normal file
100
UniTAP/dev/modules/capturer/statuses.py
Normal file
@@ -0,0 +1,100 @@
|
||||
from enum import IntEnum
|
||||
|
||||
|
||||
class CaptureStatus(IntEnum):
    """Overall capture state tracked by the capturer wrapper."""

    Unknown = -1
    Stop = 0
    Running = 1

    def __str__(self):
        if self.value == CaptureStatus.Stop:
            return "Capture status: is not working"
        if self.value == CaptureStatus.Running:
            return "Capture status: working"
        if self.value == CaptureStatus.Unknown:
            # Unknown renders its raw numeric value.
            return f"Capture status: {self.value}"
        return "Capture status: Unknown state"
|
||||
|
||||
|
||||
class BulkCaptureStatus(IntEnum):
    """State of the bulk capture engine."""

    Unknown = -1
    Idle = 0
    Waiting = 1
    Capturing = 2
    Transferring = 3

    def __str__(self):
        if self.value == BulkCaptureStatus.Unknown:
            # Unknown renders its raw numeric value.
            return f"Bulk capture status: {self.value}"
        descriptions = {
            BulkCaptureStatus.Idle: "Doing nothing",
            BulkCaptureStatus.Waiting: "Waiting",
            BulkCaptureStatus.Capturing: "Capturing in progress",
            BulkCaptureStatus.Transferring: "Transferring",
        }
        return f"Bulk capture status: {descriptions.get(self, 'Unknown state')}"
|
||||
|
||||
|
||||
class AudioCaptureStatus(IntEnum):
    """State of the audio capture engine."""

    Unknown = -1
    Stop = 0
    Running = 1

    def __str__(self):
        if self.value == AudioCaptureStatus.Unknown:
            return f"Audio capture status: {self.value}"
        elif self.value == AudioCaptureStatus.Stop:
            return "Audio capture status: is not working"
        elif self.value == AudioCaptureStatus.Running:
            return "Audio capture status: working"
        else:
            # Fixed copy-paste bug: this branch said "Video capture status".
            return "Audio capture status: Unknown state"
|
||||
|
||||
|
||||
class VideoCaptureStatus(IntEnum):
    """State of the video capture engine."""

    Unknown = -1
    Idle = 0  # Doing nothing
    Capturing = 1  # Capturing in progress
    Transferring = 2  # Transferring in progress
    LiveModeActive = 3  # Live mode active
    Malfunction = 4  # Malfunction (requires restart)
    Done = 7  # Capturing and transferring done

    def __str__(self):
        if self.value == VideoCaptureStatus.Unknown:
            # Unknown renders its raw numeric value.
            return f"Video capture status: {self.value}"
        labels = {
            VideoCaptureStatus.Idle: "Doing nothing",
            VideoCaptureStatus.Capturing: "Capturing in progress",
            VideoCaptureStatus.Transferring: "Transferring in progress",
            VideoCaptureStatus.LiveModeActive: "Live mode active",
            VideoCaptureStatus.Malfunction: "Malfunction (requires restart)",
            VideoCaptureStatus.Done: "Capturing and transferring done",
        }
        return f"Video capture status: {labels.get(self, 'Unknown state')}"
|
||||
|
||||
|
||||
class EventCaptureStatus(IntEnum):
    """State of the event capture engine (low byte of the control register)."""

    Unknown = -1
    Stop = 0
    Running = 1
    Invalid = 0xFF

    def __str__(self):
        if self.value == EventCaptureStatus.Unknown:
            return f"Event capture status: {self.value}"
        elif self.value == EventCaptureStatus.Stop:
            return "Event capture status: is not working"
        elif self.value == EventCaptureStatus.Running:
            return "Event capture status: working"
        else:
            # Reached by Invalid (0xFF). Fixed copy-paste bug: this branch
            # said "Video capture status".
            return "Event capture status: Unknown state"
|
||||
180
UniTAP/dev/modules/capturer/types.py
Normal file
180
UniTAP/dev/modules/capturer/types.py
Normal file
@@ -0,0 +1,180 @@
|
||||
from UniTAP.common.timestamp import Timestamp
|
||||
from ctypes import c_uint8, c_ubyte, c_uint16, c_uint32, c_uint64, Structure
|
||||
from enum import IntEnum
|
||||
from UniTAP.common import ColorInfo, DataInfo, VideoFrame
|
||||
|
||||
|
||||
class CapturedDataType(IntEnum):
    """Kind of payload held by a CapturedData record."""

    Unknown = 0
    Video = 1
    Audio = 2
    Event = 3
    Bulk = 4
|
||||
|
||||
|
||||
class CapturedData:
    """Mutable record for one captured item (video, audio, event or bulk)."""

    def __init__(self):
        # Frame identification.
        self.frame_number = 0
        self.timestamp = 0
        # Geometry.
        self.width = 0
        self.height = 0
        # Colour description.
        self.dataFormat = 0
        self.colorimetry = 0
        self.colorMode = 0
        self.bpc = 0
        # Payload: a CapturedDataType discriminator and the raw bytes
        # (0 until the capturer fills them in).
        self.type = 0
        self.data = 0
|
||||
|
||||
|
||||
class CrcStruct(Structure):
    """Per-frame signal CRC triple as laid out by the device (ABI-critical)."""

    _fields_ = [
        ("r", c_uint16),
        ("g", c_uint16),
        ("b", c_uint16),
    ]
|
||||
|
||||
|
||||
class VideoFrameHeader(Structure):
    """ctypes layout of the header the device prepends to each captured frame.

    Raw bit-field codes are translated into the project's ColorInfo/DataInfo
    enums via the class-level lookup tables and the properties below.
    """

    # Device bpc code -> bits per component.
    __BPC_PA_TO_INT = {
        0: 6,
        1: 8,
        2: 10,
        3: 12,
        4: 16,
        5: 7,
        6: 14
    }

    # Device pixel-packing code -> DataInfo.Packing.
    # NOTE(review): both codes map to P_PLANAR -- confirm that is intended.
    __PA_PACKING_TO_VF_PACKING = {
        0: DataInfo.Packing.P_PLANAR,
        1: DataInfo.Packing.P_PLANAR
    }

    # Device color-format code -> ColorInfo.ColorFormat.
    __PA_CF_TO_CI_CF = {
        0: ColorInfo.ColorFormat.CF_RGB,
        1: ColorInfo.ColorFormat.CF_YCbCr_422,
        2: ColorInfo.ColorFormat.CF_YCbCr_444,
        3: ColorInfo.ColorFormat.CF_YCbCr_420,
        4: ColorInfo.ColorFormat.CF_Y_ONLY,
        5: ColorInfo.ColorFormat.CF_RAW,
        6: ColorInfo.ColorFormat.CF_DSC,
        7: ColorInfo.ColorFormat.CF_IDO_DEFINED
    }
    # Basic colorimetry code -> ColorInfo.Colorimetry
    # (code 3 selects the extended table below).
    __PA_CR_TO_CI_CR = {
        0: ColorInfo.Colorimetry.CM_NONE,
        1: ColorInfo.Colorimetry.CM_SMPTE_170M,
        2: ColorInfo.Colorimetry.CM_ITUR_BT709,
        3: ColorInfo.Colorimetry.CM_NONE
    }

    # Extended colorimetry code -> ColorInfo.Colorimetry.
    __PA_EXT_CR_TO_CI_CR = {
        0: ColorInfo.Colorimetry.CM_xvYCC601,
        1: ColorInfo.Colorimetry.CM_xvYCC709,
        2: ColorInfo.Colorimetry.CM_sYCC601,
        3: ColorInfo.Colorimetry.CM_AdobeYCC601,
        4: ColorInfo.Colorimetry.CM_AdobeRGB,
        5: ColorInfo.Colorimetry.CM_ITUR_BT2020_YcCbcCrc,
        6: ColorInfo.Colorimetry.CM_ITUR_BT2020_RGB,
        7: ColorInfo.Colorimetry.CM_ITUR_BT601
    }

    class PixelAttributes(Structure):
        """Bit-field word describing the pixel format; layout is ABI-critical."""
        _fields_ = [
            ("component_format", c_uint32, 4),
            ("", c_uint32, 1),  # reserved
            ("bpc", c_uint32, 3),
            ("video_mode", c_uint32, 1),
            ("stereo_mode", c_uint32, 1),
            ("field_id", c_uint32, 1),
            ("stereo_id", c_uint32, 1),
            ("blanked", c_uint32, 1),
            ("encrypted", c_uint32, 1),
            ("pixel_packing_format", c_uint32, 2),
            ("color_format", c_uint32, 3),
            ("dynamic_range", c_uint32, 1),
            ("colorimetry", c_uint32, 2),
            ("ext_colorimetry", c_uint32, 3),
            ("", c_uint32, 1),  # reserved
            ("dsc_compressed", c_uint32, 1),
            ("ext_color_format", c_uint32, 2),
            ("video_footer", c_uint32, 1),
            ("video_packet_crc", c_uint32, 1),
        ]

    _fields_ = [
        ('sync', c_uint32),
        ('number', c_uint32),
        ('', c_uint32),  # reserved
        ('size_words', c_uint32),
        ('frame_timestamp', c_uint64, 62),
        ('time_unit', c_uint64, 2),
        ('attributes', PixelAttributes),
        ("f_width", c_uint32, 16),  # raw width; see the width property
        ("f_height", c_uint32, 16)
    ]

    @property
    def bpc(self) -> int:
        """Bits per component decoded from the attribute code (0 if unknown)."""
        return self.__BPC_PA_TO_INT.get(self.attributes.bpc, 0)

    @property
    def dynamic_range(self) -> ColorInfo.DynamicRange:
        """CTA range when the flag is set, otherwise VESA."""
        if self.attributes.dynamic_range:
            return ColorInfo.DynamicRange.DR_CTA
        else:
            return ColorInfo.DynamicRange.DR_VESA

    @property
    def color_format(self) -> ColorInfo.ColorFormat:
        """Color format decoded from the attribute code."""
        return self.__PA_CF_TO_CI_CF.get(self.attributes.color_format,
                                         ColorInfo.ColorFormat.CF_UNKNOWN)

    @property
    def colorimetry(self) -> ColorInfo.Colorimetry:
        """Colorimetry; code 3 switches to the extended colorimetry table."""
        if self.attributes.colorimetry == 3:
            return self.__PA_EXT_CR_TO_CI_CR.get(self.attributes.ext_colorimetry,
                                                 ColorInfo.Colorimetry.CM_NONE)
        else:
            return self.__PA_CR_TO_CI_CR.get(self.attributes.colorimetry,
                                             ColorInfo.Colorimetry.CM_NONE)

    @property
    def timestamp(self) -> Timestamp:
        """Frame timestamp scaled by time_unit.

        The raw counter is masked to 63 bits and multiplied by 100 or 1000
        depending on the low time_unit bit (units per device spec -- confirm).
        """
        value = self.frame_timestamp
        value &= 0x7FFFFFFFFFFFFFFF
        value *= 100 if (self.time_unit & 0x1) else 1000
        return Timestamp(value)

    @property
    def packing(self) -> DataInfo.Packing:
        """Pixel packing decoded from the attribute code."""
        return self.__PA_PACKING_TO_VF_PACKING.get(self.attributes.pixel_packing_format,
                                                   DataInfo.Packing.P_UNKNOWN)

    def is_dsc(self) -> bool:
        """True when the frame payload is DSC-compressed (format code 6)."""
        return self.attributes.color_format == 6

    @property
    def width(self):
        """Frame width in pixels; 4:2:0 frames store half width, so double it."""
        if self.color_format == ColorInfo.ColorFormat.CF_YCbCr_420:
            return self.f_width << 1
        else:
            return self.f_width

    @property
    def height(self):
        """Frame height in pixels."""
        return self.f_height
|
||||
|
||||
|
||||
class CaptureError(Exception):
    """Raised when a capture operation fails."""

    def __init__(self, message: str):
        super().__init__(message)
        self.__message = message
|
||||
|
||||
|
||||
class BufferedCaptureError(Exception):
    """Raised when buffered frames cannot be fetched from the device."""

    def __init__(self, message: str):
        super().__init__(message)
        self.__message = message
|
||||
1
UniTAP/dev/modules/capturer/utils.py
Normal file
1
UniTAP/dev/modules/capturer/utils.py
Normal file
@@ -0,0 +1 @@
|
||||
from UniTAP.libs.lib_uicl.uicl_utils import *
|
||||
Reference in New Issue
Block a user