Implementation: CARLA Simulator — World Tick With Queue
| Knowledge Sources | |
|---|---|
| Domains | Simulation, Perception |
| Last Updated | 2026-02-15 00:00 GMT |
Overview
Concrete tool for advancing the simulation one frame and collecting temporally aligned data from all sensors using thread-safe queues provided by the CARLA simulator and Python standard library.
Description
World.tick() advances the simulation by one fixed timestep in synchronous mode. It blocks until the server has completed physics, rendering, and sensor data generation for that frame, then returns the frame number. Combined with Python's queue.Queue (populated by sensor callbacks registered via Sensor.listen()), this forms the producer-consumer synchronization pattern. After each tick, calling Queue.get(timeout) on each sensor's queue retrieves exactly one data frame per sensor, all guaranteed to correspond to the same simulation instant.
Usage
This pattern is used inside the main data collection loop. World.tick() is called once per iteration to advance the simulation, followed by Queue.get() calls for each sensor to collect the synchronized data. The timeout parameter prevents the loop from hanging if a sensor fails to produce data.
Code Reference
Source Location
- Repository: CARLA
- File:
- File: LibCarla/source/carla/client/World.cpp (L156-161, C++ tick); PythonAPI/carla/src/World.cpp (L321-323, Python binding)
Signature
World.tick(seconds: float = 10.0) -> int
queue.Queue.get(block: bool = True, timeout: float = None) -> Any
Import
import carla
import queue
I/O Contract
Inputs
| Name | Type | Required | Description |
|---|---|---|---|
| seconds (tick) | float | No | Maximum time in seconds the client waits for the server to complete the tick. Default 10.0. Raises a RuntimeError if the server does not respond within that time. |
| block (get) | bool | No | If True (default), block until an item is available or timeout expires. |
| timeout (get) | float | No | Maximum time in seconds to wait for data in the queue. Raises queue.Empty if exceeded. |
Outputs
| Name | Type | Description |
|---|---|---|
| frame_id | int | The simulation frame number for the completed tick. Returned by World.tick(). |
| sensor_data | carla.SensorData | The sensor observation retrieved from the queue. Typed subclass depends on sensor modality (carla.Image, carla.LidarMeasurement, etc.). |
Usage Examples
Basic Example
# Basic synchronous collection loop: world.tick() advances the simulation one
# fixed step, then each sensor queue (filled by Sensor.listen callbacks)
# yields exactly one data item for that step.
import carla
import queue

client = carla.Client('localhost', 2000)
client.set_timeout(10.0)
world = client.get_world()

# Enable synchronous mode; keep the original settings so they can be
# restored even if the loop raises.
original_settings = world.get_settings()
settings = world.get_settings()
settings.synchronous_mode = True
settings.fixed_delta_seconds = 0.05
world.apply_settings(settings)

# Assume sensors are already spawned and attached to ego_vehicle
# Create queues and register callbacks
rgb_queue = queue.Queue()
lidar_queue = queue.Queue()
rgb_sensor.listen(rgb_queue.put)
lidar_sensor.listen(lidar_queue.put)


def _get_for_frame(sensor_queue, frame_id, timeout=2.0):
    """Return the queued item belonging to simulation frame `frame_id`.

    Discards stale items left over from previously skipped frames: if one
    sensor timed out on an earlier tick while the other was consumed, the
    late item stays in its queue and would otherwise be returned for the
    wrong frame on the next iteration, permanently desynchronizing the
    loop. Raises queue.Empty if no matching item arrives within `timeout`.
    """
    while True:
        data = sensor_queue.get(timeout=timeout)
        if data.frame >= frame_id:
            return data
        # data.frame < frame_id: stale leftover from a skipped frame — drop it.


# Synchronous data collection loop
try:
    for frame_num in range(500):  # Collect 500 frames
        # Advance the simulation by one fixed timestep; blocks until the
        # server finished physics, rendering and sensor generation.
        frame_id = world.tick()
        # Collect synchronized data from all sensors
        try:
            rgb_data = _get_for_frame(rgb_queue, frame_id)
            lidar_data = _get_for_frame(lidar_queue, frame_id)
        except queue.Empty:
            print(f"WARNING: Sensor data missing at frame {frame_id}")
            continue
        # Verify temporal alignment with an explicit check rather than
        # `assert`, which is stripped under `python -O`.
        if rgb_data.frame != frame_id or lidar_data.frame != frame_id:
            print(f"WARNING: Frame mismatch at frame {frame_id}")
            continue
        # Process the synchronized data
        rgb_data.save_to_disk(f'output/rgb/{frame_id:06d}.png')
        lidar_data.save_to_disk(f'output/lidar/{frame_id:06d}.ply')
        if frame_num % 100 == 0:
            print(f"Collected frame {frame_id}")
finally:
    # Stop the callbacks so the queues stop filling, then restore the
    # original (asynchronous) settings for other clients.
    rgb_sensor.stop()
    lidar_sensor.stop()
    world.apply_settings(original_settings)
Full Multi-Sensor Collection Loop
# Full multi-sensor synchronized collection: rgb + depth cameras, lidar,
# GNSS and IMU, all advanced together by world.tick() and drained through
# per-sensor queues so every saved sample shares one simulation frame.
import carla
import queue
import numpy as np

client = carla.Client('localhost', 2000)
client.set_timeout(10.0)
world = client.get_world()

original_settings = world.get_settings()
settings = world.get_settings()
settings.synchronous_mode = True
settings.fixed_delta_seconds = 0.05  # 20 Hz
world.apply_settings(settings)

bp_lib = world.get_blueprint_library()

# Fail fast with a clear message instead of an opaque IndexError when the
# world contains no vehicle to attach sensors to.
vehicles = world.get_actors().filter('vehicle.*')
if not vehicles:
    raise RuntimeError("No vehicle found in the world; spawn an ego vehicle first")
ego_vehicle = vehicles[0]


# --- Spawn sensors ---
def _spawn_camera(blueprint_id):
    """Spawn a 1920x1080, 110-degree-FOV camera on the ego roof."""
    bp = bp_lib.find(blueprint_id)
    bp.set_attribute('image_size_x', '1920')
    bp.set_attribute('image_size_y', '1080')
    bp.set_attribute('fov', '110')
    return world.spawn_actor(
        bp,
        carla.Transform(carla.Location(x=1.5, z=2.4)),
        attach_to=ego_vehicle
    )


rgb_sensor = _spawn_camera('sensor.camera.rgb')
depth_sensor = _spawn_camera('sensor.camera.depth')

lidar_bp = bp_lib.find('sensor.lidar.ray_cast')
lidar_bp.set_attribute('channels', '64')
lidar_bp.set_attribute('range', '100.0')
lidar_bp.set_attribute('points_per_second', '1200000')
lidar_bp.set_attribute('rotation_frequency', '20')
lidar_sensor = world.spawn_actor(
    lidar_bp,
    carla.Transform(carla.Location(z=2.5)),
    attach_to=ego_vehicle
)

gnss_sensor = world.spawn_actor(
    bp_lib.find('sensor.other.gnss'),
    carla.Transform(),
    attach_to=ego_vehicle
)
imu_sensor = world.spawn_actor(
    bp_lib.find('sensor.other.imu'),
    carla.Transform(),
    attach_to=ego_vehicle
)

sensors = [rgb_sensor, depth_sensor, lidar_sensor, gnss_sensor, imu_sensor]

# --- Create queues and register callbacks ---
sensor_queues = {}
sensor_names = ['rgb', 'depth', 'lidar', 'gnss', 'imu']
for name, sensor in zip(sensor_names, sensors):
    q = queue.Queue()
    sensor.listen(q.put)
    sensor_queues[name] = q

# --- Synchronous collection loop ---
NUM_FRAMES = 1000
TIMEOUT = 2.0


def _get_for_frame(sensor_queue, frame_id):
    """Return the queued item belonging to simulation frame `frame_id`.

    Drops stale items left over from previously skipped frames; without
    this, one late sensor leaves its item in the queue and every later
    iteration retrieves data one frame behind, so the alignment check
    fails forever. Raises queue.Empty on timeout.
    """
    while True:
        data = sensor_queue.get(timeout=TIMEOUT)
        if data.frame >= frame_id:
            return data
        # Stale frame from an earlier skipped iteration — discard.


try:
    for i in range(NUM_FRAMES):
        # Advance simulation by one fixed timestep.
        frame_id = world.tick()
        # Collect all sensor data for this frame
        frame_data = {}
        all_received = True
        for name in sensor_names:
            try:
                frame_data[name] = _get_for_frame(sensor_queues[name], frame_id)
            except queue.Empty:
                print(f"Frame {frame_id}: missing {name} data")
                all_received = False
        if not all_received:
            continue
        # Verify all data is from the same frame — explicit check instead
        # of `assert`, which is stripped under `python -O`.
        frames = {name: frame_data[name].frame for name in sensor_names}
        if len(set(frames.values())) != 1:
            print(f"Frame {frame_id}: frame mismatch, skipping: {frames}")
            continue
        # Save/process each sensor's data
        frame_data['rgb'].save_to_disk(f'output/rgb/{frame_id:06d}.png')
        frame_data['depth'].save_to_disk(
            f'output/depth/{frame_id:06d}.png',
            carla.ColorConverter.LogarithmicDepth
        )
        frame_data['lidar'].save_to_disk(f'output/lidar/{frame_id:06d}.ply')
        # Access GNSS data
        gnss = frame_data['gnss']
        lat, lon, alt = gnss.latitude, gnss.longitude, gnss.altitude
        # Access IMU data
        imu = frame_data['imu']
        accel = imu.accelerometer  # carla.Vector3D
        gyro = imu.gyroscope       # carla.Vector3D
        compass = imu.compass      # float (radians)
        if i % 100 == 0:
            print(f"Frame {frame_id}: GPS=({lat:.6f}, {lon:.6f}, {alt:.1f})")
finally:
    # Cleanup: stop callbacks, destroy actors, restore async settings.
    for sensor in sensors:
        sensor.stop()
        sensor.destroy()
    world.apply_settings(original_settings)