Skip to content
122 changes: 121 additions & 1 deletion genesis/engine/entities/rigid_entity/rigid_entity.py
Original file line number Diff line number Diff line change
Expand Up @@ -21,7 +21,7 @@
from genesis.utils import mjcf as mju
from genesis.utils import terrain as tu
from genesis.utils import urdf as uu
from genesis.utils.misc import DeprecationError, broadcast_tensor, qd_to_numpy, qd_to_torch
from genesis.utils.misc import DeprecationError, broadcast_tensor, qd_to_numpy, qd_to_torch, tensor_to_array
from genesis.engine.states.entities import RigidEntityState

from ..base_entity import Entity
Expand Down Expand Up @@ -4154,6 +4154,126 @@ def get_mass(self):
mass += link.get_mass()
return mass

@gs.assert_built
def get_height_at(self, x: float, y: float) -> float:
    """
    Get terrain height at world position (x, y).

    Uses bilinear interpolation from the height field.

    Parameters
    ----------
    x : float
        World x position.
    y : float
        World y position.

    Returns
    -------
    height : float
        Interpolated height at (x, y). Outside the height field, the height
        of the nearest valid cell is returned if the lower corner is still
        in bounds, otherwise the terrain base height.
    """
    if not hasattr(self, "terrain_hf"):
        gs.raise_exception("This entity does not have a terrain height field.")

    hf = self.terrain_hf
    h_scale, v_scale = self.terrain_scale

    # Transform world position to terrain local frame
    terrain_pos = tensor_to_array(self.links[0].get_pos())
    terrain_quat = tensor_to_array(self.links[0].get_quat())
    local_pos = gu.inv_transform_by_trans_quat(np.array([x, y, 0.0]), terrain_pos, terrain_quat)

    # Continuous grid coordinates of the query point
    x_idx = local_pos[0] / h_scale
    y_idx = local_pos[1] / h_scale

    x0 = int(np.floor(x_idx))
    y0 = int(np.floor(y_idx))
    x1 = x0 + 1
    y1 = y0 + 1

    # hf is indexed as [row, col] where row corresponds to x and col to y
    if x0 < 0 or y0 < 0 or x1 >= hf.shape[0] or y1 >= hf.shape[1]:
        # Border / out-of-bounds query: fall back to the nearest valid cell
        # when the lower corner is inside the grid, else the terrain base.
        # Cast to float so the annotated return type holds (hf entries may
        # be numpy scalars).
        if 0 <= x0 < hf.shape[0] and 0 <= y0 < hf.shape[1]:
            return float(hf[x0, y0] * v_scale + terrain_pos[2])
        return float(terrain_pos[2])

    # Bilinear interpolation weights within the cell
    tx = x_idx - x0
    ty = y_idx - y0

    h00 = hf[x0, y0]
    h10 = hf[x1, y0]
    h01 = hf[x0, y1]
    h11 = hf[x1, y1]

    h = (1 - tx) * (1 - ty) * h00 + tx * (1 - ty) * h10 + (1 - tx) * ty * h01 + tx * ty * h11
    return float(h * v_scale + terrain_pos[2])

@gs.assert_built
def get_normal_at(self, x: float, y: float) -> np.ndarray:
    """
    Get terrain surface normal at world position (x, y).

    The normal is derived from the bilinearly-interpolated height-field
    gradient at the query point and rotated into the world frame.

    Parameters
    ----------
    x : float
        World x position.
    y : float
        World y position.

    Returns
    -------
    normal : np.ndarray
        Unit normal vector of shape (3,) at (x, y).
    """
    if not hasattr(self, "terrain_hf"):
        gs.raise_exception("This entity does not have a terrain height field.")

    hf = self.terrain_hf
    h_scale, v_scale = self.terrain_scale

    # Express the query point in the terrain's local frame.
    terrain_pos = tensor_to_array(self.links[0].get_pos())
    terrain_quat = tensor_to_array(self.links[0].get_quat())
    local_pos = gu.inv_transform_by_trans_quat(np.array([x, y, 0.0]), terrain_pos, terrain_quat)

    # Continuous grid coordinates (row follows local x, column follows local y).
    u = local_pos[0] / h_scale
    v = local_pos[1] / h_scale
    r0, c0 = int(np.floor(u)), int(np.floor(v))
    r1, c1 = r0 + 1, c0 + 1

    # Outside the grid there is no gradient information: report a flat normal.
    if r0 < 0 or c0 < 0 or r1 >= hf.shape[0] or c1 >= hf.shape[1]:
        return gu.transform_by_quat(np.array([0.0, 0.0, 1.0]), terrain_quat)

    fu, fv = u - r0, v - c0

    h00, h10 = hf[r0, c0], hf[r1, c0]
    h01, h11 = hf[r0, c1], hf[r1, c1]

    # Interpolated partial derivatives of height w.r.t. local x and y.
    dz_dx = ((1 - fv) * (h10 - h00) + fv * (h11 - h01)) * v_scale / h_scale
    dz_dy = ((1 - fu) * (h01 - h00) + fu * (h11 - h10)) * v_scale / h_scale

    # The (unnormalized) normal of the surface z = f(x, y) is (-df/dx, -df/dy, 1).
    n = np.array([-dz_dx, -dz_dy, 1.0])
    norm = np.linalg.norm(n)
    n = n / norm if norm > 1e-8 else np.array([0.0, 0.0, 1.0])

    # Rotate from the terrain local frame into the world frame.
    return gu.transform_by_quat(n, terrain_quat)

# ------------------------------------------------------------------------------------
# ----------------------------------- properties -------------------------------------
# ------------------------------------------------------------------------------------
Expand Down
76 changes: 75 additions & 1 deletion genesis/engine/sensors/contact_force.py
Original file line number Diff line number Diff line change
Expand Up @@ -169,6 +169,7 @@ class ContactForceSensorMetadata(RigidSensorMetadataMixin, NoisySensorMetadataMi

min_force: torch.Tensor = make_tensor_field((0, 3))
max_force: torch.Tensor = make_tensor_field((0, 3))
history_length: int = 1


class ContactForceSensor(
Expand All @@ -185,6 +186,74 @@ def __init__(self, options: ContactForceSensorOptions, sensor_idx: int, sensor_m

self.debug_object: "Mesh" | None = None

@gs.assert_built
def read(self, envs_idx=None) -> torch.Tensor:
    """
    Read the sensor data (with noise applied if applicable).

    Parameters
    ----------
    envs_idx : None | array-like, optional
        Indices of the environments to read. Defaults to all environments.

    Returns
    -------
    data : torch.Tensor
        The latest force reading, shape (n_envs, 3) — or (3,) when the
        simulation is not batched. When ``history_length > 1``, a history
        buffer of shape (n_envs, history_length, 3) — or (history_length, 3)
        when not batched — is returned instead.
    """
    envs_idx = self._sanitize_envs_idx(envs_idx)
    history_length = self._options.history_length

    if history_length == 1:
        return self._get_formatted_data(self._manager.get_cloned_from_cache(self), envs_idx)

    # NOTE(review): the history path reads from the manager's shared ring
    # buffer. If _update_shared_cache() stores pre-noise/pre-delay samples
    # there, this branch returns raw forces, contradicting the "with noise"
    # contract above — confirm the buffer holds post-processed samples.
    buffered_data = self._manager._buffered_data[gs.tc_float]
    cache_slice = slice(self._cache_idx, self._cache_idx + 3)

    n_envs = self._manager._sim.n_envs
    # Determine actual number of envs being queried
    if envs_idx is None:
        n_query_envs = n_envs if n_envs > 0 else 0
    else:
        n_query_envs = len(envs_idx)

    history_data = []
    for i in range(history_length):
        hist = buffered_data.at(i, envs_idx, cache_slice)
        if n_envs == 0:
            hist = hist.reshape(3)
        else:
            hist = hist.reshape(n_query_envs, 3)
        history_data.append(hist)

    # Stack along dim 0 in the non-batched case so the result is
    # (history_length, 3); stacking 1-D (3,) tensors along dim 1 would
    # produce a transposed (3, history_length) result. Batched results are
    # stacked along dim 1 for (n_query_envs, history_length, 3).
    stack_dim = 0 if n_envs == 0 else 1
    return torch.stack(history_data, dim=stack_dim)

@gs.assert_built
def read_ground_truth(self, envs_idx=None) -> torch.Tensor:
    """
    Read the ground truth sensor data (without noise).

    Parameters
    ----------
    envs_idx : None | array-like, optional
        Indices of the environments to read. Defaults to all environments.

    Returns
    -------
    data : torch.Tensor
        The latest ground-truth force, shape (n_envs, 3) — or (3,) when the
        simulation is not batched. When ``history_length > 1``, a history
        buffer of shape (n_envs, history_length, 3) — or (history_length, 3)
        when not batched — is returned instead.
    """
    envs_idx = self._sanitize_envs_idx(envs_idx)
    history_length = self._options.history_length

    # Get ground truth from the ground truth cache (no noise/delay/quantization)
    gt_cache = self._manager.get_cloned_from_cache(self, is_ground_truth=True)
    cache_slice = slice(self._cache_idx, self._cache_idx + 3)

    if history_length == 1:
        return self._get_formatted_data(gt_cache, envs_idx)

    # For history, read from the buffered data.
    # NOTE(review): this is the same shared ring buffer that read() uses;
    # whether it holds raw ground-truth or post-processed samples is decided
    # by _update_shared_cache() — confirm it matches the "without noise"
    # contract of this method.
    buffered_data = self._manager._buffered_data[gs.tc_float]
    n_envs = self._manager._sim.n_envs
    if envs_idx is None:
        n_query_envs = n_envs if n_envs > 0 else 0
    else:
        n_query_envs = len(envs_idx)

    history_data = []
    for i in range(history_length):
        hist = buffered_data.at(i, envs_idx, cache_slice)
        hist = hist.reshape(3) if n_envs == 0 else hist.reshape(n_query_envs, 3)
        history_data.append(hist)

    # Stack along dim 0 in the non-batched case so the result is
    # (history_length, 3) rather than the transposed (3, history_length)
    # that dim=1 would yield on 1-D tensors; batched results stack along
    # dim 1 for (n_query_envs, history_length, 3).
    stack_dim = 0 if n_envs == 0 else 1
    return torch.stack(history_data, dim=stack_dim)

def build(self):
super().build()

Expand All @@ -197,6 +266,7 @@ def build(self):
self._shared_metadata.max_force = concat_with_tensor(
self._shared_metadata.max_force, self._options.max_force, expand=(1, 3)
)
self._shared_metadata.history_length = max(self._shared_metadata.history_length, self._options.history_length)

def _get_return_format(self) -> tuple[int, ...]:
return (3,)
Expand Down Expand Up @@ -284,7 +354,11 @@ def _draw_debug(self, context: "RasterizerContext"):
pos = self._link.get_pos(env_idx).reshape((3,))
quat = self._link.get_quat(env_idx).reshape((4,))

force = self.read(env_idx).reshape((3,))
cache = self._manager.get_cloned_from_cache(self, is_ground_truth=False)
if env_idx is not None:
force = cache[env_idx, :3].reshape((3,))
else:
force = cache[0, :3].reshape((3,))
vec = tensor_to_array(transform_by_quat(force * self._options.debug_scale, quat))

if self.debug_object is not None:
Expand Down
4 changes: 3 additions & 1 deletion genesis/engine/sensors/sensor_manager.py
Original file line number Diff line number Diff line change
Expand Up @@ -112,7 +112,9 @@ def build(self):
update_ground_truth_only &= sensor._options.update_ground_truth_only
sensor._cache_idx = cache_size_per_dtype[dtype]
cache_size_per_dtype[dtype] += sensor._cache_size
max_buffer_len = max(max_buffer_len, sensor._delay_ts + 1)

history_length = getattr(sensor._options, "history_length", 1)
max_buffer_len = max(max_buffer_len, sensor._delay_ts + 1, history_length)
self._should_update_cache_by_type[sensor_cls] = not update_ground_truth_only

cls_cache_end_idx = cache_size_per_dtype[dtype]
Expand Down
6 changes: 6 additions & 0 deletions genesis/options/sensors/options.py
Original file line number Diff line number Diff line change
Expand Up @@ -12,6 +12,7 @@
NonNegativeFloat,
NonNegativeInt,
PositiveFloat,
PositiveInt,
RotationMatrixType,
UnitIntervalVec3Type,
UnitIntervalVec4Type,
Expand Down Expand Up @@ -182,6 +183,9 @@ class ContactForce(RigidSensorOptionsMixin["ContactForceSensor"], NoisySensorOpt
The minimum detectable absolute force per each axis. Values below this will be treated as 0. Default is 0.
max_force : float | array-like[float, float, float], optional
The maximum output absolute force per each axis. Values above this will be clipped. Default is infinity.
history_length : int, optional
The number of historical force readings to store and return. Default is 1 (current value only).
When > 1, the sensor returns a history buffer of shape (history_length, 3) per environment.
Copy link

Copilot AI Apr 11, 2026

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

The docstring says the history buffer is of shape (history_length, 3) per environment, but the sensor system generally returns batched tensors shaped (n_envs, ...) when n_envs > 0. Please clarify the expected return shape for history_length > 1 (e.g., (n_envs, history_length, 3) for batched, (history_length, 3) for single-env) and ensure the implementation matches it.

Suggested change
When > 1, the sensor returns a history buffer of shape (history_length, 3) per environment.
When > 1, the sensor returns a history buffer with shape (history_length, 3) for a single environment,
or (n_envs, history_length, 3) when returned in batched form across multiple environments.

Copilot uses AI. Check for mistakes.
debug_color : array-like[float, float, float, float], optional
The rgba color of the debug arrow. Defaults to (1.0, 0.0, 1.0, 0.5).
debug_scale : float, optional
Expand All @@ -193,6 +197,8 @@ class ContactForce(RigidSensorOptionsMixin["ContactForceSensor"], NoisySensorOpt
min_force: LaxNonNegativeUnboundedVec3FType = 0.0
max_force: LaxNonNegativeUnboundedVec3FType = np.inf

history_length: PositiveInt = 1

debug_color: UnitIntervalVec4Type = (1.0, 0.0, 1.0, 0.5)
debug_scale: PositiveFloat = 0.01

Expand Down
52 changes: 42 additions & 10 deletions tests/conftest.py
Original file line number Diff line number Diff line change
Expand Up @@ -240,11 +240,22 @@ def _get_gpu_indices():
try:
return tuple(range(len(os.listdir(nvidia_gpu_interface_path))))
except FileNotFoundError:
warnings.warn(
f"'{nvidia_gpu_interface_path}' is not available. Multi-GPU support will be disabled. This is expected "
"on WSL2 where the NVIDIA proc interface is not mounted.",
stacklevel=2,
)
# Fallback to nvidia-smi if /proc interface is not available
try:
output = (
subprocess.check_output(["nvidia-smi", "--list-gpus"], stderr=subprocess.STDOUT, timeout=10)
.decode("utf-8")
.strip()
)
# Parse output like "GPU 0: NVIDIA RTX A6000 (UUID: GPU-xxxx)"
gpu_lines = [line for line in output.split("\n") if line.startswith("GPU")]
return tuple(range(len(gpu_lines)))
except (subprocess.SubprocessError, FileNotFoundError):
warnings.warn(
f"'{nvidia_gpu_interface_path}' is not available and nvidia-smi failed. Multi-GPU support will be disabled. This is expected "
"on WSL2 where the NVIDIA proc interface is not mounted.",
stacklevel=2,
)

return (0,)

Expand All @@ -267,11 +278,32 @@ def _torch_get_gpu_idx(device):
if re.search(rf"GPU UUID:\s+GPU-{device_uuid}", device_info):
return device_idx
except FileNotFoundError:
warnings.warn(
f"'{nvidia_gpu_interface_path}' is not available. Multi-GPU support will be disabled. This is expected "
"on WSL2 where the NVIDIA proc interface is not mounted.",
stacklevel=2,
)
# Fallback to nvidia-smi if /proc interface is not available
try:
import subprocess

output = (
subprocess.check_output(
["nvidia-smi", "--query-gpu=uuid", "--format=csv,noheader,nounits"],
stderr=subprocess.STDOUT,
timeout=10,
)
.decode("utf-8")
.strip()
)
# Parse output like "GPU-xxxx\nGPU-yyyy\n..."
uuids = [line.strip() for line in output.split("\n") if line.strip()]
for device_idx, uuid in enumerate(uuids):
if uuid == f"GPU-{device_uuid}":
return device_idx
# If not found, return -1 to indicate error
return -1
except (subprocess.SubprocessError, FileNotFoundError):
warnings.warn(
f"'{nvidia_gpu_interface_path}' is not available and nvidia-smi failed. Multi-GPU support will be disabled. This is expected "
"on WSL2 where the NVIDIA proc interface is not mounted.",
stacklevel=2,
)

return -1

Expand Down