Anonymous
Перемещение камеры по координатам x, y, z и её матрице вращения с возможностью генерировать проекцию камеры (вид из камеры) на основе реальной сцены
Сообщение
Anonymous » 27 сен 2024, 13:29
Я написал код для поиска проекции объекта, видимого с разных точек зрения камеры.
Код: Выделить всё
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d.art3d import Poly3DCollection
from mpl_toolkits.mplot3d import Axes3D
def load_obj(filename):
    """Load vertex positions and face indices from a Wavefront OBJ file.

    Parameters
    ----------
    filename : str
        Path to the ``.obj`` file.

    Returns
    -------
    tuple
        ``(vertices, faces)`` where ``vertices`` is an ``(N, 3)`` float
        ndarray and ``faces`` is a list of lists of 0-based vertex indices.
    """
    vertices = []
    faces = []
    with open(filename, 'r') as file:
        for line in file:
            if line.startswith('v '):
                vertices.append(list(map(float, line.strip().split()[1:])))
            elif line.startswith('f '):
                face = []
                for token in line.strip().split()[1:]:
                    idx = int(token.split('/')[0])
                    # OBJ indices are 1-based; negative indices count back
                    # from the end of the vertex list, which matches
                    # Python/NumPy negative indexing, so keep them as-is.
                    face.append(idx - 1 if idx > 0 else idx)
                faces.append(face)
    return np.array(vertices), faces
def plot_camera_frustum(ax, camera_pos, camera_dir, up_vector, fov_h, fov_v, near_dist, far_dist, color='r'):
    """Draw the camera's viewing frustum as a translucent box on a 3D axes.

    Parameters
    ----------
    ax : mpl_toolkits.mplot3d.Axes3D
        Axes to draw on.
    camera_pos : ndarray
        Camera position in world coordinates, shape (3,).
    camera_dir : ndarray
        Viewing direction in world coordinates.
    up_vector : ndarray
        Approximate world "up" used to build the camera basis.
    fov_h, fov_v : float
        Horizontal / vertical field of view in degrees.
    near_dist, far_dist : float
        Distances of the near and far frustum planes from the camera.
    color : str
        Face color of the frustum polygons.
    """
    # Build the camera basis from the viewing direction and the up hint.
    right = np.cross(camera_dir, up_vector)
    if np.allclose(right, 0):
        # camera_dir is parallel to up_vector: fall back to an arbitrary axis.
        right = np.array([1, 0, 0])
    up = np.cross(right, camera_dir)

    # Normalize (epsilon guards against division by zero).
    forward = camera_dir / (np.linalg.norm(camera_dir) + 1e-10)
    right = right / (np.linalg.norm(right) + 1e-10)
    up = up / (np.linalg.norm(up) + 1e-10)

    # Half-extents of the image plane per unit distance.
    half_h = 2 * np.tan(np.radians(fov_v / 2))
    half_w = 2 * np.tan(np.radians(fov_h / 2))

    # Corners on each plane, ordered: top-left, top-right, bottom-right,
    # bottom-left — near plane first, then far plane.
    corners = []
    for dist in (near_dist, far_dist):
        center = camera_pos + dist * forward
        dy = (half_h * dist / 2) * up
        dx = (half_w * dist / 2) * right
        corners.append(center + dy - dx)
        corners.append(center + dy + dx)
        corners.append(center - dy + dx)
        corners.append(center - dy - dx)

    # Quads: near face, far face, left, right, top, bottom.
    quads = [[0, 1, 2, 3], [4, 5, 6, 7], [0, 4, 7, 3],
             [1, 5, 6, 2], [0, 1, 5, 4], [3, 2, 6, 7]]
    ax.add_collection3d(
        Poly3DCollection(np.array(corners)[quads], alpha=0.2, facecolor=color))
def plot_obj_with_camera(filename, translation, camera_pos, camera_target, fov_h, fov_v):
    """Plot a translated OBJ mesh with its camera frustum (left) and the
    perspective projection seen by that camera (right).

    Parameters
    ----------
    filename : str
        Path to the ``.obj`` file.
    translation : ndarray
        (3,) translation applied to every vertex before plotting.
    camera_pos, camera_target : ndarray
        Camera position and look-at point in world coordinates.
    fov_h, fov_v : float
        Horizontal / vertical field of view in degrees.
    """
    vertices, faces = load_obj(filename)
    # Apply translation to vertices
    vertices += translation
    fig = plt.figure(figsize=(15, 6))

    # --- 3D view subplot --------------------------------------------------
    ax1 = fig.add_subplot(121, projection='3d')
    # NOTE: indexing vertices[faces] assumes all faces have the same number
    # of vertices (e.g. all triangles).
    mesh = Poly3DCollection(vertices[faces], alpha=0.3)
    mesh.set_facecolor([0, 1, 1])  # cyan
    ax1.add_collection3d(mesh)
    # Set plot limits
    ax1.set_xlim(0, 0.1)
    ax1.set_ylim(0, 0.1)
    ax1.set_zlim(0, 0.1)

    # Unit viewing direction from position to target.
    camera_dir = camera_target - camera_pos
    camera_dir = camera_dir / np.linalg.norm(camera_dir)
    # Define up vector (assuming Z is up)
    up_vector = np.array([0, 0, 1])

    ax1.scatter(*camera_pos, color='r', s=100, label='Camera')
    near_dist = 0.01
    far_dist = 0.05
    plot_camera_frustum(ax1, camera_pos, camera_dir, up_vector, fov_h, fov_v, near_dist, far_dist)
    ax1.set_xlabel('X')
    ax1.set_ylabel('Y')
    ax1.set_zlabel('Z')
    ax1.legend()
    ax1.set_title('3D Object with Camera and View Frustum')

    # --- Camera view subplot ------------------------------------------------
    ax2 = fig.add_subplot(122)
    camera_right = np.cross(camera_dir, up_vector)
    if np.allclose(camera_right, 0):
        # If camera_right is zero (looking straight up/down), pick another up.
        up_vector = np.array([0, 1, 0])
        camera_right = np.cross(camera_dir, up_vector)
    camera_up = np.cross(camera_right, camera_dir)
    camera_right = camera_right / (np.linalg.norm(camera_right) + 1e-10)
    camera_up = camera_up / (np.linalg.norm(camera_up) + 1e-10)

    # Camera basis vectors as COLUMNS.  World -> camera is then
    # v_cam = v_world @ camera_matrix, i.e. [v.right, v.up, v.(-dir)].
    # (The original code multiplied by camera_matrix.T, which applies the
    # inverse rotation and produced a wrong projection.)
    camera_matrix = np.column_stack((camera_right, camera_up, -camera_dir))
    vertices_camera = np.dot(vertices - camera_pos, camera_matrix)

    # Perspective divide.  The camera looks down -Z in camera space, so
    # visible points have negative z; dividing by -z (not z) keeps the
    # image unflipped.
    depth = -vertices_camera[:, 2]
    vertices_2d = vertices_camera[:, :2] / (depth[:, np.newaxis] + 1e-10)

    # Plot only faces fully in front of the camera.
    for face in faces:
        if np.all(depth[face] > 0):
            ax2.fill(vertices_2d[face, 0], vertices_2d[face, 1], alpha=0.3, color='cyan')

    # Match the axis limits to the field of view: on the normalized image
    # plane at unit distance the half-extents are tan(fov/2).  This is why
    # the object previously looked fully visible — the fixed ±2 limits were
    # far wider than the actual frustum.
    half_w = np.tan(np.radians(fov_h / 2))
    half_h = np.tan(np.radians(fov_v / 2))
    ax2.set_xlim(-half_w, half_w)
    ax2.set_ylim(-half_h, half_h)
    ax2.set_aspect('equal')
    ax2.set_title("Camera View")
    ax2.set_xlabel("X")
    ax2.set_ylabel("Y")
    plt.tight_layout()
    plt.show()
# Usage example: render the mesh, the camera frustum and the camera view.
obj_filename = 'T_Joint_2a0e9f91/mesh/T_Joint_2a0e9f91.obj'
translation = np.array([0.01, 0.01, 0.0]) # Translation applied to the mesh
camera_pos = np.array([0.03, 0.015, 0.03]) # Camera position in world coordinates
camera_target = np.array([0.04, 0.025, 0]) # Look-at point in world coordinates
fov_h = 42.62 # Horizontal field of view in degrees
fov_v = 56.31 # Vertical field of view in degrees
plot_obj_with_camera(obj_filename, translation, camera_pos, camera_target, fov_h, fov_v)
На изображении ниже видно, что мой объект не полностью находится в поле зрения камеры, но когда я рисую проекцию, я всё равно вижу весь объект, как показано на рисунке 2. Почему это происходит и как это исправить?
[img]
https://i.sstatic.net/MB9Wz7tp.png[/img]
Для простоты вы можете попробовать то же самое для куба вместо объекта из obj-файла.
Я думаю, нужно установить пределы осей на втором графике такими же, как размеры пирамиды видимости (frustum) камеры.
Подробнее здесь:
https://stackoverflow.com/questions/79028975/move-camera-through-x-y-z-and-its-rotation-matrix-and-be-able-to-generate-ca
1727432999
Anonymous
Я написал код для поиска проекции объекта, видимого с разных точек зрения камеры. [code]import numpy as np import matplotlib.pyplot as plt from mpl_toolkits.mplot3d.art3d import Poly3DCollection from mpl_toolkits.mplot3d import Axes3D def load_obj(filename): vertices = [] faces = [] with open(filename, 'r') as file: for line in file: if line.startswith('v '): vertices.append(list(map(float, line.strip().split()[1:]))) elif line.startswith('f '): faces.append([int(face.split('/')[0]) - 1 for face in line.strip().split()[1:]]) return np.array(vertices), faces def plot_camera_frustum(ax, camera_pos, camera_dir, up_vector, fov_h, fov_v, near_dist, far_dist, color='r'): # Calculate camera axes camera_right = np.cross(camera_dir, up_vector) if np.allclose(camera_right, 0): camera_right = np.array([1, 0, 0]) camera_up = np.cross(camera_right, camera_dir) # Normalize vectors camera_dir = camera_dir / (np.linalg.norm(camera_dir) + 1e-10) camera_right = camera_right / (np.linalg.norm(camera_right) + 1e-10) camera_up = camera_up / (np.linalg.norm(camera_up) + 1e-10) # Calculate frustum corners near_height = 2 * np.tan(np.radians(fov_v / 2)) * near_dist near_width = 2 * np.tan(np.radians(fov_h / 2)) * near_dist far_height = 2 * np.tan(np.radians(fov_v / 2)) * far_dist far_width = 2 * np.tan(np.radians(fov_h / 2)) * far_dist # Near plane corners near_center = camera_pos + near_dist * camera_dir near_top_left = near_center + (near_height / 2) * camera_up - (near_width / 2) * camera_right near_top_right = near_center + (near_height / 2) * camera_up + (near_width / 2) * camera_right near_bottom_left = near_center - (near_height / 2) * camera_up - (near_width / 2) * camera_right near_bottom_right = near_center - (near_height / 2) * camera_up + (near_width / 2) * camera_right # Far plane corners far_center = camera_pos + far_dist * camera_dir far_top_left = far_center + (far_height / 2) * camera_up - (far_width / 2) * camera_right far_top_right = far_center + (far_height / 2) * 
camera_up + (far_width / 2) * camera_right far_bottom_left = far_center - (far_height / 2) * camera_up - (far_width / 2) * camera_right far_bottom_right = far_center - (far_height / 2) * camera_up + (far_width / 2) * camera_right # Plot frustum frustum_points = [near_top_left, near_top_right, near_bottom_right, near_bottom_left, far_top_left, far_top_right, far_bottom_right, far_bottom_left] frustum_faces = [[0, 1, 2, 3], [4, 5, 6, 7], [0, 4, 7, 3], [1, 5, 6, 2], [0, 1, 5, 4], [3, 2, 6, 7]] frustum_collection = Poly3DCollection(np.array(frustum_points)[frustum_faces], alpha=0.2, facecolor=color) ax.add_collection3d(frustum_collection) def plot_obj_with_camera(filename, translation, camera_pos, camera_target, fov_h, fov_v): vertices, faces = load_obj(filename) # Apply translation to vertices vertices += translation fig = plt.figure(figsize=(15, 6)) # 3D view subplot ax1 = fig.add_subplot(121, projection='3d') mesh = Poly3DCollection(vertices[faces], alpha=0.3) face_color = [0, 1, 1] # Cyan color (RGB: 0, 1, 1) mesh.set_facecolor(face_color) ax1.add_collection3d(mesh) # Set plot limits ax1.set_xlim(0, 0.1) ax1.set_ylim(0, 0.1) ax1.set_zlim(0, 0.1) # Calculate camera direction camera_dir = camera_target - camera_pos camera_dir = camera_dir / np.linalg.norm(camera_dir) # Define up vector (assuming Z is up) up_vector = np.array([0, 0, 1]) # Plot camera point ax1.scatter(*camera_pos, color='r', s=100, label='Camera') # Plot camera frustum near_dist = 0.01 far_dist = 0.05 plot_camera_frustum(ax1, camera_pos, camera_dir, up_vector, fov_h, fov_v, near_dist, far_dist) ax1.set_xlabel('X') ax1.set_ylabel('Y') ax1.set_zlabel('Z') ax1.legend() ax1.set_title('3D Object with Camera and View Frustum') # Camera view subplot ax2 = fig.add_subplot(122) # Project vertices onto camera plane camera_right = np.cross(camera_dir, up_vector) if np.allclose(camera_right, 0): # If camera_right is zero, choose a different up_vector up_vector = np.array([0, 1, 0]) camera_right = 
np.cross(camera_dir, up_vector) camera_up = np.cross(camera_right, camera_dir) camera_right = camera_right / (np.linalg.norm(camera_right) + 1e-10) camera_up = camera_up / (np.linalg.norm(camera_up) + 1e-10) # Create camera transformation matrix camera_matrix = np.column_stack((camera_right, camera_up, -camera_dir)) camera_translation = -camera_pos # Transform vertices to camera space vertices_camera = np.dot(vertices + camera_translation, camera_matrix.T) # Project vertices onto 2D plane vertices_2d = vertices_camera[:, :2] / (vertices_camera[:, 2, np.newaxis] + 1e-10) # Print debug information print("Vertices 2D shape:", vertices_2d.shape) print("Vertices 2D min/max:", np.min(vertices_2d), np.max(vertices_2d)) # Plot 2D projection for face in faces: ax2.fill(vertices_2d[face, 0], vertices_2d[face, 1], alpha=0.3, color = 'cyan') ax2.set_xlim(-2, 2) # Adjusted limits ax2.set_ylim(-2, 2) # Adjusted limits ax2.set_aspect('equal') ax2.set_title("Camera View") ax2.set_xlabel("X") ax2.set_ylabel("Y") plt.tight_layout() plt.show() # Usage obj_filename = 'T_Joint_2a0e9f91/mesh/T_Joint_2a0e9f91.obj' translation = np.array([0.01, 0.01, 0.0]) # Translation vector camera_pos = np.array([0.03, 0.015, 0.03]) # Camera directly above the object camera_target = np.array([0.04, 0.025, 0]) # Looking directly at the center of the object fov_h = 42.62 # Horizontal field of view in degrees fov_v = 56.31 # Vertical field of view in degrees plot_obj_with_camera(obj_filename, translation, camera_pos, camera_target, fov_h, fov_v) [/code] На данном изображении вы можете видеть, что мой объект на рисунке ниже не полностью находится в поле зрения камеры, но все же, когда я рисую проекцию объекта, который я вижу весь объект, как показано на рисунке 2. Почему это происходит и как это улучшить? [img]https://i.sstatic. net/MB9Wz7tp.png[/img] Для простоты вы можете попробовать то же самое для куба вместо объекта из obj-файла. 
Я думаю установки предела оси на втором графике таким же, как и размер усеченного конуса. Подробнее здесь: [url]https://stackoverflow.com/questions/79028975/move-camera-through-x-y-z-and-its-rotation-matrix-and-be-able-to-generate-ca[/url]