Initial commit

Thomas Forgione 2024-06-14 16:43:10 +02:00
commit 242595e119
14 changed files with 944 additions and 0 deletions

178
.gitignore vendored Normal file

@@ -0,0 +1,178 @@
data
# Created by https://www.toptal.com/developers/gitignore/api/python
# Edit at https://www.toptal.com/developers/gitignore?templates=python
### Python ###
# Byte-compiled / optimized / DLL files
__pycache__/
*.py[cod]
*$py.class
# C extensions
*.so
# Distribution / packaging
.Python
build/
develop-eggs/
dist/
downloads/
eggs/
.eggs/
lib/
lib64/
parts/
sdist/
var/
wheels/
share/python-wheels/
*.egg-info/
.installed.cfg
*.egg
MANIFEST
# PyInstaller
# Usually these files are written by a python script from a template
# before PyInstaller builds the exe, so as to inject date/other infos into it.
*.manifest
*.spec
# Installer logs
pip-log.txt
pip-delete-this-directory.txt
# Unit test / coverage reports
htmlcov/
.tox/
.nox/
.coverage
.coverage.*
.cache
nosetests.xml
coverage.xml
*.cover
*.py,cover
.hypothesis/
.pytest_cache/
cover/
# Translations
*.mo
*.pot
# Django stuff:
*.log
local_settings.py
db.sqlite3
db.sqlite3-journal
# Flask stuff:
instance/
.webassets-cache
# Scrapy stuff:
.scrapy
# Sphinx documentation
docs/_build/
# PyBuilder
.pybuilder/
target/
# Jupyter Notebook
.ipynb_checkpoints
# IPython
profile_default/
ipython_config.py
# pyenv
# For a library or package, you might want to ignore these files since the code is
# intended to run in multiple environments; otherwise, check them in:
# .python-version
# pipenv
# According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
# However, in case of collaboration, if having platform-specific dependencies or dependencies
# having no cross-platform support, pipenv may install dependencies that don't work, or not
# install all needed dependencies.
#Pipfile.lock
# poetry
# Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control.
# This is especially recommended for binary packages to ensure reproducibility, and is more
# commonly ignored for libraries.
# https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control
#poetry.lock
# pdm
# Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control.
#pdm.lock
# pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it
# in version control.
# https://pdm.fming.dev/#use-with-ide
.pdm.toml
# PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm
__pypackages__/
# Celery stuff
celerybeat-schedule
celerybeat.pid
# SageMath parsed files
*.sage.py
# Environments
.env
.venv
env/
venv/
ENV/
env.bak/
venv.bak/
# Spyder project settings
.spyderproject
.spyproject
# Rope project settings
.ropeproject
# mkdocs documentation
/site
# mypy
.mypy_cache/
.dmypy.json
dmypy.json
# Pyre type checker
.pyre/
# pytype static type analyzer
.pytype/
# Cython debug symbols
cython_debug/
# PyCharm
# JetBrains specific template is maintained in a separate JetBrains.gitignore that can
# be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore
# and can be added to the global gitignore or merged into this file. For a more nuclear
# option (not recommended) you can uncomment the following to ignore the entire idea folder.
#.idea/
### Python Patch ###
# Poetry local configuration file - https://python-poetry.org/docs/configuration/#local-configuration
poetry.toml
# ruff
.ruff_cache/
# LSP config files
pyrightconfig.json
# End of https://www.toptal.com/developers/gitignore/api/python

29
calibration.py Executable file

@@ -0,0 +1,29 @@
#!/usr/bin/env python

import functools
import numpy as np
import os
import sys
from PIL import Image


def print_error(msg: str) -> None:
    print('\x1b[1;31m[ERR] ' + msg + '\x1b[0m', file=sys.stderr)


def main():
    if len(sys.argv) < 2:
        print_error('Expected path to images as argument')
        sys.exit(1)

    # Load images
    input_dir = sys.argv[1]
    images = [np.asarray(Image.open(os.path.join(input_dir, x))) for x in os.listdir(input_dir)]

    # Max image
    max_image = functools.reduce(np.maximum, images)


if __name__ == '__main__':
    main()

34
utils/camera.py Normal file

@@ -0,0 +1,34 @@
import numpy as np
import utils.vector_utils as vector_utils


def build_K_matrix(focal_length, u0, v0):
    """
    Build the camera intrinsic matrix.

    Parameters:
        focal_length (float): Focal length of the camera.
        u0 (float): First coordinate of the principal point.
        v0 (float): Second coordinate of the principal point.

    Returns:
        numpy.ndarray: Camera intrinsic matrix (3x3).
    """
    K = np.asarray([[focal_length, 0, u0],
                    [0, focal_length, v0],
                    [0, 0, 1]])
    return K


def get_camera_rays(points,K):
    """Computes the camera rays for a set of points given the camera matrix K.

    Args:
        points (Array ..., 2): Points in the image plane.
        K (Array 3, 3): Camera intrinsic matrix.

    Returns:
        Array ..., 3: Camera rays corresponding to the input points.
    """
    homogeneous = vector_utils.to_homogeneous(points)
    inv_K = np.linalg.inv(K)
    rays = np.einsum('ij,...j->...i',inv_K,homogeneous)
    return rays
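
As a quick illustration (not part of the commit), the two helpers above could be combined as follows; the focal length and pixel coordinates are made-up values:

import numpy as np
import utils.camera as camera

K = camera.build_K_matrix(focal_length=1200.0, u0=960.0, v0=540.0)
pixels = np.asarray([[960.0, 540.0],   # principal point: ray along the optical axis
                     [0.0, 0.0]])      # top-left corner of the image
rays = camera.get_camera_rays(pixels, K)
print(rays.shape)  # (2, 3): one un-normalised ray per pixel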

82
utils/gaussian.py Normal file

@@ -0,0 +1,82 @@
import numpy as np
import utils.quadratic_forms as quadratic_forms


def gaussian_pdf(mu,sigma,x):
    """Computes the PDF of a multivariate Gaussian distribution.

    Args:
        mu (Array ...,k): Mean vector.
        sigma (Array ...,k,k): Covariance matrix.
        x (Array ...,k): Input vector.

    Returns:
        Array ...: Value of the PDF.
    """
    k = np.shape(x)[-1]
    Q = np.linalg.inv(sigma)
    normalization = np.reciprocal(np.sqrt(np.linalg.det(sigma)*np.power(2.0*np.pi,k)))
    quadratic = quadratic_forms.evaluate_quadratic_form(Q,x-mu)
    result = np.exp(-0.5*quadratic)*normalization
    return result


def gaussian_estimation(x,weights):
    """Estimates the mean and covariance matrix of a Gaussian distribution.

    Args:
        x (Array ...,n,dim): Data points.
        weights (Array ...,n): Weights for each data point.

    Returns:
        Array ...,dim: Estimated mean vector.
        Array ...,dim,dim: Estimated covariance matrix.
    """
    weights_sum = np.sum(weights,axis=-1)
    mu = np.sum(x*np.expand_dims(weights,axis=-1),axis=-2)/np.expand_dims(weights_sum,axis=-1)
    centered_x = x-np.expand_dims(mu,axis=-2)
    sigma = np.einsum('...s,...si,...sj->...ij',weights,centered_x,centered_x)/np.expand_dims(weights_sum,axis=(-1,-2))
    return mu,sigma


def gaussian_mixture_estimation(x,init_params,it=100):
    """Estimates the parameters of a k-component Gaussian mixture model using the EM algorithm.

    Args:
        x (Array ..., n, dim): Data points.
        init_params (tuple): Initial parameters (pi, sigma, mu).
            pi (Array ..., k): Initial mixture weights.
            sigma (Array ..., k, dim, dim): Initial covariance matrices.
            mu (Array ..., k, dim): Initial means.
        it (int, optional): Number of iterations. Defaults to 100.

    Returns:
        Tuple[(Array ..., k), (Array ..., k, dim, dim), (Array ..., k, dim)]:
            Estimated mixture weights, covariance matrices and means.
    """
    pi,sigma,mu = init_params
    for _ in range(it):
        # E-step: responsibility of each component for each data point.
        pdf = gaussian_pdf(np.expand_dims(mu,axis=-2),
                           np.expand_dims(sigma,axis=-3),
                           np.expand_dims(x,axis=-3))*np.expand_dims(pi,axis=-1)
        weights = pdf/np.sum(pdf,axis=-2,keepdims=True)
        # M-step: update the mixture weights, means and covariances.
        pi = np.mean(weights,axis=-1)
        mu,sigma = gaussian_estimation(x,weights)
    return pi,sigma,mu


def maximum_likelihood(x,params):
    """Selects the best Gaussian model for a point.

    Args:
        x (Array ..., dim): Data points.
        params (tuple): Gaussian parameters (pi, sigma, mu).
            pi (Array ..., k): Mixture weights.
            sigma (Array ..., k, dim, dim): Covariance matrices.
            mu (Array ..., k, dim): Means.

    Returns:
        Array ...: Integer in [0, k-1] giving the maximum likelihood model.
    """
    pi,sigma,mu = params
    pdf = gaussian_pdf(mu,sigma,np.expand_dims(x,axis=-2))*pi
    result = np.argmax(pdf,axis=-1)
    return result
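
A hedged sketch of running the EM routine above on synthetic 2-D data with two components; the data and initial parameters are illustrative, not from the repository:

import numpy as np
import utils.gaussian as gaussian

rng = np.random.default_rng(0)
x = np.concatenate([rng.normal(-2.0, 0.5, size=(200, 2)),
                    rng.normal(2.0, 0.5, size=(200, 2))], axis=0)  # (n, dim)

pi0 = np.asarray([0.5, 0.5])                           # (k,)
sigma0 = np.broadcast_to(np.eye(2), (2, 2, 2)).copy()  # (k, dim, dim)
mu0 = np.asarray([[-1.0, -1.0], [1.0, 1.0]])           # (k, dim)

pi, sigma, mu = gaussian.gaussian_mixture_estimation(x, (pi0, sigma0, mu0), it=50)
labels = gaussian.maximum_likelihood(x, (pi, sigma, mu))  # (n,) component index per point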

40
utils/geometric_fit.py Normal file

@@ -0,0 +1,40 @@
import numpy as np
import utils.kernels as kernels
import utils.vector_utils as vector_utils
import utils.quadratic_forms as quadratic_forms


def sphere_parameters_from_points(points):
    """Evaluates sphere parameters from a set of points.

    Args:
        points (Array ..., npoints, ndim): points used to fit the sphere (Cartesian coordinates)

    Returns:
        Array ..., ndim: coordinates of the center of the sphere
        Array ...: radius of the sphere
    """
    homogeneous = vector_utils.to_homogeneous(points)
    Q = quadratic_forms.fit_quadratic_form(homogeneous)
    scale = np.mean(np.diagonal(Q[...,:-1,:-1],axis1=-2,axis2=-1),axis=-1)
    scaled_Q = Q*np.expand_dims(np.reciprocal(scale),axis=(-1,-2))
    center = -(scaled_Q[...,-1,:-1]+scaled_Q[...,:-1,-1])/2
    center_norm = vector_utils.norm_vector(center)[0]
    radius = np.sqrt(np.square(center_norm)-scaled_Q[...,-1,-1])
    return center,radius


def plane_parameters_from_points(points):
    """Computes the parameters of a plane from a set of points.

    Args:
        points (Array ..., npoints, dim): Coordinates of the points used to define the plane.

    Returns:
        Array ..., dim: Normal vector to the plane.
        Array ...: Plane constant alpha.
    """
    homogeneous = vector_utils.to_homogeneous(points)
    E = np.einsum('...ki,...kj->...ij',homogeneous,homogeneous)
    L = kernels.matrix_kernel(E)
    n,alpha = L[...,:-1],L[...,-1]
    return n, alpha
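
For illustration only, a small synthetic check of the sphere fit above; the centre, radius and noise level are invented:

import numpy as np
import utils.geometric_fit as geometric_fit

rng = np.random.default_rng(1)
true_center, true_radius = np.asarray([1.0, -2.0, 0.5]), 3.0
directions = rng.normal(size=(500, 3))
directions /= np.linalg.norm(directions, axis=-1, keepdims=True)
points = true_center + true_radius*directions + 0.01*rng.normal(size=(500, 3))

center, radius = geometric_fit.sphere_parameters_from_points(points)
# center and radius should come out close to [1, -2, 0.5] and 3.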

56
utils/image_loader.py Normal file

@@ -0,0 +1,56 @@
from PIL import Image
import functools
from tqdm import tqdm
import numpy as np


def loader(file_list):
    """
    Load images from the file list, convert them to numpy arrays, and show a progress bar.

    Parameters:
        file_list (list of str): List of file paths to the images.

    Returns:
        map: A map object yielding numpy arrays of the images.
    """
    return map(np.asarray, map(Image.open, tqdm(file_list)))


def load_reduce(file_list, reducer):
    """Loads and reduces a list of image files using a reduction function.

    Args:
        file_list (list of str): List of paths to image files.
        reducer (function): Function to reduce the images.

    Returns:
        array: Reduced image.
    """
    reduced_image = functools.reduce(reducer, loader(file_list))
    return reduced_image


def load_map(file_list, mapper):
    """Loads and maps a list of image files using a mapping function.

    Args:
        file_list (list of str): List of paths to image files.
        mapper (function): Function to map the images.

    Returns:
        map: A map object yielding the mapped images.
    """
    mapped_images = map(mapper, loader(file_list))
    return mapped_images


def load_max_image(file_list):
    """Loads a list of image files and computes the element-wise maximum.

    Args:
        file_list (list of str): List of paths to image files.

    Returns:
        array: Image with the element-wise maximum values.
    """
    max_image = load_reduce(file_list, np.maximum)
    return max_image
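
A typical call pattern might look like the following; the glob pattern is a placeholder and the images are assumed to be RGB:

import glob
import utils.image_loader as image_loader

files = sorted(glob.glob('data/scene/*.png'))                      # placeholder path
max_image = image_loader.load_max_image(files)                     # element-wise maximum over the stack
greys = image_loader.load_map(files, lambda im: im.mean(axis=-1))  # lazy per-image conversion to grey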

103
utils/intersections.py Normal file

@@ -0,0 +1,103 @@
import numpy as np
import utils.vector_utils as vector_utils
import utils.kernels as kernels


def lines_intersections_system(points,directions):
    """Computes the system of equations Ax = b for the intersection of lines,
    where x is the intersection point.

    Args:
        points (Array ..., npoints, ndim): points through which the lines pass
        directions (Array ..., npoints, ndim): direction vectors of the lines

    Returns:
        Array ..., 3*npoints, ndim: coefficient matrix A for the system of equations
        Array ..., 3*npoints: right-hand side vector b for the system of equations
    """
    n = vector_utils.norm_vector(directions)[1]
    skew = np.swapaxes(vector_utils.cross_to_skew_matrix(n),-1,-2)
    root = np.einsum('...uij,...uj->...ui',skew,points)
    A = np.concatenate(np.moveaxis(skew,-3,0),axis=-2)
    b = np.concatenate(np.moveaxis(root,-2,0),axis=-1)
    return A,b


def lines_intersections(points,directions):
    """Computes the intersection point of a set of lines.

    Args:
        points (Array ..., npoints, ndim): points through which the lines pass
        directions (Array ..., npoints, ndim): direction vectors of the lines

    Returns:
        Array ..., ndim: intersection point
    """
    A,b = lines_intersections_system(points,directions)
    x = kernels.iteratively_reweighted_least_squares(A,b)
    return x


def line_sphere_intersection_determinant(center,radius,directions):
    """Computes the determinant for the intersection of a line through the origin and a sphere.

    Args:
        center (Array ..., dim): center of the sphere
        radius (Array ...): radius of the sphere
        directions (Array ..., dim): direction of the line

    Returns:
        Array ...: intersection determinant
    """
    directions_norm_2 = np.square(vector_utils.norm_vector(directions)[0])
    center_norm_2 = np.square(vector_utils.norm_vector(center)[0])
    dot_product_2 = np.square(vector_utils.dot_product(center,directions))
    delta = dot_product_2-directions_norm_2*(center_norm_2-np.square(radius))
    return delta


def line_plane_intersection(normal,alpha,directions):
    """Computes the intersection points between a line through the origin and a plane.

    Args:
        normal (Array ..., ndim): Normal vector to the plane.
        alpha (Array ...): Plane constant alpha.
        directions (Array ..., ndim): direction of the line

    Returns:
        Array ..., ndim: Intersection points between the line and the plane.
    """
    t = -alpha*np.reciprocal(vector_utils.dot_product(directions,normal))
    intersection = directions*t[...,np.newaxis]
    return intersection


def line_sphere_intersection(center,radius,directions):
    """Computes the intersection points between a line through the origin and a sphere.

    Args:
        center (Array ..., ndim): Center of the sphere.
        radius (Array ...): Radius of the sphere.
        directions (Array ..., ndim): Direction vectors of the line.

    Returns:
        Array ..., ndim: Closest intersection points between the line and the sphere.
        Array bool ...: Mask of valid intersection points.
    """
    delta = line_sphere_intersection_determinant(center,radius,directions)
    mask = delta>0
    dot_product = vector_utils.dot_product(center,directions)
    directions_norm_2 = np.square(vector_utils.norm_vector(directions)[0])
    distances = (dot_product-np.sqrt(np.maximum(0,delta)))*np.reciprocal(directions_norm_2)
    intersection = np.expand_dims(distances,axis=-1)*directions
    return intersection,mask


def sphere_intersection_normal(center,point):
    """Computes the normal vector at an intersection point on a sphere.

    Args:
        center (Array ..., dim): Coordinates of the sphere center.
        point (Array ..., dim): Coordinates of the intersection point.

    Returns:
        Array ..., dim: Unit normal vector at the intersection point.
    """
    vector = point-center
    normal = vector_utils.norm_vector(vector)[1]
    return normal
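
A minimal synthetic check of the line-intersection solver above (the target point and directions are made up):

import numpy as np
import utils.intersections as intersections

rng = np.random.default_rng(2)
target = np.asarray([0.5, 1.0, -2.0])
directions = rng.normal(size=(3, 3))
points = target - 4.0*directions + 0.001*rng.normal(size=(3, 3))  # one point on each line

x = intersections.lines_intersections(points, directions)  # should be close to `target`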

83
utils/kernels.py Normal file

@@ -0,0 +1,83 @@
import numpy as np
import utils.vector_utils as vector_utils


def weighted_least_squares(A,y,weights):
    """Computes the weighted least squares solution of Ax = y.

    Args:
        A (Array ...,u,v): Design matrix.
        y (Array ...,u): Target values.
        weights (Array ...,u): Weights for each equation.

    Returns:
        Array ...,v: Weighted least squares solution.
    """
    pinv = np.linalg.pinv(A*weights[...,np.newaxis])
    result = np.einsum('...uv,...v->...u',pinv,y*weights)
    return result


def least_squares(A,y):
    """Computes the least squares solution of Ax = y.

    Args:
        A (Array ...,u,v): Design matrix.
        y (Array ...,u): Target values.

    Returns:
        Array ...,v: Least squares solution.
    """
    result = weighted_least_squares(A,y,np.ones(np.shape(y)))
    return result


def iteratively_reweighted_least_squares(A,y, epsilon=1e-5, it=20):
    """Computes the iteratively reweighted least squares solution of Ax = y.

    Args:
        A (Array ..., u, v): Design matrix.
        y (Array ..., u): Target values.
        epsilon (float, optional): Small value to avoid division by zero. Defaults to 1e-5.
        it (int, optional): Number of iterations. Defaults to 20.

    Returns:
        Array ..., v: Iteratively reweighted least squares solution.
    """
    weights = np.ones(y.shape)
    for _ in range(it):
        result = weighted_least_squares(A,y,weights)
        ychap = np.einsum('...uv,...v->...u',A,result)
        delta = np.abs(ychap-y)
        # Down-weight equations with large residuals (robust re-weighting).
        weights = np.reciprocal(np.maximum(epsilon,np.sqrt(delta)))
    return result


def matrix_kernel(A):
    """Computes the eigenvector corresponding to the smallest eigenvalue of the matrix A.

    Args:
        A (Array ..., n, n): Input square matrix.

    Returns:
        Array ..., n: Eigenvector corresponding to the smallest eigenvalue.
    """
    eigval, eigvec = np.linalg.eig(A)
    min_index = np.argmin(np.abs(eigval),axis=-1)
    min_eigvec = np.take_along_axis(eigvec,min_index[...,None,None],-1)[...,0]
    normed_eigvec = vector_utils.norm_vector(min_eigvec)[1]
    return normed_eigvec


def masked_least_squares(A,y,mask):
    """Computes the least squares solution of Ax = y for masked data.

    Args:
        A (Array ..., n, p): Design matrix.
        y (Array ..., n): Target values.
        mask (Array bool ..., n): Mask to select valid data points.

    Returns:
        Array ..., p: Least squares solution for the masked data.
    """
    masked_solver = lambda A,y,mask : least_squares(A[mask,:],y[mask])
    vectorized = np.vectorize(masked_solver,signature='(n,p),(n),(n)->(p)')
    result = vectorized(A,y,mask)
    return result
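
A short sketch of the IRLS helper above on a 1-D line fit with a single gross outlier; the data are synthetic:

import numpy as np
import utils.kernels as kernels

t = np.linspace(0.0, 1.0, 50)
A = np.stack([t, np.ones_like(t)], axis=-1)  # design matrix for y = a*t + b
y = 2.0*t + 1.0
y[10] += 25.0                                # one gross outlier

a_b = kernels.iteratively_reweighted_least_squares(A, y)  # should end up close to [2, 1]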

35
utils/mashal.py Normal file

@@ -0,0 +1,35 @@
import numpy as np


def marshal_arrays(arrays):
    """
    Flatten a list of numpy arrays and store their shapes.

    Parameters:
        arrays (list of np.ndarray): List of numpy arrays to be marshalled.

    Returns:
        tuple: A tuple containing:
            - flat (np.ndarray): A single concatenated numpy array of all elements.
            - shapes (list of tuple): A list of shapes of the original arrays.
    """
    flattened = list(map(lambda a : np.reshape(a,-1),arrays))
    shapes = list(map(np.shape,arrays))
    flat = np.concatenate(flattened)
    return flat, shapes


def unmarshal_arrays(flat,shapes):
    """
    Rebuild the original list of numpy arrays from the flattened array and shapes.

    Parameters:
        flat (np.ndarray): The single concatenated numpy array of all elements.
        shapes (list of tuple): A list of shapes of the original arrays.

    Returns:
        list of np.ndarray: The list of original numpy arrays.
    """
    sizes = list(map(np.prod,shapes))
    splits = np.cumsum(np.asarray(sizes,dtype=int))[:-1]
    flattened = np.split(flat,splits)
    arrays = list(map(lambda t : np.reshape(t[0],t[1]),zip(flattened,shapes)))
    return arrays
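
A quick round-trip of the two helpers above (the shapes are arbitrary):

import numpy as np
import utils.mashal as mashal

arrays = [np.zeros((2, 3)), np.arange(4.0), np.ones((5, 1, 2))]
flat, shapes = mashal.marshal_arrays(arrays)     # flat.shape == (20,)
rebuilt = mashal.unmarshal_arrays(flat, shapes)  # same shapes and values as `arrays`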

48
utils/masks.py Normal file

@@ -0,0 +1,48 @@
import numpy as np
import scipy.ndimage as ndimage


def get_greatest_components(mask, n):
    """
    Extract the n largest connected components from a binary mask.

    Parameters:
        mask (Array ...): The binary mask.
        n (int): The number of largest connected components to extract.

    Returns:
        Array n, ...: A boolean array of the n largest connected components.
    """
    labeled, _ = ndimage.label(mask)
    unique, counts = np.unique(labeled, return_counts=True)
    # Ignore the background label (0) and keep the n most populated labels.
    greatest_labels = unique[unique != 0][np.argsort(counts[unique != 0])[-n:]]
    greatest_components = labeled[np.newaxis,...] == np.expand_dims(greatest_labels,axis=tuple(range(1,1+mask.ndim)))
    return greatest_components


def get_mask_border(mask):
    """
    Extract the (inner) border from a binary mask.

    Parameters:
        mask (Array ...): The binary mask.

    Returns:
        Array ...: A boolean mask of the border.
    """
    inverted_mask = np.logical_not(mask)
    dilated = ndimage.binary_dilation(inverted_mask)
    border = np.logical_and(mask,dilated)
    return border


def select_binary_mask(mask,metric):
    """Selects the side of a binary mask that optimizes the given metric.

    Args:
        mask (Array bool ...): Initial binary mask.
        metric (function): Function to evaluate the quality of the mask.

    Returns:
        Array bool ...: Selected binary mask that maximizes the metric.
    """
    inverted = np.logical_not(mask)
    result = mask if metric(mask)>metric(inverted) else inverted
    return result
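
For illustration, a toy 8x8 mask with two blobs run through the helpers above:

import numpy as np
import utils.masks as masks

mask = np.zeros((8, 8), dtype=bool)
mask[0:2, 0:2] = True  # small blob (4 pixels)
mask[4:8, 4:8] = True  # large blob (16 pixels)

components = masks.get_greatest_components(mask, 1)  # shape (1, 8, 8), keeps only the large blob
border = masks.get_mask_border(mask)                  # inner border pixels of both blobs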

66
utils/photometry.py Normal file

@@ -0,0 +1,66 @@
import numpy as np
import utils.kernels as kernels
import utils.vector_utils as vector_utils


def estimate_light(normals, grey_levels, threshold=(0, 1)):
    """Estimates the light direction from the given normals and grey levels.

    Args:
        normals (Array ..., n, dim): Normal vectors.
        grey_levels (Array ..., n): Grey levels corresponding to the normals.
        threshold (tuple, optional): Intensity threshold for valid grey levels. Defaults to (0, 1).

    Returns:
        Array ..., dim: Estimated light direction.
    """
    validity_mask = np.logical_and(grey_levels>threshold[0],grey_levels<threshold[1])
    lights = kernels.weighted_least_squares(normals,grey_levels,validity_mask)
    return lights


def geometric_shading_parameters(light_point, principal_directions, points):
    """Computes geometric parameters for shading based on the light source and points.

    Args:
        light_point (Array ..., dim): Coordinates of the light source.
        principal_directions (Array ..., dim): Principal direction of each light.
        points (Array ..., dim): Coordinates of the points on the surface.

    Returns:
        Array ...: Distance from each point to the light source.
        Array ..., dim: Light direction at each point.
        Array ...: Angular factor based on the principal direction and the light direction.
    """
    distance, light_direction = vector_utils.norm_vector(light_point-points)
    angular_factor = np.maximum(vector_utils.dot_product(principal_directions,-light_direction),0)
    return distance, light_direction, angular_factor


def estimate_anisotropy(light_point, principal_directions, points, normals, grey_levels, min_grey_level=0.1, min_dot_product=0.2):
    """Estimates anisotropy parameters based on geometric shading and grey levels.

    Args:
        light_point (Array ..., dim): Coordinates of the light source.
        principal_directions (Array ..., dim): Principal direction of each light.
        points (Array ..., dim): Coordinates of the points on the surface.
        normals (Array ..., dim): Normal vectors at each point.
        grey_levels (Array ...): Observed grey levels at each point.
        min_grey_level (float, optional): Minimum valid grey level. Defaults to 0.1.
        min_dot_product (float, optional): Minimum valid dot product for shading and angular factors. Defaults to 0.2.

    Returns:
        Array ...: Estimated anisotropy parameter mu.
        Array ...: Estimated flux parameter.
    """
    distance, light_direction, angular_factor = geometric_shading_parameters(light_point, principal_directions, points)
    computed_shading = np.maximum(vector_utils.dot_product(normals,light_direction),0)
    validity_mask = np.logical_and(grey_levels>min_grey_level,np.logical_and(computed_shading>min_dot_product,angular_factor>min_dot_product))
    log_flux = np.log(np.maximum(grey_levels,min_grey_level)*np.square(distance)*np.reciprocal(np.maximum(computed_shading,min_dot_product)))
    log_factor = vector_utils.to_homogeneous(np.expand_dims(np.log(np.maximum(angular_factor,min_dot_product)),axis=-1))
    eta = kernels.weighted_least_squares(log_factor,log_flux,validity_mask)
    mu,log_phi = eta[...,0], eta[...,1]
    estimated_flux = np.exp(log_phi)
    return mu,estimated_flux


def light_conditions(light_point, principal_directions, points, mu, flux):
    """Computes the light vector received at each point from an anisotropic point light source.

    Args:
        light_point (Array ..., dim): Coordinates of the light source.
        principal_directions (Array ..., dim): Principal direction of each light.
        points (Array ..., dim): Coordinates of the points on the surface.
        mu (Array ...): Anisotropy parameter.
        flux (Array ...): Flux parameter.

    Returns:
        Array ..., dim: Light vector at each point (inverse-square falloff scaled by the angular factor raised to mu and by the flux).
    """
    distance, light_direction, angular_factor = geometric_shading_parameters(light_point, principal_directions, points)
    light_conditions = light_direction*(np.reciprocal(np.square(distance))*np.power(angular_factor,mu)*flux)[...,np.newaxis]
    return light_conditions
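
As a hedged sketch, `estimate_light` above can be exercised on synthetic Lambertian data; the light direction and normals below are invented:

import numpy as np
import utils.photometry as photometry
import utils.vector_utils as vector_utils

rng = np.random.default_rng(3)
normals = vector_utils.norm_vector(rng.normal(size=(1000, 3)))[1]
true_light = np.asarray([0.3, 0.1, 0.9])
grey_levels = np.clip(normals @ true_light, 0.0, 1.0)  # Lambertian shading, clipped

light = photometry.estimate_light(normals, grey_levels)  # close to `true_light`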

98
utils/quadratic_forms.py Normal file

@@ -0,0 +1,98 @@
import numpy as np
import utils.kernels as kernels
import utils.vector_utils as vector_utils


def evaluate_bilinear_form(Q,left,right):
    """Evaluates bilinear forms at several points.

    Args:
        Q (Array ...,ldim,rdim): bilinear form to evaluate
        left (Array ...,ldim): points where the bilinear form is evaluated on the left
        right (Array ...,rdim): points where the bilinear form is evaluated on the right

    Returns:
        Array ...: evaluated bilinear forms
    """
    result = np.einsum('...ij,...i,...j->...',Q,left,right)
    return result


def evaluate_quadratic_form(Q,points):
    """Evaluates quadratic forms at several points.

    Args:
        Q (Array ...,dim,dim): quadratic form to evaluate
        points (Array ...,dim): points where the quadratic form is evaluated

    Returns:
        Array ...: evaluated quadratic forms
    """
    result = evaluate_bilinear_form(Q,points,points)
    return result


def merge_quadratic_to_homogeneous(Q,b,c):
    """Merges a quadratic form, linear term and constant term into a homogeneous matrix.

    Args:
        Q (Array ..., dim, dim): quadratic form matrix
        b (Array ..., dim): linear term vector
        c (Array ...): constant term

    Returns:
        Array ..., dim+1, dim+1: homogeneous matrix representing the quadratic form
    """
    dim_points = Q.shape[-1]
    stack_shape = np.broadcast_shapes(np.shape(Q)[:-2],np.shape(b)[:-1],np.shape(c))
    Q_b = np.broadcast_to(Q,stack_shape+(dim_points,dim_points))
    b_b = np.broadcast_to(np.expand_dims(b,-1),stack_shape+(dim_points,1))
    c_b = np.broadcast_to(np.expand_dims(c,(-1,-2)),stack_shape+(1,1))
    H = np.block([[Q_b,0.5*b_b],[0.5*np.swapaxes(b_b,-1,-2),c_b]])
    return H


def quadratic_to_dot_product(points):
    """Computes the matrix W such that x.T @ A @ x == W(x) @ A[ui, uj]
    for an upper-triangular matrix A.

    Args:
        points (Array ...,ndim): points of dimension ndim

    Returns:
        Array ...,ni: dot product matrix (W)
        Array ni: i indices of the central matrix
        Array ni: j indices of the central matrix
    """
    dim_points = points.shape[-1]
    ui,uj = np.triu_indices(dim_points)
    W = points[...,ui]*points[...,uj]
    return W,ui,uj


def fit_quadratic_form(points):
    """Fits a quadratic form to the given points, treated as zeros of the form.

    Args:
        points (Array ..., n, dim): Input points.

    Returns:
        Array ..., dim, dim: Fitted (upper-triangular) quadratic form matrix.
    """
    dim_points = points.shape[-1]
    normed_points = vector_utils.norm_vector(points)[1]
    W,ui,uj = quadratic_to_dot_product(normed_points)
    H = np.einsum('...ki,...kj->...ij',W,W)
    V0 = kernels.matrix_kernel(H)
    Q = np.zeros(V0.shape[:-1]+(dim_points,dim_points))
    Q[...,ui,uj]=V0
    return Q


# import matplotlib.pyplot as plt
# Q = np.random.randn(3,3)
# x0, y0 = np.linspace(-1,1,300),np.linspace(-1,1,300)
# x,y = np.meshgrid(x0,y0,indexing='ij')
# points = vector_utils.to_homogeneous(np.stack([x,y],axis=-1))
# f = evaluate_quadratic_form(Q,points)
# mask = np.abs(f)<0.01
# u,v = np.where(mask)
# zeros = vector_utils.to_homogeneous(np.stack([x0[u],y0[v]],axis=-1))+np.random.randn(5,u.shape[0],3)*0.1
# Qc = fit_quadratic_form(zeros)
# fchap = evaluate_quadratic_form(Qc,points[...,None,:])
# print()


@@ -0,0 +1,23 @@
import numpy as np
import utils.vector_utils as vector_utils


def deproject_ellipse_to_sphere(M, radius):
    """Finds the deprojection of an ellipse to a sphere.

    Args:
        M (Array 3,3): Ellipse quadratic form
        radius (float): radius of the sought sphere

    Returns:
        Array 3: coordinates of the sphere centre
    """
    H = 0.5*(np.swapaxes(M,-1,-2)+M)
    eigval, eigvec = np.linalg.eigh(H)
    # Pick the eigenvalue furthest from the median, i.e. the one that differs from the double eigenvalue.
    i_unique = np.argmax(np.abs(np.median(eigval,axis=-1,keepdims=True)-eigval),axis=-1)
    unique_eigval = np.take_along_axis(eigval,i_unique[...,None],-1)[...,0]
    unique_eigvec = np.take_along_axis(eigvec,i_unique[...,None,None],-1)[...,0]
    double_eigval = 0.5*(np.sum(eigval,axis=-1)-unique_eigval)
    z_sign = np.sign(unique_eigvec[...,-1])
    dist = np.sqrt(1-double_eigval/unique_eigval)
    C = np.real(radius*(dist*z_sign)[...,None]*vector_utils.norm_vector(unique_eigvec)[1])
    return C

69
utils/vector_utils.py Normal file

@@ -0,0 +1,69 @@
import numpy as np


def norm_vector(v):
    """Computes the norm and direction of vectors.

    Args:
        v (Array ..., dim): vectors to compute the norm and direction for

    Returns:
        Array ...: norms of the vectors
        Array ..., dim: unit direction vectors
    """
    norm = np.linalg.norm(v,axis=-1)
    direction = v/norm[...,np.newaxis]
    return norm,direction


def dot_product(v1,v2):
    """Computes the dot product between two arrays of vectors.

    Args:
        v1 (Array ..., ndim): First array of vectors.
        v2 (Array ..., ndim): Second array of vectors.

    Returns:
        Array ...: Dot product between v1 and v2.
    """
    result = np.einsum('...i,...i->...',v1,v2)
    return result


def cross_to_skew_matrix(v):
    """Converts a vector cross product to a skew-symmetric matrix multiplication.

    Args:
        v (Array ..., 3): vectors to convert

    Returns:
        Array ..., 3, 3: skew-symmetric matrices corresponding to the input vectors
    """
    indices = np.asarray([[-1,2,1],[2,-1,0],[1,0,-1]])
    signs = np.asarray([[0,-1,1],[1,0,-1],[-1,1,0]])
    skew_matrix = v[...,indices]*signs
    return skew_matrix


def to_homogeneous(v):
    """Converts vectors to homogeneous coordinates.

    Args:
        v (Array ..., dim): input vectors

    Returns:
        Array ..., dim+1: homogeneous coordinates of the input vectors
    """
    append_term = np.ones(np.shape(v)[:-1]+(1,))
    homogeneous = np.append(v,append_term,axis=-1)
    return homogeneous


def one_hot(i,imax):
    """Converts indices to one-hot encoded (boolean) vectors.

    Args:
        i (Array ...): Array of indices.
        imax (int): Number of classes.

    Returns:
        Array ..., imax: One-hot encoded vectors.
    """
    result = np.arange(imax)==np.expand_dims(i,axis=-1)
    return result
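
A small consistency check of the helpers above (the vectors are arbitrary):

import numpy as np
import utils.vector_utils as vector_utils

a = np.asarray([1.0, 2.0, 3.0])
b = np.asarray([-4.0, 0.5, 2.0])

skew_a = vector_utils.cross_to_skew_matrix(a)
assert np.allclose(skew_a @ b, np.cross(a, b))  # [a]x b == a x b

norm, direction = vector_utils.norm_vector(a)
assert np.isclose(norm, np.sqrt(14.0)) and np.allclose(direction*norm, a)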