@inproceedings{yang:cvpr04,
  author        = {Yang, Gehua and Stewart, Charles V.},
  title         = {Covariance-Driven Mosaic Formation from Sparsely-Overlapping
                   Image Sets with Application to Retinal Image Mosaicing},
  booktitle     = {Proceedings of the 2004 {IEEE} Computer Society Conference
                   on Computer Vision and Pattern Recognition ({CVPR} 2004)},
  year          = {2004},
  volume        = {1},
  pages         = {804--810},
  issn          = {1063-6919},
  keywords      = {covariance matrices, diseases, estimation theory, eye,
                   feature extraction, image matching, image registration,
                   image segmentation, minimisation, patient treatment,
                   Mahalanobis distance, constraints extraction,
                   correspondences, covariance driven mosaic formation,
                   geometric image transformations, image pairs, mapping error
                   covariance matrices, matching estimation, minimization,
                   retinal diseases, retinal image mosaicing, robust
                   estimation, seamless mosaics, sparsely-overlapping image
                   sets, transform estimation},
  abstract      = {A new technique is presented for mosaicing
                   sparsely-overlapping image sets, with a target application
                   of assisting the diagnosis and treatment of retinal
                   diseases. The geometric image transformations required to
                   construct the mosaics are estimated by (1) estimating the
                   transformations between as many pairs of images as
                   possible, (2) extracting sets of constraints
                   (correspondences) from the successfully registered image
                   pairs, and (3) using these constraint sets to
                   simultaneously (jointly) estimate the final
                   transformations. Unfortunately, this may not be sufficient
                   to construct seamless mosaics when two images overlap but
                   can not be successfully registered (step 1). This paper
                   presents a new method to generate constraints between such
                   image pairs, and use these constraints to estimate a more
                   consistent set of transformations. For each pair
                   transformation parameter covariance matrices are computed
                   and used to estimate the mapping error covariance matrices
                   for individual features from one image. These features are
                   matched in the second image by minimizing the resulting
                   Mahalanobis distance. The generated correspondences are
                   validated using robust estimation techniques and used to
                   refine the estimates. The steps of covariance computation,
                   matching, and transform estimation are repeated for all
                   relevant image pairs until the final alignment converges.
                   Results are presented and evaluated for several difficult
                   image sets to illustrate the efficacy of the techniques.},
  internal-note = {review: expanded author initials C.\ V.\ to Charles V.
                   (Stewart, RPI) -- confirm; normalized auto-exported IEEE
                   booktitle; removed empty annote field}
}