Publications by Kristina Monakhova
2022
Kristina Monakhova
Physics-Informed Machine Learning for Computational Imaging PhD Thesis
EECS Department, University of California, Berkeley, 2022.
@phdthesis{Monakhova:EECS-2022-177,
title = {Physics-Informed Machine Learning for Computational Imaging},
author = {Kristina Monakhova},
url = {http://www2.eecs.berkeley.edu/Pubs/TechRpts/2022/EECS-2022-177.html},
year = {2022},
date = {2022-07-01},
urldate = {2022-07-01},
number = {UCB/EECS-2022-177},
school = {EECS Department, University of California, Berkeley},
abstract = {A key aspect of many computational imaging systems, from compressive cameras to low-light photography, is the set of algorithms used to uncover the signal from encoded or noisy measurements. Some computational cameras encode higher-dimensional information (e.g. different wavelengths of light, 3D, time) onto a 2-dimensional sensor, then use algorithms to decode and recover this high-dimensional information. Others capture measurements that are extremely noisy or degraded, and require algorithms to extract the signal and make the images usable by people, or by higher-level downstream algorithms. In each case, the algorithms used to decode and extract information from raw measurements are critical and necessary to make computational cameras function. Over the years, the predominant (classic) methods for recovering information from computational cameras have been based on minimizing an optimization problem consisting of a data term and a hand-picked prior term. More recently, deep learning has been applied to these problems, but it often has no way to incorporate known optical characteristics, requires large training datasets, and results in black-box models that cannot easily be interpreted. In this dissertation, we present physics-informed machine learning for computational imaging, a middle-ground approach that combines elements of classic methods with deep learning. We show how to incorporate knowledge of the imaging system physics into neural networks to improve image quality and performance beyond what is feasible with either classic or deep methods for several computational cameras. We show several different ways to incorporate imaging physics into neural networks, including algorithm unrolling, differentiable optical models, unsupervised methods, and generative adversarial networks. For each of these methods, we focus on a different computational camera with unique challenges and modeling considerations. First, we introduce an unrolled, physics-informed network that improves the quality and reconstruction time of lensless cameras, achieving photorealistic image quality on a variety of scenes. Building on this, we demonstrate a new reconstruction network that improves the reconstruction time for compressive, single-shot 3D microscopy with spatially-varying blur by 1,600×, enabling interactive previewing of the scene. In cases where training data is hard to acquire, we show that an untrained physics-informed network can improve image quality for compressive single-shot video and hyperspectral imaging without the need for training data. Finally, we design a physics-informed noise generator that can realistically synthesize noise at extremely high-gain, low-light settings. Using this learned noise model, we show how we can push a camera past its typical limit and take photorealistic videos at starlight levels of illumination for the first time. Each case highlights how physics-informed machine learning can improve computational cameras and push them to their limits.},
keywords = {},
pubstate = {published},
tppubtype = {phdthesis}
}
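The thesis abstract above names algorithm unrolling as one way to build imaging physics into a neural network. As a rough, self-contained sketch only (not the thesis code; the operator handles, layer sizes, and step-size parameterization are assumptions), an unrolled reconstruction alternates a gradient step on a known forward model with a small learned prior step:

import torch
import torch.nn as nn

class UnrolledRecon(nn.Module):
    """Toy unrolled reconstruction: a fixed number of physics-based
    data-consistency steps, each followed by a small learned prior step."""

    def __init__(self, forward_op, adjoint_op, n_iters=5):
        super().__init__()
        self.A = forward_op        # known camera model: image -> measurement
        self.At = adjoint_op       # its adjoint: measurement -> image
        self.steps = nn.Parameter(torch.full((n_iters,), 0.1))  # learned step sizes
        self.priors = nn.ModuleList([
            nn.Sequential(nn.Conv2d(1, 16, 3, padding=1), nn.ReLU(),
                          nn.Conv2d(16, 1, 3, padding=1))
            for _ in range(n_iters)
        ])

    def forward(self, y):
        # y and the reconstruction are assumed to be (N, 1, H, W) tensors.
        x = self.At(y)                      # physics-based initialization
        for k, prior in enumerate(self.priors):
            grad = self.At(self.A(x) - y)   # gradient of 0.5 * ||Ax - y||^2
            x = x - self.steps[k] * grad    # data-consistency step
            x = x + prior(x)                # learned residual prior step
        return x

Because the forward model A appears explicitly in every iteration, the learned components only need to capture the prior and step sizes, which reflects the middle-ground approach the abstract describes.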
Kristina Monakhova; Stephan R. Richter; Laura Waller; Vladlen Koltun
Dancing Under the Stars: Video Denoising in Starlight Inproceedings
In: Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pp. 16241–16251, 2022.
@inproceedings{Monakhova_2022_CVPR,
title = {Dancing Under the Stars: Video Denoising in Starlight},
author = {Kristina Monakhova and Stephan R. Richter and Laura Waller and Vladlen Koltun},
url = {https://openaccess.thecvf.com/content/CVPR2022/html/Monakhova_Dancing_Under_the_Stars_Video_Denoising_in_Starlight_CVPR_2022_paper.html},
year = {2022},
date = {2022-06-01},
urldate = {2022-06-01},
booktitle = {Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)},
pages = {16241--16251},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Kyrollos Yanny; Kristina Monakhova; Richard W Shuai; Laura Waller
Deep learning for fast spatially varying deconvolution Journal Article
In: Optica, vol. 9, no. 1, pp. 96–99, 2022.
@article{Yanny:22,
title = {Deep learning for fast spatially varying deconvolution},
author = {Kyrollos Yanny and Kristina Monakhova and Richard W Shuai and Laura Waller},
url = {http://www.osapublishing.org/optica/abstract.cfm?URI=optica-9-1-96},
doi = {10.1364/OPTICA.442438},
year = {2022},
date = {2022-01-01},
journal = {Optica},
volume = {9},
number = {1},
pages = {96--99},
publisher = {OSA},
abstract = {Deconvolution can be used to obtain sharp images or volumes from blurry or encoded measurements in imaging systems. Given knowledge of the system's point spread function (PSF) over the field of view, a reconstruction algorithm can be used to recover a clear image or volume. Most deconvolution algorithms assume shift-invariance; however, in realistic systems, the PSF varies laterally and axially across the field of view due to aberrations or design. Shift-varying models can be used, but are often slow and computationally intensive. In this work, we propose a deep-learning-based approach that leverages knowledge about the system's spatially varying PSFs for fast 2D and 3D reconstructions. Our approach, termed MultiWienerNet, uses multiple differentiable Wiener filters paired with a convolutional neural network to incorporate spatial variance. Trained using simulated data and tested on experimental data, our approach offers a 625–1600× increase in speed compared to iterative methods with a spatially varying model, and outperforms existing deep-learning-based methods that assume shift invariance.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
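The MultiWienerNet abstract above pairs multiple differentiable Wiener filters with a convolutional network to handle spatially varying PSFs. Purely as an illustrative sketch under assumptions (this is not the published implementation; the class name and learnable-regularizer parameterization are invented for the example), a single Wiener deconvolution can be written as a differentiable layer:

import torch
import torch.nn as nn

class WienerLayer(nn.Module):
    """One differentiable Wiener deconvolution with a learnable noise level."""

    def __init__(self, psf):
        super().__init__()
        # psf: (H, W) point spread function, assumed known from calibration.
        self.register_buffer("H", torch.fft.rfft2(psf))     # PSF spectrum
        self.log_reg = nn.Parameter(torch.tensor(-3.0))     # learnable regularization level

    def forward(self, y):
        # y: (..., H, W) blurry measurement with the same spatial size as the PSF.
        Y = torch.fft.rfft2(y)
        reg = torch.exp(self.log_reg)
        W = torch.conj(self.H) / (self.H.abs() ** 2 + reg)  # Wiener filter in Fourier space
        return torch.fft.irfft2(W * Y, s=y.shape[-2:])

In the paper's approach, multiple such filters are paired with a CNN to incorporate spatial variance; that merging network is omitted here.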
2021
Kristina Monakhova; Vi Tran; Grace Kuo; Laura Waller
Untrained networks for compressive lensless photography Journal Article
In: Opt. Express, vol. 29, no. 13, pp. 20913–20929, 2021.
@article{Monakhova:21,
title = {Untrained networks for compressive lensless photography},
author = {Kristina Monakhova and Vi Tran and Grace Kuo and Laura Waller},
url = {http://www.opticsexpress.org/abstract.cfm?URI=oe-29-13-20913},
doi = {10.1364/OE.424075},
year = {2021},
date = {2021-06-01},
journal = {Opt. Express},
volume = {29},
number = {13},
pages = {20913--20929},
publisher = {OSA},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
2020
Kyrollos Yanny; Nick Antipa; William Liberti; Sam Dehaeck; Kristina Monakhova; Fanglin Linda Liu; Konlin Shen; Ren Ng; Laura Waller
Miniscope3D: optimized single-shot miniature 3D fluorescence microscopy Journal Article
In: Light: Science & Applications, vol. 9, no. 171, 2020.
@article{yanny2020,
title = {Miniscope3D: optimized single-shot miniature 3D fluorescence microscopy},
author = {Kyrollos Yanny and Nick Antipa and William Liberti and Sam Dehaeck and Kristina Monakhova and Fanglin Linda Liu and Konlin Shen and Ren Ng and Laura Waller},
url = {https://www.nature.com/articles/s41377-020-00403-7},
doi = {10.1038/s41377-020-00403-7},
year = {2020},
date = {2020-10-02},
journal = {Light: Science & Applications},
volume = {9},
number = {171},
abstract = {Miniature fluorescence microscopes are a standard tool in systems biology. However, widefield miniature microscopes capture only 2D information, and modifications that enable 3D capabilities increase the size and weight and have poor resolution outside a narrow depth range. Here, we achieve the 3D capability by replacing the tube lens of a conventional 2D Miniscope with an optimized multifocal phase mask at the objective’s aperture stop. Placing the phase mask at the aperture stop significantly reduces the size of the device, and varying the focal lengths enables a uniform resolution across a wide depth range. The phase mask encodes the 3D fluorescence intensity into a single 2D measurement, and the 3D volume is recovered by solving a sparsity-constrained inverse problem. We provide methods for designing and fabricating the phase mask and an efficient forward model that accounts for the field-varying aberrations in miniature objectives. We demonstrate a prototype that is 17 mm tall and weighs 2.5 grams, achieving 2.76 μm lateral and 15 μm axial resolution across most of the 900 × 700 × 390 μm³ volume at 40 volumes per second. The performance is validated experimentally on resolution targets, dynamic biological samples, and mouse brain tissue. Compared with existing miniature single-shot volume-capture implementations, our system is smaller and lighter and achieves more than 2× better lateral and axial resolution throughout a 10× larger usable depth range. Our microscope design provides single-shot 3D imaging for applications where a compact platform matters, such as volumetric neural imaging in freely moving animals and 3D motion studies of dynamic samples in incubators and lab-on-a-chip devices.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
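The Miniscope3D abstract above encodes a 3D fluorescence volume into a single 2D measurement through the phase mask, then recovers the volume with a sparsity-constrained inverse problem. As a simplified sketch only, assuming a shift-invariant PSF per depth plane (the paper's field-varying aberration model is omitted, and the function and variable names are invented for the example), the forward model can be written as a sum of per-depth FFT convolutions:

import numpy as np

def forward_model(volume, psfs):
    """Toy single-shot 3D forward model: the sensor image is the sum of each
    depth plane convolved with that depth's PSF (circular convolution here;
    a real implementation would pad and crop for linear convolution).

    volume: (D, H, W) fluorescence intensity at D depth planes.
    psfs:   (D, H, W) calibrated PSF for each depth plane.
    Returns the simulated (H, W) sensor measurement.
    """
    meas = np.zeros(volume.shape[1:])
    for plane, psf in zip(volume, psfs):
        meas += np.real(np.fft.ifft2(np.fft.fft2(plane) * np.fft.fft2(psf)))
    return meas

Reconstruction then amounts to inverting this operator under a sparsity prior, as in the FISTA sketch shown after the Spectral DiffuserCam entry below.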
Kristina Monakhova; Kyrollos Yanny; Neerja Aggarwal; Laura Waller
Spectral DiffuserCam: lensless snapshot hyperspectral imaging with a spectral filter array Journal Article
In: Optica, vol. 7, no. 10, pp. 1298–1307, 2020.
@article{Monakhova:20b,
title = {Spectral DiffuserCam: lensless snapshot hyperspectral imaging with a spectral filter array},
author = {Kristina Monakhova and Kyrollos Yanny and Neerja Aggarwal and Laura Waller},
url = {http://www.osapublishing.org/optica/abstract.cfm?URI=optica-7-10-1298},
doi = {10.1364/OPTICA.397214},
year = {2020},
date = {2020-10-01},
journal = {Optica},
volume = {7},
number = {10},
pages = {1298--1307},
publisher = {OSA},
abstract = {Hyperspectral imaging is useful for applications ranging from medical diagnostics to agricultural crop monitoring; however, traditional scanning hyperspectral imagers are prohibitively slow and expensive for widespread adoption. Snapshot techniques exist but are often confined to bulky benchtop setups or have low spatio-spectral resolution. In this paper, we propose a novel, compact, and inexpensive computational camera for snapshot hyperspectral imaging. Our system consists of a tiled spectral filter array placed directly on the image sensor and a diffuser placed close to the sensor. Each point in the world maps to a unique pseudorandom pattern on the spectral filter array, which encodes multiplexed spatio-spectral information. By solving a sparsity-constrained inverse problem, we recover the hyperspectral volume with sub-super-pixel resolution. Our hyperspectral imaging framework is flexible and can be designed with contiguous or non-contiguous spectral filters that can be chosen for a given application. We provide theory for system design, demonstrate a prototype device, and present experimental results with high spatio-spectral resolution.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
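The Spectral DiffuserCam abstract above recovers the hyperspectral volume by solving a sparsity-constrained inverse problem. As a generic, small-scale illustration only (not the paper's solver; here A is a dense matrix standing in for the combined diffuser and spectral-filter forward model), FISTA applied to 0.5*||Ax - y||^2 + lam*||x||_1 looks like this:

import numpy as np

def soft_threshold(v, t):
    # Proximal operator of t * ||.||_1.
    return np.sign(v) * np.maximum(np.abs(v) - t, 0.0)

def fista(A, y, lam=0.1, n_iters=200):
    """FISTA for min_x 0.5*||Ax - y||^2 + lam*||x||_1 with a dense matrix A."""
    L = np.linalg.norm(A, 2) ** 2            # Lipschitz constant of the data-term gradient
    x = np.zeros(A.shape[1])
    z, t = x.copy(), 1.0
    for _ in range(n_iters):
        grad = A.T @ (A @ z - y)             # gradient of the data term at z
        x_new = soft_threshold(z - grad / L, lam / L)
        t_new = 0.5 * (1.0 + np.sqrt(1.0 + 4.0 * t * t))
        z = x_new + ((t - 1.0) / t_new) * (x_new - x)   # Nesterov momentum
        x, t = x_new, t_new
    return x

In practice such solvers use matrix-free (e.g. FFT-based) forward and adjoint operators rather than forming A as a dense matrix.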
Grace Kuo; Kristina Monakhova; Kyrollos Yanny; Ren Ng; Laura Waller
Spatially-varying microscope calibration from unstructured sparse inputs Inproceedings
In: Imaging and Applied Optics Congress, pp. CF4C.4, Optical Society of America, 2020.
@inproceedings{Kuo:20,
title = {Spatially-varying microscope calibration from unstructured sparse inputs},
author = {Grace Kuo and Kristina Monakhova and Kyrollos Yanny and Ren Ng and Laura Waller},
url = {http://www.osapublishing.org/abstract.cfm?URI=COSI-2020-CF4C.4},
year = {2020},
date = {2020-01-01},
booktitle = {Imaging and Applied Optics Congress},
journal = {Imaging and Applied Optics Congress},
pages = {CF4C.4},
publisher = {Optical Society of America},
abstract = {We propose a method based on blind deconvolution to calibrate the spatially-varying point spread functions of a coded-aperture microscope system. From easy-to-acquire measurements of unstructured fluorescent beads, we recover a spatially-varying forward model that outperforms prior approaches.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Ellin Zhao; Nicolas Deshler; Kristina Monakhova; Laura Waller
Multi-sensor lensless imaging: synthetic large-format sensing with a disjoint sensor array Inproceedings
In: Imaging and Applied Optics Congress, pp. CF2C.6, Optical Society of America, 2020.
@inproceedings{Zhao:20,
title = {Multi-sensor lensless imaging: synthetic large-format sensing with a disjoint sensor array},
author = {Ellin Zhao and Nicolas Deshler and Kristina Monakhova and Laura Waller},
url = {http://www.osapublishing.org/abstract.cfm?URI=COSI-2020-CF2C.6},
year = {2020},
date = {2020-01-01},
booktitle = {Imaging and Applied Optics Congress},
journal = {Imaging and Applied Optics Congress},
pages = {CF2C.6},
publisher = {Optical Society of America},
abstract = {We demonstrate a lensless diffuser-based camera array for large field-of-view imaging. Images are captured from multiple disjoint sensors and the synthetic large format sensing area is recovered by solving a compressive sensing inverse problem.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Kristina Monakhova; Kyrollos Yanny; Laura Waller
Snapshot hyperspectral imaging using a random phase mask and spectral filter array Inproceedings
In: Imaging and Applied Optics Congress, pp. JF2F.4, Optical Society of America, 2020.
@inproceedings{Monakhova:20,
title = {Snapshot hyperspectral imaging using a random phase mask and spectral filter array},
author = {Kristina Monakhova and Kyrollos Yanny and Laura Waller},
url = {http://www.osapublishing.org/abstract.cfm?URI=COSI-2020-JF2F.4},
year = {2020},
date = {2020-01-01},
booktitle = {Imaging and Applied Optics Congress},
journal = {Imaging and Applied Optics Congress},
pages = {JF2F.4},
publisher = {Optical Society of America},
abstract = {We introduce a snapshot hyperspectral imager that uses a random phase mask, repeated spectral filter array, and compressive recovery to achieve high spatial and spectral resolution in a small form factor.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Kyrollos Yanny; Nick Antipa; William Liberti; Sam Dehaeck; Kristina Monakhova; Fanglin Linda Liu; Konlin Shen; Ren Ng; Laura Waller
Compressed Sensing 3D Fluorescence Microscopy Using Optimized Phase Mask Inproceedings
In: Imaging and Applied Optics Congress, pp. CW4B.5, Optical Society of America, 2020.
@inproceedings{Yanny:20,
title = {Compressed Sensing 3D Fluorescence Microscopy Using Optimized Phase Mask},
author = {Kyrollos Yanny and Nick Antipa and William Liberti and Sam Dehaeck and Kristina Monakhova and Fanglin Linda Liu and Konlin Shen and Ren Ng and Laura Waller},
url = {http://www.osapublishing.org/abstract.cfm?URI=COSI-2020-CW4B.5},
year = {2020},
date = {2020-01-01},
booktitle = {Imaging and Applied Optics Congress},
journal = {Imaging and Applied Optics Congress},
pages = {CW4B.5},
publisher = {Optical Society of America},
abstract = {We demonstrate a single-shot miniature 3D computational microscope with an optimized phase encoder. Our method uses sparsity-based reconstruction to achieve 2.76 μm lateral and 15 μm axial resolution across most of the 900 × 700 × 390 μm³ volume.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
2019
Kristina Monakhova; Joshua Yurtsever; Grace Kuo; Nick Antipa; Kyrollos Yanny; Laura Waller
Learned reconstructions for practical mask-based lensless imaging Journal Article
In: Optics Express, vol. 27, no. 20, pp. 28075–28090, 2019.
@article{monakhova2019learned,
title = {Learned reconstructions for practical mask-based lensless imaging},
author = {Kristina Monakhova and Joshua Yurtsever and Grace Kuo and Nick Antipa and Kyrollos Yanny and Laura Waller},
url = {https://doi.org/10.1364/OE.27.028075},
doi = {10.1364/OE.27.028075},
year = {2019},
date = {2019-09-30},
journal = {Optics Express},
volume = {27},
number = {20},
pages = {28075--28090},
publisher = {Optical Society of America},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Kristina Monakhova; Joshua Yurtsever; Grace Kuo; Nick Antipa; Kyrollos Yanny; Laura Waller
Unrolled, model-based networks for lensless imaging Journal Article
2019.
@article{monakhova2019unrolled,
title = {Unrolled, model-based networks for lensless imaging},
author = {Kristina Monakhova and Joshua Yurtsever and Grace Kuo and Nick Antipa and Kyrollos Yanny and Laura Waller},
url = {https://pdfs.semanticscholar.org/6a49/3ac2a0c8a3be888ece00b52bc1ec013df2bd.pdf},
year = {2019},
date = {2019-09-14},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Kristina Monakhova; Nick Antipa; Laura Waller
Learning for lensless mask-based imaging Inproceedings
In: Computational Optical Sensing and Imaging, pp. CTu3A.2, Optical Society of America, 2019.
@inproceedings{monakhova2019learning,
title = {Learning for lensless mask-based imaging},
author = {Kristina Monakhova and Nick Antipa and Laura Waller},
url = {https://www.osapublishing.org/abstract.cfm?uri=COSI-2019-CTu3A.2},
year = {2019},
date = {2019-06-24},
booktitle = {Computational Optical Sensing and Imaging},
pages = {CTu3A.2},
organization = {Optical Society of America},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}