Theses
2023
Stuart Sherwin
Modeling, Designing, and Measuring EUV Photomasks PhD Thesis
EECS Department, University of California, Berkeley, 2023.
@phdthesis{Sherwin:EECS-2023-37,
title = {Modeling, Designing, and Measuring EUV Photomasks},
author = {Stuart Sherwin},
url = {http://www2.eecs.berkeley.edu/Pubs/TechRpts/2023/EECS-2023-37.html},
year = {2023},
date = {2023-05-01},
number = {UCB/EECS-2023-37},
school = {EECS Department, University of California, Berkeley},
abstract = {We present a selection of topics relating to modeling, designing, and measuring EUV (Extreme Ultraviolet) photomasks, with implications for high-volume nanofabrication of integrated circuits. These EUV photomasks must be accurately designed, but rigorously modeling large domains is extremely computationally intensive; we introduce an approximate Fresnel Double Scattering model which is 10,000x faster. This approximation can predict the trend of phase vs pitch, which is critical to designing EUV phase shift masks (PSMs). We also explore novel mask architectures to improve efficiency and contrast, such as an etched multilayer PSM (up to 6x throughput but restrictive applicability), aperiodic multilayers (up to +22% throughput and more general applicability), and multilayers with minimal propagation distance at certain angles (lower throughput but higher contrast with minimized 3D effects). Finally we explore computational metrology with EUV reflectometry, scatterometry, and imaging for probing the phase and amplitude response of an EUV mask, with experimental demonstrations at the Advanced Light Source synchrotron. We perform reflectometry experiments on 3 masks with different architectures to infer approximately 25 physical film parameters each. Another reflectometry application to contamination monitoring achieved single-picometer precision for thickness (3σ < 6pm) and sub-degree precision for phase (3σ < 0.2deg). We compare two implementations of phase scatterometry, either applying nonlinear optimization with approximate scattering, or linearizing the rigorous scattering relationship between intensity and phase; linearization is shown to generally be more accurate, but both methods have similar precision. We apply novel software and hardware for phase imaging, using PhaseLift convex phase retrieval, combined with a set of custom Zernike Phase Contrast (ZPC) zone plates. We perform hyperspectral ZPC phase imaging on 3 masks, where we see promising agreement with reflectometry in the trend of phase vs wavelength.},
keywords = {},
pubstate = {published},
tppubtype = {phdthesis}
}
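The reflectometry work summarized above fits on the order of 25 physical film parameters per mask to measured reflectance. The toy sketch below illustrates only the basic idea of fitting a film thickness to reflectance versus wavelength, using a single-layer, normal-incidence Fresnel model with made-up, dispersion-free optical constants; it is not the rigorous multilayer model used in the thesis.

```python
# Toy single-layer reflectance fit (illustrative only; the thesis uses rigorous
# multilayer models with ~25 parameters). Optical constants are made up and
# held fixed across wavelength for simplicity.
import numpy as np

def film_reflectance(d_nm, wl_nm, n_film, n_sub, n_amb=1.0):
    """Normal-incidence intensity reflectance of one film on a substrate."""
    r01 = (n_amb - n_film) / (n_amb + n_film)        # ambient/film interface
    r12 = (n_film - n_sub) / (n_film + n_sub)        # film/substrate interface
    beta = 2 * np.pi * n_film * d_nm / wl_nm         # one-way phase thickness
    r = (r01 + r12 * np.exp(2j * beta)) / (1 + r01 * r12 * np.exp(2j * beta))
    return np.abs(r) ** 2

wl = np.linspace(11.0, 16.0, 51)          # wavelength scan in nm (illustrative)
n_film = 0.96 + 0.01j                      # assumed film index
n_sub = 0.92 + 0.04j                       # assumed substrate index

rng = np.random.default_rng(0)
d_true = 2.3                               # nm, "contamination" thickness to recover
data = film_reflectance(d_true, wl, n_film, n_sub) + rng.normal(0, 1e-5, wl.shape)

# Brute-force least-squares search over thickness, a stand-in for the nonlinear
# parameter fitting described in the abstract.
candidates = np.linspace(0.0, 5.0, 5001)   # 1 pm steps
best = min(candidates,
           key=lambda d: np.sum((film_reflectance(d, wl, n_film, n_sub) - data) ** 2))
print(f"recovered thickness: {best:.3f} nm (true {d_true} nm)")
```

A real EUV fit would use a full multilayer recursion, measured incidence angles, and many more parameters (roughness, densities, capping layers); this sketch only shows the fit-to-reflectance idea.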
2022
Linda Liu
Single-Shot 3D Microscopy: Optics and Algorithms Co-Design PhD Thesis
EECS Department, University of California, Berkeley, 2022.
@phdthesis{Liu:EECS-2022-224,
title = {Single-Shot 3D Microscopy: Optics and Algorithms Co-Design},
author = {Linda Liu},
url = {http://www2.eecs.berkeley.edu/Pubs/TechRpts/2022/EECS-2022-224.html},
year = {2022},
date = {2022-09-01},
number = {UCB/EECS-2022-224},
school = {EECS Department, University of California, Berkeley},
abstract = {Computational imaging involves simultaneously designing optical hardware and reconstruction software. Such a co-design framework brings together the best of both worlds for an imaging system. The goal of this dissertation is to develop a high-speed, high-resolution, and large field-of-view microscope that can detect 3D fluorescence signals from a single image acquisition. To achieve this goal, I propose a new method called Fourier DiffuserScope, a single-shot 3D fluorescence microscope that uses a phase mask (i.e., a diffuser with random microlenses) in the Fourier plane to encode 3D information, then computationally reconstructs the volume by solving a sparsity-constrained inverse problem.
In this dissertation, I will discuss the design principles of the Fourier DiffuserScope from three perspectives: first-principles optics, compressed sensing theory, and physics-based machine learning. First, in the heuristic design, the phase mask consists of randomly placed microlenses with varying focal lengths; the random positions provide a larger field-of-view compared to a conventional microlens array, and the diverse focal lengths improve the axial depth range. I then build an experimental system that achieves less than 3 um lateral and 4 um axial resolution over a 1000x1000x280 um^3 volume. Lastly, we use a differentiable forward model of Fourier DiffuserScope in conjunction with a differentiable reconstruction algorithm to jointly optimize both the phase mask surface profile and the reconstruction parameters. We validate our method in 2D and 3D single-shot imaging, where the optimized diffuser demonstrates improved reconstruction quality compared to previous heuristic designs.},
keywords = {},
pubstate = {published},
tppubtype = {phdthesis}
}
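As a companion to the entry above, here is a minimal sketch of the kind of sparsity-constrained inverse problem a single-shot 3D microscope solves: the sensor image is modeled as a sum over depth of each slice convolved with a depth-dependent PSF, and the volume is recovered with FISTA (accelerated proximal gradient with an L1 prior). The Gaussian PSFs, sizes, and parameters are placeholders, not measured DiffuserScope PSFs.

```python
# Toy 2-depth version of the single-shot 3D reconstruction problem: the sensor
# image is the sum over depth of slice * PSF_z, recovered with FISTA + L1.
# Gaussian PSFs stand in for measured diffuser/microlens PSFs.
import numpy as np

def fft_conv(x, h):
    return np.real(np.fft.ifft2(np.fft.fft2(x) * np.fft.fft2(np.fft.ifftshift(h))))

def forward(vol, psfs):                      # A: (Z, N, N) volume -> (N, N) image
    return sum(fft_conv(vol[z], psfs[z]) for z in range(len(psfs)))

def adjoint(img, psfs):                      # A^T: correlate with each PSF
    F = np.fft.fft2(img)
    return np.stack([np.real(np.fft.ifft2(F * np.conj(np.fft.fft2(np.fft.ifftshift(h)))))
                     for h in psfs])

def gaussian_psf(n, sigma):
    y, x = np.mgrid[-n // 2:n // 2, -n // 2:n // 2]
    h = np.exp(-(x**2 + y**2) / (2 * sigma**2))
    return h / h.sum()

n, depths = 64, 2
psfs = np.stack([gaussian_psf(n, s) for s in (1.5, 4.0)])    # one PSF per depth
truth = np.zeros((depths, n, n)); truth[0, 20, 20] = 1.0; truth[1, 40, 44] = 1.0
meas = forward(truth, psfs)

# FISTA on 0.5*||A x - y||^2 + lam*||x||_1
step, lam = 0.4, 1e-4
x = np.zeros_like(truth); z = x.copy(); t = 1.0
for _ in range(300):
    g = adjoint(forward(z, psfs) - meas, psfs)
    x_new = z - step * g
    x_new = np.sign(x_new) * np.maximum(np.abs(x_new) - step * lam, 0.0)
    t_new = (1 + np.sqrt(1 + 4 * t * t)) / 2
    z = x_new + ((t - 1) / t_new) * (x_new - x)
    x, t = x_new, t_new

print("brightest recovered voxel per depth:",
      [tuple(int(i) for i in np.unravel_index(np.argmax(x[zz]), (n, n)))
       for zz in range(depths)])
```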
Kristina Monakhova
Physics-Informed Machine Learning for Computational Imaging PhD Thesis
EECS Department, University of California, Berkeley, 2022.
@phdthesis{Monakhova:EECS-2022-177,
title = {Physics-Informed Machine Learning for Computational Imaging},
author = {Kristina Monakhova},
url = {http://www2.eecs.berkeley.edu/Pubs/TechRpts/2022/EECS-2022-177.html},
year = {2022},
date = {2022-07-01},
urldate = {2022-07-01},
number = {UCB/EECS-2022-177},
school = {EECS Department, University of California, Berkeley},
abstract = {A key aspect of many computational imaging systems, from compressive cameras to low-light photography, is the algorithms used to uncover the signal from encoded or noisy measurements. Some computational cameras encode higher-dimensional information (e.g. different wavelengths of light, 3D, time) onto a 2-dimensional sensor, then use algorithms to decode and recover this high-dimensional information. Others capture measurements that are extremely noisy, or degraded, and require algorithms to extract the signal and make the images usable by people, or by higher-level downstream algorithms. In each case, the algorithms used to decode and extract information from raw measurements are critical and necessary to make computational cameras function. Over the years, the predominant classic methods for recovering information from computational cameras have been based on minimizing an optimization problem consisting of a data term and a hand-picked prior term. More recently, deep learning has been applied to these problems, but often has no way to incorporate known optical characteristics, requires large training datasets, and results in black-box models that cannot easily be interpreted. In this dissertation, we present physics-informed machine learning for computational imaging, which is a middle-ground approach that combines elements of classic methods with deep learning. We show how to incorporate knowledge of the imaging system physics into neural networks to improve image quality and performance beyond what is feasible with either classic or deep methods for several computational cameras. We show several different ways to incorporate imaging physics into neural networks, including algorithm unrolling, differentiable optical models, unsupervised methods, and generative adversarial networks. For each of these methods, we focus on a different computational camera with unique challenges and modeling considerations. First, we introduce an unrolled, physics-informed network that improves the quality and reconstruction time of lensless cameras, improving these cameras and showing photorealistic image quality on a variety of scenes. Building on this, we demonstrate a new reconstruction network that can improve the reconstruction time for compressive, single-shot 3D microscopy with spatially-varying blur by 1,600X, enabling interactive previewing of the scene. In cases where training data is hard to acquire, we show that an untrained physics-informed network can improve image quality for compressive single-shot video and hyperspectral imaging without the need for training data. Finally, we design a physics-informed noise generator that can realistically synthesize noise at extremely high-gain, low-light settings. Using this learned noise model, we show how we can push a camera past its typical limit and take photorealistic videos at starlight levels of illumination for the first time. Each case highlights how using physics-informed machine learning can improve computational cameras and push them to their limits.},
keywords = {},
pubstate = {published},
tppubtype = {phdthesis}
}
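The abstract above mentions algorithm unrolling as one way to build imaging physics into a network. The sketch below shows the bare pattern only, not the thesis architecture: a few proximal-gradient iterations with a fixed, known forward model (circular convolution with a PSF) and learnable per-iteration step sizes and thresholds, trained end to end on synthetic sparse scenes.

```python
# Bare-bones algorithm unrolling (pattern only, not the thesis architecture):
# each "layer" is one proximal-gradient step that uses the known forward model
# (circular convolution with a fixed PSF); step sizes and soft-threshold levels
# are the learnable parameters.
import torch
import torch.nn as nn

class UnrolledISTA(nn.Module):
    def __init__(self, psf, n_iters=5):
        super().__init__()
        self.register_buffer("H", torch.fft.fft2(torch.fft.ifftshift(psf)))
        self.step = nn.Parameter(torch.full((n_iters,), 0.5))
        self.thresh = nn.Parameter(torch.full((n_iters,), 1e-3))

    def A(self, x):       # known physics: blur by the PSF
        return torch.fft.ifft2(torch.fft.fft2(x) * self.H).real

    def At(self, y):      # adjoint of the forward model
        return torch.fft.ifft2(torch.fft.fft2(y) * torch.conj(self.H)).real

    def forward(self, y):
        x = torch.zeros_like(y)
        for k in range(len(self.step)):
            x = x - self.step[k] * self.At(self.A(x) - y)
            x = torch.sign(x) * torch.clamp(x.abs() - self.thresh[k], min=0.0)
        return x

# Train on synthetic sparse scenes blurred by a Gaussian PSF.
n = 64
yy, xx = torch.meshgrid(torch.arange(n) - n // 2, torch.arange(n) - n // 2, indexing="ij")
psf = torch.exp(-(xx**2 + yy**2) / (2 * 3.0**2)); psf = psf / psf.sum()

net = UnrolledISTA(psf)
opt = torch.optim.Adam(net.parameters(), lr=1e-2)
for _ in range(200):
    truth = (torch.rand(n, n) < 0.01).float()
    meas = net.A(truth) + 0.001 * torch.randn(n, n)
    loss = torch.mean((net(meas) - truth) ** 2)
    opt.zero_grad(); loss.backward(); opt.step()
print("final training loss:", float(loss))
```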
Regina Eckert
Robust 3D Quantitative Phase Imaging PhD Thesis
EECS Department, University of California, Berkeley, 2022.
@phdthesis{Eckert:EECS-2022-29,
title = {Robust 3D Quantitative Phase Imaging},
author = {Regina Eckert},
url = {http://www2.eecs.berkeley.edu/Pubs/TechRpts/2022/EECS-2022-29.html},
year = {2022},
date = {2022-05-01},
urldate = {2022-05-01},
number = {UCB/EECS-2022-29},
school = {EECS Department, University of California, Berkeley},
abstract = {Biomedical research relies upon quantitative imaging methods to measure functional and structural data about microscopic organisms. Recently-developed quantitative phase imaging (QPI) methods use jointly designed optical and computational systems to recover structural quantitative phase information for biological samples. However, these methods have not seen wide adoption in biological research because the optical systems can be difficult to use and the computational algorithms often require expert operation for consistently high-quality results. QPI systems are usually developed under a computational imaging framework, where the optical measurement system is jointly designed with the computational reconstruction algorithm. Designing QPI systems for robust and practical real-world use is often difficult, however, because each imaging and computational configuration has unique and difficult-to-quantify practical implications for the end-user.
In this dissertation, I present three frameworks for increasing the robustness and practicality of computational imaging systems, and I demonstrate the usefulness of these three frameworks by applying them to 2D and 3D quantitative phase imaging systems. First, algorithmic self-calibration directly recovers imaging system parameters from data measurements, doing away with the need for extensive pre-calibration steps and ensuring greater calibration accuracy for non-ideal, real-world systems. I present a robust and efficient self-calibration algorithm for angled coherent illumination, which has enabled new QPI system designs for 2D Fourier ptychographic microscopy (FPM) and 3D intensity optical diffraction tomography (ODT) that would have otherwise been infeasible. Second, increased measurement diversity better encodes useful information across measurements, which can reduce imaging system complexity, data requirements, and computation time. I present a novel pupil-coded intensity ODT system designed to increase measurement diversity of 3D refractive index (RI) information by including joint illumination- and detection-side coding for improved volumetric RI reconstructions. Finally, physics-based machine learning uses a data-driven approach to directly optimize imaging system parameters, which can improve imaging reconstructions and build intuition for better designs of complicated computational imaging systems. I show results from a physics-based machine learning algorithm to optimize pupil coding masks for 3D RI reconstructions of thick cell clusters in the pupil-coded intensity ODT system.
In addition, I provide practical methods for the design, calibration, and operation of Fourier ptychography, intensity-only ODT, and pupil-coded intensity ODT microscopes to aid in the future development of robust QPI systems. I additionally present a validation of joint system pupil recovery using FPM and a comparison of the accuracy and computational complexity of coherent light propagation models that are commonly used in 3D quantitative phase imaging. I also compare field-based 3D RI reconstructions to intensity-based RI reconstructions, concluding that the proposed pupil-coded intensity ODT system captures similarly diverse phase information to field-based ODT microscopes.
Throughout this work, I demonstrate that by using the frameworks of algorithmic self-calibration, increased system measurement diversity, and physics-based machine learning for computational imaging system design, we can develop more robust quantitative phase imaging systems that are practical for real-world use.},
keywords = {},
pubstate = {published},
tppubtype = {phdthesis}
}
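One concrete instance of the algorithmic self-calibration idea above: under tilted coherent illumination, the spectrum of a brightfield intensity image contains pupil-sized circular supports centered at plus/minus the illumination spatial frequency, so the illumination angle can be estimated from the data itself by locating those circular edges. The sketch below is a simplified toy version (synthetic weak object, ring-template matching), not the thesis implementation.

```python
# Toy self-calibration of a single illumination angle (simplified; not the
# thesis implementation). All sizes and the "true" angle are made up.
import numpy as np

n, R = 256, 40                                 # image size, pupil radius (freq. pixels)
u_true = np.array([18, -25])                   # illumination spatial frequency to find

rng = np.random.default_rng(1)
obj = 1.0 + 0.05 * rng.standard_normal((n, n))            # weak object, strong DC
yy, xx = np.meshgrid(np.arange(n) - n // 2, np.arange(n) - n // 2, indexing="ij")
pupil = (yy**2 + xx**2) <= R**2

# Simulate a coherent brightfield image under tilted plane-wave illumination.
tilt = np.exp(2j * np.pi * (u_true[0] * yy + u_true[1] * xx) / n)
spec = np.fft.fftshift(np.fft.fft2(obj * tilt)) * pupil
img = np.abs(np.fft.ifft2(np.fft.ifftshift(spec))) ** 2

# The log-spectrum of the intensity image shows circular supports centered at
# +/- u_true; locate them by matching a ring of the pupil radius to the edges.
logspec = np.log1p(np.abs(np.fft.fftshift(np.fft.fft2(img))))
gy, gx = np.gradient(logspec)
edges = np.hypot(gy, gx)
ring = (np.abs(np.hypot(yy, xx) - R) < 1.0).astype(float)
corr = np.real(np.fft.ifft2(np.fft.fft2(edges) * np.conj(np.fft.fft2(ring))))
est = np.array(np.unravel_index(np.argmax(corr), corr.shape))
est = (est + n // 2) % n - n // 2                          # wrap to signed offsets
print("estimated (up to sign):", tuple(int(v) for v in est),
      "true:", tuple(int(v) for v in u_true))
```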
Gautam Gunjala
Towards diffraction-limited short-wavelength imaging systems PhD Thesis
EECS Department, University of California, Berkeley, 2022.
@phdthesis{Gunjala:EECS-2022-117,
title = {Towards diffraction-limited short-wavelength imaging systems},
author = {Gautam Gunjala},
url = {http://www2.eecs.berkeley.edu/Pubs/TechRpts/2022/EECS-2022-117.html},
year = {2022},
date = {2022-05-01},
urldate = {2022-05-01},
number = {UCB/EECS-2022-117},
school = {EECS Department, University of California, Berkeley},
abstract = {Modern applications of optics, especially those which require shorter wavelengths of light, place ever-increasing demands on the performance of optical tools and systems. Working with extreme ultraviolet, soft x-ray and hard x-ray light poses complex limitations and challenges to diagnosing and maintaining diffraction-limited performance by measuring and controlling optical aberrations. By utilizing computational methods such as optimization and machine learning, we show that some of these limitations can be circumvented without sacrificing accuracy or precision.
In this work, we discuss a method for aberration measurement that is based on an analysis of speckle images acquired in situ. By using a stationary random object, our method eliminates the need for precise manufacturing and alignment of a test target. Moreover, the method provides a full, dense characterization of the optical system under test using relatively few images. The method has been successfully applied to an EUV microscope system, and is shown to be accurate to within λ/180. We also discuss a method for aberration compensation via the characterization and control of an adaptive optical element for x-ray optical systems. Adaptive x-ray optics are a relatively new technology, and our work aims to enable their use within the specifications of synchrotron beamline systems. To this end, we demonstrate the ability to experimentally predict and control the behavior of the glancing-incidence deformable mirror surface to within 2 nm rms, allowing the application of sub-wavelength corrections to an incident wavefront.},
keywords = {},
pubstate = {published},
tppubtype = {phdthesis}
}
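For the adaptive-optics half of the abstract above, the core computation is linear: given an influence matrix describing how each actuator deforms the mirror surface, the actuator commands that best cancel a target figure error follow from least squares. The sketch below uses synthetic Gaussian influence functions and a made-up target; a real deformable x-ray mirror would use measured responses.

```python
# Least-squares actuator fit for a deformable mirror (illustrative; synthetic
# Gaussian influence functions and a made-up target figure error).
import numpy as np

n_points, n_actuators = 200, 12
x = np.linspace(0.0, 1.0, n_points)                   # position along the mirror
centers = np.linspace(0.05, 0.95, n_actuators)

# Influence matrix A: column j is the surface response (nm/V) of actuator j.
A = np.exp(-((x[:, None] - centers[None, :]) ** 2) / (2 * 0.06**2))

# Target correction (nm): a smooth low-order figure error to remove.
target = 5.0 * (x - 0.5) ** 2 - 1.0 * (x - 0.3) ** 3

v, *_ = np.linalg.lstsq(A, target, rcond=None)        # actuator commands (V)
residual = target - A @ v
print(f"residual figure error: {np.std(residual):.4f} nm rms "
      f"(target was {np.std(target):.3f} nm rms)")
```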
Kyrollos Yanny
Optics and Algorithms for Designing Miniature Computational Cameras and Microscopes PhD Thesis
2022, ISBN: 9798352951132.
@phdthesis{nokey,
title = {Optics and Algorithms for Designing Miniature Computational Cameras and Microscopes},
author = {Kyrollos Yanny},
url = {https://www.proquest.com/dissertations-theses/optics-algorithms-designing-miniature/docview/2738519745/se-2},
isbn = {9798352951132},
year = {2022},
date = {2022-01-01},
urldate = {2022-01-01},
journal = {ProQuest Dissertations and Theses},
pages = {106},
abstract = {Traditional cameras and microscopes are often optimized to produce sharp 2D images of the object. These 2D images miss important information about the world (e.g. depth and spectrum). Access to this information can make a significant impact on fields such as neuroscience, medicine, and robotics. For example, volumetric neural imaging in freely moving animals requires compact head-mountable 3D microscopes, and tumor classification in tissue benefits from access to spectral information. Modifications that enable capturing these extra dimensions often result in bulky, expensive, and complex imaging setups. In this dissertation, I focus on designing compact single-shot computational imaging systems that can capture high dimensional information (depth and spectrum) about the world. This is achieved by using a multiplexing optic as the image capture hardware and formulating image recovery as a convex optimization problem. First, I discuss designing a single-shot compact miniature 3D fluorescence microscope, termed Miniscope3D. By placing an optimized multifocal phase mask at the objective’s exit pupil, 3D fluorescence intensity is encoded into a single 2D measurement and the 3D volume can be recovered by solving a sparsity constrained inverse problem. This enables a 2.76 micron lateral and 15 micron axial resolution across a 900x700x390 micron cubed volume at 40 volumes per second in a device smaller than a U.S. quarter. Second, I discuss designing a single-shot hyperspectral camera, termed Spectral DiffuserCam, by combining a diffuser with a tiled spectral filter array. This enables recovering a hyperspectral volume with higher spatial resolution than the spectral filter alone. The system is compact, flexible, and can be designed with contiguous or non-contiguous spectral filters tailored to a given application. Finally, the iterative reconstruction methods generally used for compressed sensing take thousands of iterations to converge and rely on hand-tuned priors. I discuss a deep learning architecture, termed MultiWienerNet, that uses multiple differentiable Wiener filters paired with a convolutional neural network to take into account the system’s spatially-varying point spread functions. The result is a 625-1600X increase in speed compared to iterative methods with spatially-varying models and better reconstruction quality than deep learning methods that assume shift invariance.},
keywords = {},
pubstate = {published},
tppubtype = {phdthesis}
}
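A sketch of the front end of a MultiWienerNet-style reconstruction as described above: several differentiable Wiener deconvolution filters with learnable PSFs and noise levels are applied in parallel, and their outputs are stacked as channels for a downstream CNN (omitted here). Shapes and initial PSFs are illustrative, not the published model.

```python
# MultiWienerNet-style front end (sketch): K differentiable Wiener filters with
# learnable PSFs and noise levels; a refining CNN would consume the K channels.
import torch
import torch.nn as nn

class MultiWiener(nn.Module):
    def __init__(self, init_psfs):                    # init_psfs: (K, H, W)
        super().__init__()
        self.psfs = nn.Parameter(init_psfs.clone())
        self.log_noise = nn.Parameter(torch.zeros(init_psfs.shape[0]))

    def forward(self, y):                             # y: (B, 1, H, W) measurement
        H = torch.fft.fft2(torch.fft.ifftshift(self.psfs, dim=(-2, -1)))
        Y = torch.fft.fft2(y)
        wiener = torch.conj(H) / (H.abs() ** 2 + torch.exp(self.log_noise)[:, None, None])
        return torch.fft.ifft2(Y * wiener).real       # (B, K, H, W) intermediate

K, H, W = 4, 64, 64
psfs0 = torch.rand(K, H, W)
psfs0 = psfs0 / psfs0.sum(dim=(-2, -1), keepdim=True)
net = MultiWiener(psfs0)
out = net(torch.randn(2, 1, H, W))
print(out.shape)                                      # torch.Size([2, 4, 64, 64])
```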
Henry B. Pinkard
Data-Driven Information-Optimal Computational Microscopy PhD Thesis
2022, ISBN: 9798380367639.
@phdthesis{nokey,
title = {Data-Driven Information-Optimal Computational Microscopy},
author = {Henry B. Pinkard},
url = {https://www.proquest.com/dissertations-theses/data-driven-information-optimal-computational/docview/2867205316/se-2},
isbn = {9798380367639},
year = {2022},
date = {2022-01-01},
urldate = {2022-01-01},
journal = {ProQuest Dissertations and Theses},
pages = {229},
abstract = {Optical microscopes have been an indispensable tool in biology and medicine for over three centuries. Unlike their simple predecessors, contemporary microscopes often employ complex robotic automation and customized algorithms. In the past decade, advances in high-performance computer processors, the ease of collecting massive datasets, and machine learning have created many new possibilities for data-driven approaches to microscope control and image analysis. This dissertation covers the challenges and opportunities in modern microscopy. First, it shows how neural networks can be used to create microscopes that adapt to the samples they are imaging in real time. For example, this paradigm can be used to quickly focus microscopes using inexpensive hardware or visualize developing immune responses at large scales. Next, new open-source software that facilitates development of these and other microscopy techniques is presented. The dissertation then turns to how microscopes can make measurements of the intrinsic optical properties of cells, from which their biological function can be inferred. Development of techniques that do so requires comparing approaches on standardized datasets, and the creation of such a dataset containing hundreds of thousands of images of single cells is described. Finally, a new theoretical framework for modeling the information transmission of both microscopes and image-processing algorithms is introduced. This perspective provides a new set of engineering principles for microscopes and opens a range of new research questions.},
keywords = {},
pubstate = {published},
tppubtype = {phdthesis}
}
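As a loose illustration of the "microscopes that adapt in real time" theme above, the sketch below shows one way a network could regress defocus from a single image so a stage controller could refocus without a focal sweep. The architecture, sizes, and random stand-in data are placeholders and are not the model from the dissertation.

```python
# Placeholder sketch of learning to focus from one image (not the dissertation
# model): a tiny CNN regresses defocus distance; real training data would come
# from focal stacks with known stage positions. Everything here is synthetic.
import torch
import torch.nn as nn

class DefocusRegressor(nn.Module):
    def __init__(self):
        super().__init__()
        self.features = nn.Sequential(
            nn.Conv2d(1, 16, 3, stride=2, padding=1), nn.ReLU(),
            nn.Conv2d(16, 32, 3, stride=2, padding=1), nn.ReLU(),
            nn.AdaptiveAvgPool2d(1),
        )
        self.head = nn.Linear(32, 1)                  # predicted defocus (e.g. in um)

    def forward(self, img):
        return self.head(self.features(img).flatten(1)).squeeze(-1)

model = DefocusRegressor()
images = torch.randn(8, 1, 128, 128)                  # stand-in focal-stack crops
true_defocus = torch.randn(8)                         # stand-in ground truth
loss = nn.functional.mse_loss(model(images), true_defocus)
loss.backward()                                       # one illustrative training step
print(float(loss))
```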
2021
Yonghuan David Ren
Three-Dimensional Phase Contrast Electron Tomography For Multiple Scattering Samples PhD Thesis
EECS Department, University of California, Berkeley, 2021.
@phdthesis{Ren:EECS-2021-250,
title = {Three-Dimensional Phase Contrast Electron Tomography For Multiple Scattering Samples},
author = {Yonghuan David Ren},
url = {http://www2.eecs.berkeley.edu/Pubs/TechRpts/2021/EECS-2021-250.html},
year = {2021},
date = {2021-12-01},
urldate = {2021-12-01},
number = {UCB/EECS-2021-250},
school = {EECS Department, University of California, Berkeley},
abstract = {Three-dimensional (3D) electron tomography (ET) is used to understand the structure and properties of samples, for applications in chemistry, materials science, and biology. By illuminating the sample at many tilt angles using an electron probe and modelling the image formation process, 3D information can be reconstructed at a resolution beyond the optical diffraction limit. However, as samples become thicker and more scattering, simple image formation models assuming projections or single scattering are no longer valid, causing the reconstruction quality to degrade. In this work, we develop a framework that takes the non-linear image formation process into account by modelling multiple-scattering events between the electron probe and the sample. First, the general acquisition and inverse model to recover multiple-scattering samples is introduced. We mathematically derive both the forward multi-slice scattering method as well as the gradient calculations in order to solve the inverse problem with optimization. In addition, with regularization, the framework is robust in low-dose tomography applications. Second, we demonstrate in simulation the validity of our method by varying different experimental parameters such as tilt angles, defocus values and dosage. Next, we test our ET framework experimentally on a multiple-scattering Montmorillonite clay, a 2D material submerged in aqueous solution and vitrified under cryogenic temperature. The results demonstrate the ability to observe the electric double layer (EDL) of this material for the first time. Last but not least, because modern electron detectors have large pixel counts and current imaging applications require large volume reconstructions, we developed a distributed computing method that can be directly applied to our framework for imaging multiple-scattering samples. Instead of solving for the 3D sample on a single computer node, we utilize tens or hundreds of nodes on a compute cluster simultaneously, with each node solving for part of the volume. As a result, both high-resolution sample features and macroscopic sample topology can be visualized at the same time.},
keywords = {},
pubstate = {published},
tppubtype = {phdthesis}
}
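The multiple-scattering forward model referenced above is typically a multi-slice (beam propagation) calculation: the wave is alternately modulated by thin object slices and Fresnel-propagated between them. The sketch below shows only this forward pass with arbitrary units; the thesis additionally derives the gradients needed to invert it.

```python
# Multi-slice forward model (illustrative units, not electron-optics values):
# modulate by each thin slice, then Fresnel-propagate to the next one.
import numpy as np

def fresnel_kernel(n, dx, dz, wavelength):
    f = np.fft.fftfreq(n, d=dx)
    fy, fx = np.meshgrid(f, f, indexing="ij")
    return np.exp(-1j * np.pi * wavelength * dz * (fx**2 + fy**2))

def multislice(phase_slices, dx, dz, wavelength):
    """Exit wave of a unit plane wave passed through a stack of phase slices."""
    n = phase_slices.shape[-1]
    kernel = fresnel_kernel(n, dx, dz, wavelength)
    psi = np.ones((n, n), dtype=complex)
    for phase in phase_slices:
        psi = psi * np.exp(1j * phase)                    # thin-slice transmission
        psi = np.fft.ifft2(np.fft.fft2(psi) * kernel)     # propagate to next slice
    return psi

rng = np.random.default_rng(0)
slices = 0.1 * rng.standard_normal((20, 128, 128))        # 20 weak phase slices
exit_wave = multislice(slices, dx=1.0, dz=5.0, wavelength=0.02)
image = np.abs(exit_wave) ** 2                            # what the detector records
print("mean / std of simulated image:", image.mean(), image.std())
```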
2020
Grace Kuo
Exploiting Randomness in Computational Cameras and Displays PhD Thesis
EECS Department, University of California, Berkeley, 2020.
@phdthesis{Kuo:EECS-2020-218,
title = {Exploiting Randomness in Computational Cameras and Displays},
author = {Grace Kuo},
url = {http://www2.eecs.berkeley.edu/Pubs/TechRpts/2020/EECS-2020-218.html},
year = {2020},
date = {2020-12-01},
number = {UCB/EECS-2020-218},
school = {EECS Department, University of California, Berkeley},
abstract = {Despite its desirability, capturing and displaying higher dimensional content is still a novelty since image sensors and display panels are inherently 2D. A popular option is to use scanning mechanisms to sequentially capture 3D data or display content at a variety of depths. This approach is akin to directly measuring (or displaying) the content of interest, which has low computational cost but sacrifices temporal resolution and requires complex physical hardware with moving parts. The exacting specifications on the hardware make it challenging to miniaturize these optical systems for demanding applications such as neural imaging in animals or head-mounted augmented reality displays.
In this dissertation, I propose moving the burden of 3D capture from hardware into computation by replacing the physical scanning mechanisms with a simple static diffuser (a transparent optical element with pseudorandom thickness) and formulating image recovery as an optimization problem. First, I highlight the versatility of the diffuser by showing that it can replace a lens to create an easy-to-assemble, compact camera that is robust to missing pixels; although the raw data is not intelligible by a human, it contains information that we extract with optimization using an efficient physically-based model of the optics. Next, I show that the randomness of the diffuser makes the system well-suited for compressed sensing; we leverage this to recover 3D volumes from a single acquisition of raw data. Additionally, I extend our lensless 3D imaging system to fluorescence microscopy and introduce a new diffuser design with improved noise performance. Finally, I show how incorporating the diffuser in a 3D holographic display expands the field-of-view, and I demonstrate state-of-the-art performance by using perceptually inspired loss functions when optimizing the display panel pattern. These results show how randomness in the optical system in conjunction with optimization-based algorithms can both improve the physical form factor and expand the capabilities of cameras, microscopes, and displays.},
keywords = {},
pubstate = {published},
tppubtype = {phdthesis}
}
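The "efficient physically-based model of the optics" mentioned above is, for diffuser-based lensless imaging, commonly written as crop(PSF * scene); its adjoint is zero-pad followed by correlation. The sketch below implements that forward/adjoint pair with a synthetic PSF and checks adjointness numerically; it is an illustration of the model class, not the thesis code.

```python
# Crop(conv(PSF, scene)) forward model and its adjoint (pad + correlate), with a
# numerical adjointness check. PSF and sizes are synthetic.
import numpy as np

def forward(scene, psf, sensor_shape):
    full = np.real(np.fft.ifft2(np.fft.fft2(scene) * np.fft.fft2(np.fft.ifftshift(psf))))
    sy, sx = sensor_shape
    y0, x0 = (full.shape[0] - sy) // 2, (full.shape[1] - sx) // 2
    return full[y0:y0 + sy, x0:x0 + sx]                   # crop to the sensor

def adjoint(meas, psf, scene_shape):
    padded = np.zeros(scene_shape)
    sy, sx = meas.shape
    y0, x0 = (scene_shape[0] - sy) // 2, (scene_shape[1] - sx) // 2
    padded[y0:y0 + sy, x0:x0 + sx] = meas                 # zero-pad (adjoint of crop)
    H = np.fft.fft2(np.fft.ifftshift(psf))
    return np.real(np.fft.ifft2(np.fft.fft2(padded) * np.conj(H)))

rng = np.random.default_rng(0)
scene_shape, sensor_shape = (128, 128), (64, 64)
psf = rng.random(scene_shape); psf /= psf.sum()
x, y = rng.standard_normal(scene_shape), rng.standard_normal(sensor_shape)
lhs = np.sum(forward(x, psf, sensor_shape) * y)           # <A x, y>
rhs = np.sum(x * adjoint(y, psf, scene_shape))            # <x, A^T y>
print(lhs, rhs)                                           # should match to precision
```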
Michael Kellman
Physics-based Learning for Large-scale Computational Imaging PhD Thesis
EECS Department, University of California, Berkeley, 2020.
@phdthesis{Kellman:EECS-2020-167,
title = {Physics-based Learning for Large-scale Computational Imaging},
author = {Michael Kellman},
url = {http://www2.eecs.berkeley.edu/Pubs/TechRpts/2020/EECS-2020-167.html},
year = {2020},
date = {2020-08-01},
number = {UCB/EECS-2020-167},
school = {EECS Department, University of California, Berkeley},
abstract = {In computational imaging systems (e.g. tomographic systems, computational optics, magnetic resonance imaging) the acquisition of data and reconstruction of images are co-designed to retrieve information which is not traditionally accessible. The performance of such systems is characterized by how information is encoded to (forward process) and decoded from (inverse problem) the measurements. Recently, critical aspects of these systems, such as their signal prior, have been optimized using deep neural networks formed from unrolling the iterations of a physics-based image reconstruction.
In this dissertation, I will detail my work, physics-based learned design, to optimize the performance of the entire computational imaging system by jointly learning aspects of its experimental design and computational reconstruction. As an application, I introduce how the LED-array microscope performs super-resolved quantitative phase imaging and demonstrate how physics-based learning can optimize a reduced set of measurements without sacrificing performance to enable the imaging of live, fast-moving biology.
In this dissertation's latter half, I will discuss how to overcome some of the computational challenges encountered in applying physics-based learning concepts to large-scale computational imaging systems. I will describe my work, memory-efficient learning, which makes physics-based learning for large-scale systems feasible on commercially available graphics processing units. I demonstrate this method on two large-scale real-world systems: 3D multi-channel compressed sensing MRI and super-resolution optical microscopy.
keywords = {},
pubstate = {published},
tppubtype = {phdthesis}
}
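Memory-efficient learning, as summarized above, targets the memory cost of backpropagating through many unrolled physics-based iterations. The sketch below illustrates the problem and one generic remedy, gradient checkpointing with PyTorch's built-in utility, which recomputes intermediates during the backward pass; the thesis develops a more specialized approach, so treat this only as a stand-in.

```python
# Gradient checkpointing over unrolled physics-based iterations (a generic
# stand-in for the memory-efficient learning method in the thesis): the
# intermediates are recomputed during backward instead of being stored.
import torch
from torch.utils.checkpoint import checkpoint

n_unrolls, n = 50, 256
psf = torch.rand(n, n); psf = psf / psf.sum()
H = torch.fft.fft2(psf)                              # fixed system transfer function
step = torch.nn.Parameter(torch.tensor(0.1))         # learnable step size

def iteration(x, y):
    # One gradient step on ||Ax - y||^2 with A = convolution by the PSF.
    Ax = torch.fft.ifft2(torch.fft.fft2(x) * H).real
    grad = torch.fft.ifft2(torch.fft.fft2(Ax - y) * torch.conj(H)).real
    return x - step * grad

y = torch.rand(n, n)
x = torch.zeros(n, n, requires_grad=True)
for _ in range(n_unrolls):
    x = checkpoint(iteration, x, y, use_reentrant=False)   # recompute in backward
loss = x.mean()
loss.backward()
print("d loss / d step =", float(step.grad))
```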
Nicholas Antipa
Lensless Computational Imaging using Random Optics PhD Thesis
2020.
@phdthesis{Antipa:EECS-2020-175,
title = {Lensless Computational Imaging using Random Optics},
author = {Nicholas Antipa},
url = {http://www2.eecs.berkeley.edu/Pubs/TechRpts/2020/EECS-2020-175.html},
year = {2020},
date = {2020-08-01},
number = {UCB/EECS-2020-175},
institution = {EECS Department, University of California, Berkeley},
abstract = {Efficiently capturing high-dimensional optical signals, such as temporal dynamics, depth, perspective, or spectral content is a difficult imaging challenge. Because image sensors are inherently two-dimensional, direct sampling of the many dimensions that completely describe a scene presents a significant engineering challenge. Computational imaging is a design approach in which imaging hardware and digital signal processing algorithms are designed jointly to achieve performance not possible with partitioned design schemes. Within this paradigm, the sensing hardware is viewed as an encoder, coding the information of interest into measurements that can be captured with conventional sensors. Algorithms are then used to decode the information. In this dissertation, I explore the connection between optical imaging system design and compressed sensing, demonstrating that extra dimensions of optical signals (time, depth, and perspective) can be encoded into a single 2D measurement, then extracted using sparse recovery methods. The key to these capabilities is exploiting the inherent multiplexing properties of diffusers, pseudorandom free-form phase optics that scramble incident light. Contrary to their intended use, I show that certain classes of diffuser encode high-dimensional information about the incident light field into high-contrast, pseudorandom intensity patterns (caustics). Sparse recovery methods can then decode these patterns, recovering 3D images from snapshot 2D measurements. This transforms a diffuser into a computational imaging element for high-dimensional capture at video rates. Efficient physical models are introduced that reduce the computational burden for image recovery as compared to explicit matrix approaches (the computational cost remains high, however). Lastly, analysis and theory is developed that enables optimization of customized diffusers for miniaturized 3D fluorescence microscopy.},
keywords = {},
pubstate = {published},
tppubtype = {phdthesis}
}
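To make the "efficient physical models versus explicit matrices" point above concrete: for an N-by-N image, the dense measurement matrix of a convolutional lensless model has (N^2)^2 entries, while an FFT-based matrix-free operator applies the same model in O(N^2 log N) time. The sketch below wraps such an operator in scipy's LinearOperator so iterative solvers can use it directly; the PSF and solver settings are arbitrary placeholders.

```python
# Matrix-free forward model wrapped as a scipy LinearOperator, versus the memory
# an explicit matrix would need. PSF and solver settings are arbitrary.
import numpy as np
from scipy.sparse.linalg import LinearOperator, cg

N = 256
psf = np.random.default_rng(0).random((N, N)); psf /= psf.sum()
H = np.fft.fft2(np.fft.ifftshift(psf))

def A_mv(vec):                     # y = psf * x (circular convolution), flattened
    return np.real(np.fft.ifft2(np.fft.fft2(vec.reshape(N, N)) * H)).ravel()

def At_mv(vec):                    # adjoint: correlation with the PSF
    return np.real(np.fft.ifft2(np.fft.fft2(vec.reshape(N, N)) * np.conj(H))).ravel()

def AtA_mv(vec):                   # normal-equations operator A^T A
    return np.real(np.fft.ifft2(np.fft.fft2(vec.reshape(N, N)) * np.abs(H) ** 2)).ravel()

A = LinearOperator((N * N, N * N), matvec=A_mv)
AtA = LinearOperator((N * N, N * N), matvec=AtA_mv)

dense_bytes = (N * N) ** 2 * 8
print(f"explicit matrix: {dense_bytes / 1e9:.1f} GB; "
      f"matrix-free operator stores only the {psf.nbytes / 1e6:.1f} MB PSF")

# A few conjugate-gradient steps on the (unregularized) normal equations.
y = A @ np.random.default_rng(1).standard_normal(N * N)
x_hat, info = cg(AtA, At_mv(y), maxiter=20)
print(x_hat.shape, info)
```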
2019
Zachary F Phillips
Quantitative Microscopy using Coded Illumination PhD Thesis
University of California, Berkeley, 2019.
@phdthesis{phillips2019quant,
title = {Quantitative Microscopy using Coded Illumination},
author = {Zachary F Phillips},
url = {https://escholarship.org/uc/item/70d9j190
https://search.proquest.com/openview/ac36264c1a5b90ee3c8e4085759b0cc4/1?pq-origsite=gscholar&cbl=18750&diss=y},
year = {2019},
date = {2019-05-01},
school = {University of California, Berkeley},
keywords = {},
pubstate = {published},
tppubtype = {phdthesis}
}
Michael Chen
Coded Illumination for Multidimensional Quantitative Phase Imaging PhD Thesis
University of California, Berkeley, 2019.
@phdthesis{chen2019coded,
title = {Coded Illumination for Multidimensional Quantitative Phase Imaging},
author = {Michael Chen},
url = {http://www2.eecs.berkeley.edu/Pubs/TechRpts/2020/EECS-2020-37.html},
year = {2019},
date = {2019-05-01},
school = {University of California, Berkeley},
keywords = {},
pubstate = {published},
tppubtype = {phdthesis}
}
Li-Hao Yeh
Computational fluorescence and phase super-resolution microscopy PhD Thesis
University of California, Berkeley, 2019.
@phdthesis{yeh2019computationalb,
title = {Computational fluorescence and phase super-resolution microscopy},
author = {Li-Hao Yeh},
url = {http://www2.eecs.berkeley.edu/Pubs/TechRpts/2020/EECS-2020-36.html},
year = {2019},
date = {2019-05-01},
school = {University of California, Berkeley},
keywords = {},
pubstate = {published},
tppubtype = {phdthesis}
}
2018
Aamod Shanker; Antoine Wojdyla; Laura Waller; Andrew R. Neureuther
Differential methods in phase imaging for optical lithography PhD Thesis
EECS Department, University of California, Berkeley, 2018.
@phdthesis{shanker2018differential,
title = {Differential methods in phase imaging for optical lithography},
author = { Aamod Shanker and Antoine Wojdyla and Laura Waller and Andrew R. Neureuther},
url = {https://www2.eecs.berkeley.edu/Pubs/TechRpts/2018/EECS-2018-160.html
http://www2.eecs.berkeley.edu/Pubs/TechRpts/2018/EECS-2018-160.pdf},
year = {2018},
date = {2018-12-01},
school = {EECS Department, University of California, Berkeley},
keywords = {},
pubstate = {published},
tppubtype = {phdthesis}
}
Hsiou-Yuan Liu
Optical Phase Space Measurements and Applications to 3D Imaging and Light Scattering PhD Thesis
University of California, Berkeley, 2018, ISBN: 978-0-438-65426-6.
@phdthesis{liu2018optical,
title = {Optical Phase Space Measurements and Applications to 3D Imaging and Light Scattering},
author = {Hsiou-Yuan Liu},
url = {https://search-proquest-com.libproxy.berkeley.edu/docview/2137546920?accountid=14496},
isbn = {978-0-438-65426-6},
year = {2018},
date = {2018-05-01},
school = {University of California, Berkeley},
keywords = {},
pubstate = {published},
tppubtype = {phdthesis}
}
2015
Rene A Claus
Partially Coherent Quantitative Phase Retrieval with Applications to Extreme Ultraviolet Lithography PhD Thesis
University of California, Berkeley, 2015, ISBN: 978-1-339-59311-1.
@phdthesis{claus2015partially,
title = {Partially Coherent Quantitative Phase Retrieval with Applications to Extreme Ultraviolet Lithography},
author = {Rene A Claus},
url = {https://search-proquest-com.libproxy.berkeley.edu/docview/1779555925?accountid=14496},
isbn = {978-1-339-59311-1},
year = {2015},
date = {2015-12-01},
school = {University of California, Berkeley},
keywords = {},
pubstate = {published},
tppubtype = {phdthesis}
}
Zeyi Lee; Mark Hardiman; Ryan Frazier; Ying Ou; Longxiang Cui; Laura Waller; Ming C Wu
Project NoScope Masters Thesis
University of California, Berkeley, 2015.
@mastersthesis{lee2015capstone,
title = {Project NoScope},
author = { Zeyi Lee and Mark Hardiman and Ryan Frazier and Ying Ou and Longxiang Cui and Laura Waller and Ming C Wu},
url = {https://www2.eecs.berkeley.edu/Pubs/TechRpts/2015/EECS-2015-52.html
http://www2.eecs.berkeley.edu/Pubs/TechRpts/2015/EECS-2015-52.pdf},
year = {2015},
date = {2015-05-19},
school = {University of California, Berkeley},
keywords = {},
pubstate = {published},
tppubtype = {mastersthesis}
}
2014
Aamod Shanker; Laura Waller; Andrew R Neureuther
Defocus based phase imaging for quantifying electromagnetic edge effects in photomasks Masters Thesis
EECS Department, University of California, Berkeley, 2014.
@mastersthesis{shanker2014defocus,
title = {Defocus based phase imaging for quantifying electromagnetic edge effects in photomasks},
author = { Aamod Shanker and Laura Waller and Andrew R Neureuther},
url = {https://www2.eecs.berkeley.edu/Pubs/TechRpts/2014/EECS-2014-105.html},
year = {2014},
date = {2014-05-16},
school = {EECS Department, University of California, Berkeley},
keywords = {},
pubstate = {published},
tppubtype = {mastersthesis}
}
2010
Laura Waller
Computational phase imaging based on intensity transport PhD Thesis
Massachusetts Institute of Technology, 2010.
@phdthesis{waller2010computational,
title = {Computational phase imaging based on intensity transport},
author = { Laura Waller},
url = {http://hdl.handle.net/1721.1/60821},
year = {2010},
date = {2010-06-01},
school = {Massachusetts Institute of Technology},
abstract = {Light is a wave, having both an amplitude and a phase. However, optical frequencies are too high to allow direct detection of phase; thus, our eyes and cameras see only real values: intensity. Phase carries important information about a wavefront and is often used for visualization of biological samples, density distributions and surface profiles. This thesis develops new methods for imaging phase and amplitude from multi-dimensional intensity measurements. Tomographic phase imaging of diffusion distributions is described for the application of water content measurement in an operating fuel cell. Only two projection angles are used to detect and localize large changes in membrane humidity. Next, several extensions of the Transport of Intensity technique are presented. Higher-order axial derivatives are suggested as a method for correcting nonlinearity, thus improving range and accuracy. To deal with noisy images, complex Kalman filtering theory is proposed as a versatile tool for complex-field estimation. These two methods use many defocused images to recover phase and amplitude. The next technique presented is a single-shot quantitative phase imaging method which uses chromatic aberration as the contrast mechanism. Finally, a novel single-shot complex-field technique is presented in the context of Volume Holographic Microscopy (VHM). All of these techniques are in the realm of computational imaging, whereby the imaging system and post-processing are designed in parallel.},
keywords = {},
pubstate = {published},
tppubtype = {phdthesis}
}
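For the Transport of Intensity technique extended in this thesis, the standard starting point is that, for uniform intensity, the TIE reduces to a Poisson equation relating the axial intensity derivative to the Laplacian of the phase, which can be inverted with FFTs. The sketch below runs that inversion on a simulated derivative; it illustrates only the baseline method, not the higher-order or Kalman-filtered extensions described in the abstract.

```python
# Uniform-intensity TIE inversion as a Poisson problem solved with FFTs
# (baseline method only; all values are illustrative).
import numpy as np

n, wavelength, dx = 256, 0.5e-6, 2e-6            # pixels, meters
k = 2 * np.pi / wavelength

# Ground-truth smooth phase; simulate the axial intensity derivative it produces.
yy, xx = np.meshgrid(np.linspace(-1, 1, n), np.linspace(-1, 1, n), indexing="ij")
phi_true = 2.0 * np.exp(-(xx**2 + yy**2) / 0.1)

f = np.fft.fftfreq(n, d=dx)
FY, FX = np.meshgrid(f, f, indexing="ij")
lap = -4 * np.pi**2 * (FX**2 + FY**2)            # Fourier symbol of the Laplacian

I0 = 1.0                                          # uniform in-focus intensity
dIdz = -(I0 / k) * np.real(np.fft.ifft2(lap * np.fft.fft2(phi_true)))
# (In practice dI/dz comes from finite differences of defocused images.)

# Invert laplacian(phi) = -k * dIdz / I0 with a regularized inverse Laplacian.
inv_lap = lap / (lap**2 + 1e-9)
phi_rec = np.real(np.fft.ifft2(inv_lap * np.fft.fft2(-k * dIdz / I0)))

err = phi_rec - phi_true
print("rms phase error (insensitive to the unrecoverable constant):", np.std(err))
```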
2005
Laura Waller
Feedback loop design and experimental testing for integrated optics with micro-mechanical tuning Masters Thesis
Massachusetts Institute of Technology, 2005.
@mastersthesis{waller2005feedback,
title = {Feedback loop design and experimental testing for integrated optics with micro-mechanical tuning},
author = { Laura Waller},
url = {http://hdl.handle.net/1721.1/33383},
year = {2005},
date = {2005-05-19},
school = {Massachusetts Institute of Technology},
keywords = {},
pubstate = {published},
tppubtype = {mastersthesis}
}