% Encoding: UTF-8
@COMMENT{BibTeX export based on data in FAU CRIS: https://cris.fau.de/}
@COMMENT{For any questions please write to cris-support@fau.de}
@inproceedings{faucris.206226591,
author = {Bartuschat, Dominik and Gmeiner, Björn and Thönnes, Dominik and Kohl, Nils and Rüde, Ulrich and Drzisga, Daniel and Huber, Markus and John, Lorenz and Waluga, Christian and Wohlmuth, B. I. and Bauer, Simon and Mohr, Marcus and Bunge, Hans-Peter},
booktitle = {SIAM Conference on Parallel Processing for Scientific Computing (SIAM PP 18)},
date = {2018-03-07/2018-03-10},
faupublication = {yes},
peerreviewed = {unknown},
title = {{A} {Finite} {Element} {Multigrid} {Framework} for
{Extreme}-{Scale} {Earth} {Mantle} {Convection} {Simulations}},
url = {https://www10.cs.fau.de/publications/talks/2018/Thoennes{\_}Tokio{\_}SIAMPP18{\_}2018-03-09.pdf},
venue = {Tokyo},
year = {2018}
}
@inproceedings{faucris.107108584,
author = {Bartuschat, Dominik and Gmeiner, Björn and Thönnes, Dominik and Kohl, Nils and Rüde, Ulrich and Drzisga, Daniel Peter and Huber, Markus and John, Lorenz and Waluga, Christian and Wohlmuth, B. I. and Bauer, Simon and Mohr, Marcus and Bunge, Hans-Peter},
booktitle = {SIAM Conference on Parallel Processing for Scientific Computing (SIAM PP 18)},
date = {2018-03-07/2018-03-10},
faupublication = {yes},
peerreviewed = {unknown},
title = {{A} {Finite} {Element} {Multigrid} {Framework} for
{Extreme}-{Scale} {Earth} {Mantle} {Convection} {Simulations}},
url = {https://www10.cs.fau.de/publications/talks/2018/Bartuschat{\_}Tokyo{\_}SIAMPP2018{\_}2018-03-10.pdf},
venue = {Tokyo},
year = {2018}
}
@inproceedings{faucris.122859484,
abstract = {Patch-based approaches in imaging require heavy computations on many small sub-blocks of images but are easily parallelizable since usually different sub-blocks can be treated independently. In order to make these approaches useful in practical applications efficient algorithms and implementations are required. Newer architectures like the Cell Broadband Engine Architecture (CBEA) make it even possible to come close to real-time performance for moderate image sizes. In this article we present performance results for image denoising on the CBEA. The image denoising is done by finding sparse representations of signals from a given overcomplete dictionary and assuming that noise cannot be represented sparsely. We compare our results with a standard multicore implementation and show the gain of the CBEA. © 2010 Springer-Verlag Berlin Heidelberg.},
address = {Berlin Heidelberg},
author = {Bartuschat, Dominik and Stürmer, Markus and Köstler, Harald},
booktitle = {Parallel Processing and Applied Mathematics},
date = {2009-09-13/2009-09-16},
doi = {10.1007/978-3-642-14390-8{\_}58},
faupublication = {yes},
isbn = {978-3-642-14389-2},
note = {UnivIS-Import:2015-04-16:Pub.2010.tech.IMMD.lsinfs.anorth},
pages = {557-566},
peerreviewed = {Yes},
publisher = {Springer-Verlag},
series = {Lecture Notes in Computer Science},
title = {{An} {Orthogonal} {Matching} {Pursuit} {Algorithm} for {Image} {Denoising} on the {Cell} {Broadband} {Engine}},
url = {http://www.springerlink.com/content/JXJ06Q00R1U03516},
venue = {Wroclaw},
volume = {5057},
year = {2010}
}
@inproceedings{faucris.118690484,
abstract = {Numerical simulations are an indispensable tool in geosciences for understanding geodynamic processes inside the Earth.
Due to the enormous spatial and time scales and the inaccessibility of the Earth's interior to direct measurements,
studying these processes requires a combination of sophisticated computer simulations and mostly indirect observations.
Heating inside the Earth's core and mantle causes convection currents in the solid Earth mantle, which results in a viscous flow on geological time scales of millions of years.
This mantle convection is the driving mechanism of plate tectonics, which causes mountain building, earthquakes and volcanism.
However, many details of the physical processes in Earth mantle convection are poorly known, such as appropriate rheological parameters or the mantle viscosity structure.
To allow for the use of realistic physical parameters, Earth mantle convection simulations require
extremely large grids for a sufficient resolution of the mantle volume of 10^{12} km^3 and many time steps.
These simulations are only possible with highly efficient codes that exhibit excellent parallel scalability on modern supercomputers.
In this talk, we present a framework for such large-scale time-dependent mantle convection simulations on a thick spherical shell with variable viscosity.
In the simulations, a nonlinear coupled multiphysics problem of the Stokes equations coupled to the energy equation is solved,
modeling the conservation of momentum, mass, and energy.
These equations are discretized with finite elements and the solution is computed in the Hierarchical Hybrid Grids (HHG) framework.
HHG combines the flexibility of unstructured tetrahedral meshes with the efficiency of structured grids for finite element discretizations.
The design of this framework is motivated by the challenging goal of achieving high performance on large-scale and parallel
finite element simulations on supercomputers. HHG exploits the performance and efficiency of nested structured grid
hierarchies and hierarchically organized data structures combined with the flexibility of unstructured grids.
To this end, HHG combines grid partitioning and regular refinement in such a way that an execution paradigm using stencils can be realized.
Within uniform blocks of the mesh three-dimensional stencils are applied in the fashion of a finite difference method.
We present transient simulation results of the temperature distribution for the coupled flow and transport problem,
as well as the stationary flow field for variable temperature-dependent viscosity with high viscosity contrasts.
Moreover, scaling results are presented to show that our approach facilitates solving systems in excess of ten trillion ($10^{13}$) unknowns
on petascale systems using compute times of a few minutes.},
author = {Bartuschat, Dominik and Rüde, Ulrich and Thönnes, Dominik and Kohl, Nils and Drzisga, Daniel Peter and Huber, Markus and John, Lorenz and Waluga, Christian and Wohlmuth, B. I. and Bauer, Simon and Mohr, Marcus and Bunge, Hans-Peter},
booktitle = {CSEConf2017 -- 2017 International Conference on Computational Science and Engineering - Software, Education, and Biomedical applications},
date = {2017-10-23/2017-10-25},
faupublication = {yes},
peerreviewed = {unknown},
title = {{A} parallel finite element multigrid framework for geodynamic simulations with more than ten trillion unknowns},
url = {https://www10.cs.fau.de/publications/talks/2017/Bartuschat{\_}Oslo{\_}CSEconf17{\_}2017-10-23.pdf},
venue = {Oslo, Norway},
year = {2017}
}
@misc{faucris.109099144,
author = {Bartuschat, Dominik and Borsdorf, Anja and Köstler, Harald and Rubinstein, R. and Stürmer, Markus},
faupublication = {yes},
note = {UnivIS-Import:2016-06-30:Pub.2009.tech.IMMD.lsinfs.aparal},
peerreviewed = {automatic},
title = {{A} parallel {K}-{SVD} implementation for {CT} image denoising},
url = {https://www10.cs.fau.de/publications/reports/TechRep{\_}2009-01.pdf},
year = {2009}
}
@inproceedings{faucris.118040604,
author = {Bartuschat, Dominik and Bauer, Martin and Bogner, Simon and Godenschwager, Christian and Rüde, Ulrich},
booktitle = {UGC-DAAD Project, LBM Lecture Series 2017},
date = {2017-01-09/2017-01-13},
faupublication = {yes},
month = {Jan},
peerreviewed = {No},
title = {{Applications} of {Lattice} {Boltzmann} {Methods}},
url = {https://www10.cs.fau.de/publications/talks/2017/Bartuschat{\_}Delhi{\_}UGC-DAAD{\_}2017-01-12.pdf},
venue = {IIT Delhi, New Delhi},
year = {2017}
}
@incollection{faucris.204136823,
author = {Kohl, Nils and Thönnes, Dominik and Drzisga, Daniel and Bartuschat, Dominik and Rüde, Ulrich},
booktitle = {From Parallel to Emergent Computing},
doi = {10.1201/9781315167084-8},
editor = {Andrew Adamatzky and Selim Akl and Georgios Sirakoulis},
faupublication = {yes},
isbn = {9781138054011},
peerreviewed = {unknown},
publisher = {Taylor \& Francis},
title = {{A} {Scalable} and {Modular} {Software} {Architecture} for {Finite} {Elements} on {Hierarchical} {Hybrid} {Grids}},
year = {2019}
}
@article{faucris.200540070,
abstract = {We introduce a novel coupled algorithm for massively parallel direct
numerical simulations of electrophoresis in microfluidic flows. This
multiphysics algorithm employs an Eulerian description of fluid and
ions, combined with a Lagrangian representation of moving charged
particles. The fixed grid facilitates efficient solvers and the employed
lattice Boltzmann method can efficiently handle complex geometries.
Validation experiments with more than 70 000 time steps are presented,
together with scaling experiments with over 4 × 10^{6} particles and 1.96 × 10^{11}
grid cells for both hydrodynamics and electric potential. We achieve
excellent performance and scaling on up to 65 536 cores of a current
supercomputer.},
author = {Bartuschat, Dominik and Rüde, Ulrich},
doi = {10.1016/j.jocs.2018.05.011},
faupublication = {yes},
journal = {Journal of Computational Science},
keywords = {Parallel simulation; Electrokinetic flow; Electrophoresis; Fluid–particle interaction; MPI},
pages = {147 - 167},
peerreviewed = {unknown},
title = {{A} scalable multiphysics algorithm for massively parallel direct numerical simulations of electrophoresis},
volume = {27},
year = {2018}
}
@inproceedings{faucris.117294804,
author = {Preclik, Tobias and Bartuschat, Dominik and Rüde, Ulrich and Fattahi, Ehsan and Wohlmuth, B. I.},
booktitle = {High Performance Computing in Science and Engineering},
date = {2015-05-25/2015-05-28},
faupublication = {yes},
peerreviewed = {No},
title = {{Coupled} {Physical} {Models} for {Extreme}-{Scale} {Computing}},
venue = {Ostrava},
year = {2015}
}
@inproceedings{faucris.106461784,
author = {Preclik, Tobias and Bartuschat, Dominik and Rüde, Ulrich and Fattahi, Ehsan and Wohlmuth, B. I.},
booktitle = {CADMOS Day},
date = {2015-06-11/2015-06-11},
faupublication = {yes},
peerreviewed = {No},
title = {{Coupled} {Physical} {Models} for {Extreme}-{Scale} {Computing}},
venue = {EPFL Lausanne},
year = {2015}
}
@inproceedings{faucris.117300524,
author = {Rüde, Ulrich and Preclik, Tobias and Bartuschat, Dominik},
booktitle = {Minisymposium “Decoupling methods for multi-physics and multiscale problems” at ICIAM 2015},
date = {2015-08-10/2015-08-14},
faupublication = {yes},
peerreviewed = {No},
title = {{Direct} numerical simulation of charged particles in complex flows},
venue = {Beijing},
year = {2015}
}
@phdthesis{faucris.122298704,
abstract = {Particle-laden electrokinetic flows occur in a wide range of industrial and medical processes. Notable applications include electrostatic filters, drug administration via the respiratory system, and manipulation and actuation of biological particles and liquids in lab-on-a-chip systems. These electrokinetic flows comprise fluid flow, charged objects, and electric fields. The complex interplay of these effects makes calculations and predictions of electrokinetic systems difficult, especially for the large numbers of particles typically involved.
Computer simulations have become a powerful means to predict, analyze, and optimize the behavior of complex processes and systems. Nevertheless, the development of models and algorithms for multiphysics simulations that incorporate and couple multiple physical effects remains a challenging task. Moreover, multiphysics simulations of realistic scenarios with up to millions of interacting particles often require parallel supercomputers.
In this thesis, efficient algorithms for physically accurate, massively parallel multiphysics simulations of particle-laden electrokinetic flows on advanced high performance computers are presented. These fully parallelized, coupled algorithms are implemented within the software framework WALBERLA in a modular fashion. The modular software design ensures flexibility in the coupling of different algorithms, and extensibility for the implementation of more detailed or additional models. Excellent computational performance and parallel scalability are achieved by a careful parallel implementation and performance optimizations.
For direct numerical simulations of fluid-particle interactions, a lattice Boltzmann algorithm modeling the fluid flow is coupled to a computational model of rigid body dynamics for geometrically fully resolved particles. The coupled electric effects are modeled by electric potentials represented by a finite volume discretization on a mesh conforming to the lattice Boltzmann grid. For solving the electric potential equations and elliptic partial differential equations modeling other physical effects, WALBERLA is augmented by efficient parallel iterative solvers and by new boundary condition handling functionality.
The coupled models and their implementation are systematically validated, and the correctness of the overall multiphysics algorithms for electrokinetic flows with charged or uncharged particles in the presence or absence of ions in the fluid is verified. Moreover, the fluid-particle interaction is validated for spherical and elongated particles, and the tumbling motion of spherocylinders in the viscous flow regime is examined. In the validation experiments, the influence of several simulation parameters on physical accuracy is studied. Benchmark scenarios demonstrate the suitability of the different multiphysics algorithms for real-world applications.
To show the outstanding parallel performance of the algorithms for the simulation of millions of charged particles in fluid flow, their parallel scaling and numerical efficiency are analyzed on an advanced supercomputer.
},
author = {Bartuschat, Dominik},
faupublication = {yes},
peerreviewed = {automatic},
school = {Friedrich-Alexander-Universität Erlangen-Nürnberg},
title = {{Direct} {Numerical} {Simulation} of {Particle}-{Laden} {Electrokinetic} {Flows} on {High} {Performance} {Computers}},
url = {https://opus4.kobv.de/opus4-fau/frontdoor/index/index/docId/7298},
year = {2016}
}
@article{faucris.117304264,
abstract = {In this paper, a hybrid lattice-Boltzmann and finite-difference (LB-FD) model is applied to simulate the effects of three-dimensional surface roughness and electrokinetic heterogeneity on electroosmotic flow (EOF) in a microchannel. The lattice-Boltzmann (LB) method has been employed to obtain the flow field and a finite-difference (FD) method is used to solve the Poisson-Boltzmann (PB) equation for the electrostatic potential distribution. Numerical simulation of flow through a square cross-section microchannel with designed roughness is conducted and the results are critically analysed. The effects of surface heterogeneity on the electroosmotic transport are investigated for different roughness height, width, roughness interval spacing, and roughness surface potential. Numerical simulations reveal that the presence of surface roughness changes the nature of electroosmotic transport through the microchannel. It is found that the electroosmotic velocity decreases with the increase in roughness height and the velocity profile becomes asymmetric. For the same height of the roughness elements, the EOF velocity rises with the increase in roughness width. For the heterogeneously charged rough channel, the velocity profile shows a distinct deviation from the conventional plug-like flow pattern. The simulation results also indicate locally induced flow vortices which can be utilized to enhance the flow and mixing within the microchannel. The present study has important implications towards electrokinetic flow control in the microchannel, and can provide an efficient way to design a microfluidic system of practical interest.},
author = {Masilamani, Kannan and Ganguly, Suvankar and Feichtinger, Christian and Bartuschat, Dominik and Rüde, Ulrich},
doi = {10.1088/0169-5983/47/3/035505},
faupublication = {yes},
journal = {Fluid Dynamics Research},
keywords = {microchannel; electroosmosis; lattice Boltzmann; numerical; simulation},
note = {UnivIS-Import:2015-07-08:Pub.2015.tech.IMMD.lsinfs.effect},
pages = {1-19},
peerreviewed = {Yes},
title = {{Effects} of surface roughness and electrokinetic heterogeneity on electroosmotic flow in microchannel},
url = {http://iopscience.iop.org/article/10.1088/0169-5983/47/3/035505/pdf},
volume = {47},
year = {2015}
}
@article{faucris.213838712,
abstract = {Multigrid methods are an important class of iterative solvers and preconditioners as they provide fast convergence rates with O(n) complexity on specific classes of problems such as elliptic problems with uniform coefficients. While their implementation on distributed memory systems has been shown to scale well both weakly and strongly, the advent of new mixed architectures requires a redesign of some core kernels of these methods. This minisymposium aims at exploring methods leveraging fully or partially structured grids: fully structured grids, block-structured grids, mixed structured/unstructured grids or nested grids. Exploiting the regularity and the predictable indexing of the data leads to new, more parallel and scalable algorithms. This minisymposium also focuses on the parallel shared memory implementations of these algorithms for mixed architectures as it is an increasingly important aspect of high performance computing on recent architectures aiming at scalability at exascale.
The high computational demand of such simulations, however, often requires the use of parallel supercomputers when realistic application scenarios are studied.
In this talk, coupled multiphysics algorithms for massively parallel simulations of particulate electrokinetic microfluidic flows are presented.
These direct numerical simulations employ a momentum-exchange coupling to describe the interaction of the fluid phase with the particulate phase, which is represented by geometrically resolved rigid bodies.
Additionally, electric effects are considered, such as electrostatic interactions between charged particles, as well as electric fields acting on both resolved particles and on ions in the fluid.
By modeling the ion transport by a continuum description and simulating the effect of the ion transport on fluid motion, the electric field is also coupled to the fluid phase.
The simulations are realized by a coupling of the lattice Boltzmann method for fluid dynamics to a computational model of the dynamics of rigid particles and to a quasistatic continuum model of the electric potential that is discretized with finite volumes.
In addition to validation experiments, scaling experiments with several million fully resolved particles and 196 billion (1.96 × 10^{11}) mesh cells for both hydrodynamics and electric potential are presented.
The computations are executed in a fully scalable fashion on up to 65 536 processor cores of a current supercomputer.
The article describes parallel multiphysics simulations of charged particles in microfluidic flows with the waLBerla framework. To this end, three physical effects are coupled: rigid body dynamics, fluid flow modelled by a lattice Boltzmann algorithm, and electric potentials represented by a finite volume discretisation. For solving the finite volume discretisation for the electrostatic forces, a cell-centred multigrid algorithm is developed that conforms to the lattice Boltzmann meshes and the parallel communication structure of waLBerla. The new functionality is validated with suitable benchmark scenarios. Additionally, the parallel scaling and the numerical efficiency of the algorithms are analysed on an advanced supercomputer.},
author = {Bartuschat, Dominik and Rüde, Ulrich},
doi = {10.1016/j.jocs.2015.02.006},
faupublication = {yes},
journal = {Journal of Computational Science},
keywords = {Parallel simulation; Electrokinetic flow; Fluid–particle interaction; Cell-centered multigrid; MPI},
pages = {1-19},
peerreviewed = {unknown},
title = {{Parallel} {Multiphysics} {Simulations} of {Charged} {Particles} in {Microfluidic} {Flows}},
url = {http://www.sciencedirect.com/science/article/pii/S1877750315000162},
volume = {8},
year = {2015}
}
@article{faucris.114025604,
abstract = {The pe physics engine is validated through the simulation of a liquid crystal model system consisting of hard spherocylinders. For this purpose we evaluate several characteristic parameters of this system, namely the nematic order parameter, the pressure, and the Frank elastic constants. We compare these to the values reported in literature and find a very good agreement, which demonstrates that the pe physics engine can accurately treat such densely packed particle systems. Simultaneously we are able to examine the influence of finite size effects, especially on the evaluation of the Frank elastic constants, as we are far less restricted in system size than earlier simulations.},
author = {Fischermeier, Ellen and Bartuschat, Dominik and Preclik, Tobias and Marechal, Mattheus and Mecke, Klaus},
doi = {10.1016/j.cpc.2014.08.014},
faupublication = {yes},
journal = {Computer Physics Communications},
keywords = {Frank elastic constants; Hard rod; Liquid crystal; Parallel framework; Rigid body dynamics},
note = {UnivIS-Import:2015-03-09:Pub.2014.tech.IMMD.lsinfs.simula{\_}0},
pages = {3156-3161},
peerreviewed = {Yes},
title = {{Simulation} of a hard-spherocylinder liquid crystal with the pe},
url = {http://www.sciencedirect.com/science/article/pii/S0010465514002926},
volume = {185},
year = {2014}
}
@inproceedings{faucris.122152844,
author = {Bartuschat, Dominik and Rüde, Ulrich},
booktitle = {5th European Seminar on Computing},
date = {2016-06-05/2016-06-10},
faupublication = {yes},
peerreviewed = {No},
title = {{Simulation} of {Earth} {Mantle} {Convection}},
url = {https://www10.cs.fau.de/publications/talks/2015/Ruede{\_}ESCO{\_}Pilsen{\_}2016-06-09.pdf},
venue = {Pilsen},
year = {2016}
}
@inproceedings{faucris.206227602,
author = {Thönnes, Dominik and Kohl, Nils and Bartuschat, Dominik and Rüde, Ulrich},
booktitle = {SIAM Conference on Parallel Processing for Scientific Computing (SIAM PP 18)},
date = {2018-03-07/2018-03-10},
faupublication = {yes},
peerreviewed = {unknown},
title = {{Sustainability} and {Efficiency} for {Simulation} {Software} in the {Exascale} {Era}},
url = {https://www10.cs.fau.de/publications/talks/2018/Thoennes{\_}Tokio{\_}SIAMPP18{\_}2018-03-09.pdf},
venue = {Tokyo},
year = {2018}
}
@inproceedings{faucris.200790292,
author = {Kohl, Nils and Thönnes, Dominik and Drzisga, Daniel and Bartuschat, Dominik and Rüde, Ulrich},
booktitle = {SPPEXA Annual Plenary Meeting 2018},
date = {2018-03-21/2018-03-22},
faupublication = {yes},
peerreviewed = {unknown},
title = {{Terra}-{Neo} - {Integrated} {Co}-{Design} of an {Exascale} {Earth} {Mantle} {Modeling} {Framework}},
venue = {Institute for Advanced Study, Garching bei München, Germany},
year = {2018}
}
@inproceedings{faucris.204140108,
author = {Kohl, Nils and Thönnes, Dominik and Drzisga, Daniel and Bartuschat, Dominik and Rüde, Ulrich},
booktitle = {International Symposium on Computational Science at Scale},
date = {2018-09-05/2018-09-07},
faupublication = {yes},
peerreviewed = {unknown},
title = {{The} {HyTeG} {Finite}-{Element} {Framework} for {Scalable} {Geophysics} {Simulations}},
url = {https://www10.cs.fau.de/publications/posters/2018/Kohl{\_}CoSaS{\_}2018.pdf},
venue = {Friedrich-Alexander-Universität Erlangen-Nürnberg},
year = {2018}
}
@article{faucris.200726331,
abstract = {