@article {807, title = {OpenCLIPER: An OpenCL-Based C++ Framework for Overhead-Reduced Medical Image Processing and Reconstruction on Heterogeneous Devices}, journal = {IEEE Journal of Biomedical and Health Informatics}, volume = {23}, year = {2019}, month = {July}, pages = {1702-1709}, abstract = {

Medical image processing is often limited by the computational cost of the involved algorithms. Whereas dedicated computing devices (GPUs in particular) exist and do provide significant efficiency boosts, they have an extra cost of use in terms of housekeeping tasks (device selection and initialization, data streaming, synchronization with the CPU, and others), which may hinder developers from using them. This paper describes an OpenCL-based framework that is capable of handling dedicated computing devices seamlessly and that allows the developer to concentrate on image processing tasks. The framework automatically handles device discovery and initialization, data transfers to and from the device and the file system, and kernel loading and compilation. Data structures need to be defined only once, independently of the computing device; consequently, the code is the same for every device, including the host CPU. Pinned memory/buffer mapping is used to achieve maximum performance in data transfers. Code fragments included in the paper show how the computing device is almost immediately and effortlessly available to the user's algorithms, so they can focus on productive work. Code required for device selection and initialization, data loading and streaming, and kernel compilation is minimal and systematic. Algorithms can be thought of as mathematical operators (called processes), with inputs, outputs and parameters, and they may be chained one after another easily and efficiently. Also for efficiency, processes can have their initialization work split from their core workload, so process chains and loops do not incur performance penalties. Algorithm code is independent of the targeted device type.
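For illustration only, and not taken from the paper: a minimal C++ sketch of the process pattern the abstract describes, namely an operator with input, output and parameters whose one-time initialization is split from its per-iteration workload, so that chains and loops pay the setup cost only once. All names below (DeviceBuffer, Process, Copy, init, launch) are hypothetical and do not reproduce the actual OpenCLIPER API.

// Hypothetical sketch of a "process": an operator with input, output and
// parameters whose one-time setup (init) is separated from its repeated
// core workload (launch). Names are illustrative, not the OpenCLIPER API.
#include <memory>
#include <vector>

struct DeviceBuffer {                 // stand-in for device-resident data
    std::vector<float> host;          // e.g. a pinned/mapped host view
};

class Process {                       // a "mathematical operator"
public:
    virtual ~Process() = default;
    void setInput(std::shared_ptr<DeviceBuffer> in)   { input_ = in; }
    void setOutput(std::shared_ptr<DeviceBuffer> out) { output_ = out; }
    virtual void init()   = 0;        // kernel compilation, allocations, ...
    virtual void launch() = 0;        // per-iteration core workload
protected:
    std::shared_ptr<DeviceBuffer> input_, output_;
};

class Copy : public Process {         // trivial example operator
public:
    void init() override   { /* compile kernels, allocate temporaries once */ }
    void launch() override { output_->host = input_->host; }  // host fallback
};

int main() {
    auto img    = std::make_shared<DeviceBuffer>();
    auto result = std::make_shared<DeviceBuffer>();
    Copy p;
    p.setInput(img);
    p.setOutput(result);
    p.init();                          // one-time cost, outside the loop
    for (int iter = 0; iter < 10; ++iter)
        p.launch();                    // repeated workload only
    return 0;
}

Keeping init() outside the loop is what lets a chain of processes run repeatedly without re-paying kernel compilation or allocation costs.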

}, keywords = {C++, C++ languages, Data structures, GPU, Graphics processing units, Image reconstruction, Informatics, Kernel, Libraries, Medical imaging, OpenCL}, issn = {2168-2194}, doi = {10.1109/JBHI.2018.2869421}, author = {Federico Simmross-Wattenberg and M. Rodr{\'\i}guez-Cayetano and J Royuela-del-Val and E. Mart{\'\i}n-Gonz{\'a}lez and E. Moya-S{\'a}ez and M. Mart{\'\i}n-Fern{\'a}ndez and C. Alberola-L{\'o}pez} }

@conference {413, title = {Fusing Output Information in Neural Networks: Ensemble Performs Better}, booktitle = {Annual International Conference of the IEEE Engineering in Medicine and Biology - Proceedings}, year = {2003}, address = {Cancun}, abstract = {

A neural network ensemble is a learning paradigm in which a finite number of component neural networks are trained for the same task. Previous research suggests that an ensemble as a whole is often more accurate than any of its single component networks. This paper focuses on the advantages of fusing network architectures of different natures, and on determining the appropriate information fusion algorithm for the component neural networks by comparing several hard-decision classifier approaches on a binary pattern recognition problem. We numerically simulated and compared the different fusion approaches in terms of the mean-square error rate on the test data set, over synthetically generated binary Gaussian noisy data, and showed the advantages of fusing the hard outputs of the different component networks to make a final hard classification decision. The results of the experiments indicate that neural network ensembles can indeed improve the overall accuracy in classification problems; in all fusion architectures tested, the ensemble correct classification rates are better than those achieved by the individual component networks. Finally, we are currently comparing the above-mentioned hard-decision classifiers with new soft-decision classifier architectures that make use of the additional continuous-valued intermediate network soft outputs, which satisfy the fundamental laws of probability (non-negative and summing to unity) and can be understood as the a posteriori probabilities that a given pattern belongs to a certain class.
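For illustration only, and not taken from the paper: a minimal C++ sketch of one simple hard-decision fusion rule, majority voting over the 0/1 labels emitted by the component networks for a single pattern. The paper compares several fusion approaches; the tie-breaking choice and the names used here (majorityVote, hardLabels) are assumptions.

// Hypothetical sketch of hard-decision fusion by majority vote over the
// binary (0/1) labels produced by the component networks for one pattern.
#include <iostream>
#include <vector>

// Ties are broken here in favour of class 1; this choice is an assumption.
int majorityVote(const std::vector<int>& hardLabels) {
    int ones = 0;
    for (int y : hardLabels)
        ones += (y == 1);
    return 2 * ones >= static_cast<int>(hardLabels.size()) ? 1 : 0;
}

int main() {
    std::vector<int> labels = {1, 0, 1};   // labels from three component networks
    std::cout << "fused decision: " << majorityVote(labels) << "\n";  // prints 1
    return 0;
}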

}, keywords = {Algorithms, Backpropagation, Classification (of information), Computer simulation, Decision making, Estimation, Gaussian noise (electronic), Information fusions, Mathematical models, Medical imaging, Model selection, Multilayer neural networks, Neural network ensembles, Pattern recognition, Probability, Probability estimation, Problem solving, Regularization, Statistical methods, Statistical pattern recognition, Vectors}, doi = {10.1109/IEMBS.2003.1280254}, url = {http://www.scopus.com/inward/record.url?eid=2-s2.0-1542301061\&partnerID=40\&md5=32dbadb3b6ac3c6ae1ea33d89b52c75f}, author = {Y Wu and J I Arribas} }

@conference {414, title = {A fully automatic algorithm for contour detection of bones in hand radiographs using active contours}, booktitle = {IEEE International Conference on Image Processing}, year = {2003}, address = {Barcelona}, abstract = {

This paper presents an algorithm for automatically detecting bone contours in hand radiographs using active contours. Prior knowledge is first used to locate initial contours for the snakes inside each bone of interest. Next, an adaptive snake algorithm is applied so that parameters are properly adjusted for each bone specifically. We introduce a novel truncation technique to prevent the external forces of the snake from pulling the contour outside the bone boundaries, yielding excellent results.
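For illustration only, and not taken from the paper: a minimal C++ sketch of one plausible form of external-force truncation, clamping the magnitude of the force at each snake point so a single large force cannot pull the contour far outside the bone boundary. The clamping rule and the names used here (Vec2, truncateExternalForces, maxMagnitude) are assumptions, not the specific technique proposed in the paper.

// Hypothetical sketch: clamp the magnitude of the external force at each
// snake point to maxMagnitude, leaving smaller forces untouched.
#include <cmath>
#include <iostream>
#include <vector>

struct Vec2 { double x, y; };

void truncateExternalForces(std::vector<Vec2>& forces, double maxMagnitude) {
    for (Vec2& f : forces) {
        double m = std::hypot(f.x, f.y);
        if (m > maxMagnitude) {            // only truncate overly strong forces
            double s = maxMagnitude / m;
            f.x *= s;
            f.y *= s;
        }
    }
}

int main() {
    std::vector<Vec2> forces = {{3.0, 4.0}, {0.1, 0.2}};  // magnitudes 5.0 and ~0.22
    truncateExternalForces(forces, 1.0);                  // cap magnitude at 1.0
    std::cout << forces[0].x << " " << forces[0].y << "\n";  // ~0.6 0.8
    return 0;
}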

}, keywords = {Active contours, Algorithms, Bone, Concentric circumferences, Contour measurement, Medical imaging, Object recognition, Radiography}, url = {http://www.scopus.com/inward/record.url?eid=2-s2.0-0344271749\&partnerID=40\&md5=5fcf06edb482cc1527b2e8d3a940065b}, author = {Rodrigo de Luis-Garc{\'\i}a and Marcos Mart{\'\i}n-Fern{\'a}ndez and J I Arribas and Carlos Alberola-L{\'o}pez} }