@inproceedings{AbdankAburaiaWoeber,
  author    = {Abdank, Moritz and Aburaia, Mohamed and W{\"o}ber, Wilfried},
  title     = {Using Colour-Based Object Detection for Pick and Place Applications},
  booktitle = {Proceedings of the 32nd International {DAAAM} Virtual Symposium 'Intelligent Manufacturing \& Automation', 28-29th October 2021, Vienna},
  volume    = {32},
  number    = {1},
  pages     = {0536--0541},
  year      = {2021},
  subject   = {Computer Vision},
  language  = {en}
}

@techreport{OrsolitsKorn,
  author      = {Orsolits, Horst and Korn, Alexander},
  title       = {Augmented Reality basierte Robotik-Lehre},
  institution = {Gesellschaft f{\"u}r Informatik e.V., Wettbewerbsband AVRiL 2021},
  year        = {2021},
  pages       = {8},
  abstract    = {Diese Arbeit beschreibt eine Augmented Reality (AR) Applikation f{\"u}r den Einsatz in der Hochschullehre zum Thema Industrierobotik. Ziel ist es, sowohl das Lehren als auch das Lernen grundlegender Robotik-Inhalte durch die Bereitstellung einer interaktiven Methode zur Vermittlung der Lehrinhalte f{\"u}r Studierende zu verbessern. Die Studierenden sind in der Lage, direkt mit dem virtuellen Modell eines Industrieroboters zu interagieren und so selbstgesteuert die Lerninhalte zu vertiefen. Diese interaktive Methode verbindet die Studierenden direkt mit den Lehrinhalten und f{\"o}rdert das selbsterforschende Lernen. Eine weitere Anwendungsm{\"o}glichkeit sieht die Kombination einer Lektorenversion der AR Experience mit der Studierendenversion vor. Der Lektor hat die M{\"o}glichkeit, das Modell in AR zu steuern bzw. zu ver{\"a}ndern und die Studierenden k{\"o}nnen auf Ihren Mobilger{\"a}ten die {\"A}nderungen live in AR mitverfolgen, um so auch im Distance Learning eine Verbindung Lektor - Studierende - Inhalt zus{\"a}tzlich zu Videokonferenz-Tools herzustellen.},
  subject     = {Augmented Reality},
  language    = {de}
}

@article{WoeberCurtoTibihikaetal.,
  author   = {W{\"o}ber, Wilfried and Curto, Manuel and Tibihika, Papius D. and Meulenboek, Paul and Alemayehu, Esayas and Mehnen, Lars and Meimberg, Harald and Sykacek, Peter},
  title    = {Identifying geographically differentiated features of {Ethiopian} {Nile} tilapia ({Oreochromis} niloticus) morphology with machine learning},
  journal  = {PLoS ONE},
  volume   = {16},
  number   = {4},
  year     = {2021},
  subject  = {Machine Learning},
  language = {en}
}

@article{WoeberMehnenSykaceketal.,
  author   = {W{\"o}ber, Wilfried and Mehnen, Lars and Sykacek, Peter and Meimberg, Harald},
  title    = {Investigating Explanatory Factors of Machine Learning Models for Plant Classification},
  journal  = {Plants},
  volume   = {10},
  number   = {12},
  pages    = {2674},
  year     = {2021},
  note     = {20 pages},
  abstract = {Recent progress in machine learning and deep learning has enabled the implementation of plant and crop detection using systematic inspection of the leaf shapes and other morphological characters for identification systems for precision farming. However, the models used for this approach tend to become black-box models, in the sense that it is difficult to trace characters that are the base for the classification. The interpretability is therefore limited and the explanatory factors may not be based on reasonable visible characters. We investigate the explanatory factors of recent machine learning and deep learning models for plant classification tasks. Based on a Daucus carota and a Beta vulgaris image data set, we implement plant classification models and compare those models by their predictive performance as well as explainability. For comparison we implemented a feed forward convolutional neuronal network as a default model. To evaluate the performance, we trained an unsupervised Bayesian Gaussian process latent variable model as well as a convolutional autoencoder for feature extraction and rely on a support vector machine for classification. The explanatory factors of all models were extracted and analyzed. The experiments show, that feed forward convolutional neuronal networks (98.24\% and 96.10\% mean accuracy) outperforms the Bayesian Gaussian process latent variable pipeline (92.08\% and 94.31\% mean accuracy) as well as the convolutional autoencoder pipeline (92.38\% and 93.28\% mean accuracy) based approaches in terms of classification accuracy, even though not significant for Beta vulgaris images. Additionally, we found that the neuronal network used biological uninterpretable image regions for the plant classification task. In contrast to that, the unsupervised learning models rely on explainable visual characters. We conclude that supervised convolutional neuronal networks must be used carefully to ensure biological interpretability. We recommend unsupervised machine learning, careful feature investigation, and statistical feature analysis for biological applications.},
  keywords = {deep learning; machine learning; plant leaf morphometrics; explainable AI},
  subject  = {deep learning},
  language = {en}
}