@comment{{This file has been generated by bib2bib 1.96}}

@comment{{Command line: bib2bib -oc workshops.cit -ob workshops.bib -c $key:"IW$" 2018.bib 2017.bib 2016.bib 2015.bib 2014.bib 2013.bib 2012.bib 2011.bib 2010.bib 2009.bib 2008.bib 2007.bib 2006.bib 2005.bib 2004.bib 2003.bib 2002.bib 2001.bib 2000.bib 1999.bib 1998.bib 1997.bib 1996.bib}}

@inproceedings{AlbLamRig17-PLP-IW,
  author    = {Marco Alberti and Evelina Lamma and Fabrizio Riguzzi and Riccardo Zese},
  title     = {A Distribution Semantics for non-{DL}-Safe Probabilistic Hybrid Knowledge Bases},
  booktitle = {4th International Workshop on Probabilistic logic programming, PLP 2017},
  editor    = {Christian {Theil Have} and Riccardo Zese},
  year      = {2017},
  volume    = {1916},
  series    = {CEUR Workshop Proceedings},
  address   = {Aachen, Germany},
  publisher = {Sun {SITE} Central Europe},
  issn      = {1613-0073},
  pages     = {40--50},
  pdf       = {http://ceur-ws.org/Vol-1916/paper4.pdf},
  scopus    = {2-s2.0-85030093850},
  keywords  = {Hybrid Knowledge Bases, MKNF, Distribution Semantics},
  abstract  = {Logic Programming languages and Description Logics are based on different domain closure assumptions, closed and the open world assumption, respectively. Since many domains require both these assumptions, the combination of LP and DL have become of foremost importance. An especially successful approach is based on Minimal Knowledge with Negation as Failure (MKNF), whose semantics is used to define Hybrid KBs, composed of logic programming rules and description logic axioms. Following such idea, we have proposed an approach for defining DL-safe Probabilistic Hybrid Knowledge Bases, where each disjunct in the head of LP clauses and each DL axiom is annotated with a probability value, following the well known distribution semantics. In this paper, we show that this semantics can be unintuitive for non-DL-safe PHKBs, and we propose a new semantics that coincides with the previous one if the PHKB is DL-safe.}
}

@inproceedings{NguLamRig17-PLP-IW,
  author    = {Arnaud {Nguembang Fadja} and Evelina Lamma and Fabrizio Riguzzi},
  title     = {Deep Probabilistic Logic Programming},
  booktitle = {Proceedings of the 4th International Workshop on Probabilistic logic programming, (PLP 2017)},
  editor    = {Christian {Theil Have} and Riccardo Zese},
  year      = {2017},
  volume    = {1916},
  series    = {CEUR Workshop Proceedings},
  address   = {Aachen, Germany},
  publisher = {Sun {SITE} Central Europe},
  issn      = {1613-0073},
  pages     = {3--14},
  venue     = {Orleans, FR},
  eventdate = {2017-09-07},
  pdf       = {http://ceur-ws.org/Vol-1916/paper1.pdf},
  scopus    = {2-s2.0-85030091907},
  keywords  = {Probabilistic Logic Programming, Distribution Semantics, Deep Neural Networks, Arithmetic Circuits},
  abstract  = {Probabilistic logic programming under the distribution semantics has been very useful in machine learning. However, inference is expensive so machine learning algorithms may turn out to be slow. In this paper we consider a restriction of the language called hierarchical PLP in which clauses and predicates are hierarchically organized. In this case the language becomes truth-functional and inference reduces to the evaluation of formulas in the product fuzzy logic. Programs in this language can also be seen as arithmetic circuits or deep neural networks and inference can be reperformed quickly when the parameters change. Learning can then be performed by EM or backpropagation.}
}

@inproceedings{ZamCanRig17-IMW-IW,
  author    = {Cristian Zambelli and Giuseppe Cancelliere and Fabrizio Riguzzi and Evelina Lamma and Piero Olivo and Alessia Marelli and Rino Micheloni},
  title     = {Characterization of {TLC 3D-NAND} Flash Endurance through Machine Learning for {LDPC} Code Rate Optimization},
  booktitle = {2017 IEEE International Memory Workshop (IMW)},
  year      = {2017},
  month     = may,
  pages     = {1--4},
  publisher = {IEEE},
  venue     = {Monterey, CA, USA},
  eventdate = {2017-05-14/2017-05-17},
  doi       = {10.1109/IMW.2017.7939074},
  keywords  = {Clustering algorithms;Computer architecture;Error correction codes;Flash memories;Optimization;Parity check codes;Reliability}
}

@inproceedings{RigLamAlb17-URANIA-IW,
  author    = {Fabrizio Riguzzi and Evelina Lamma and Marco Alberti and Elena Bellodi and Riccardo Zese and Giuseppe Cota},
  title     = {Probabilistic Logic Programming for Natural Language Processing},
  booktitle = {{URANIA} 2016, Deep Understanding and Reasoning: A Challenge for Next-generation Intelligent Agents, Proceedings of the {AI*IA} Workshop on Deep Understanding and Reasoning: A Challenge for Next-generation Intelligent Agents 2016 co-located with 15th International Conference of the Italian Association for Artificial Intelligence ({AIxIA} 2016)},
  editor    = {Federico Chesani and Paola Mello and Michela Milano},
  year      = {2017},
  volume    = {1802},
  series    = {CEUR Workshop Proceedings},
  address   = {Aachen, Germany},
  publisher = {Sun {SITE} Central Europe},
  issn      = {1613-0073},
  pages     = {30--37},
  venue     = {Genova, Italy},
  eventdate = {2016-11-28},
  url       = {http://ceur-ws.org/Vol-1802/},
  pdf       = {http://ceur-ws.org/Vol-1802/paper4.pdf},
  scopus    = {2-s2.0-85015943369},
  copyright = {by the authors},
  keywords  = {Probabilistic Logic Programming, Probabilistic Logical Inference, Natural Language Processing},
  abstract  = {The ambition of Artificial Intelligence is to solve problems without human intervention. Often the problem description is given in human (natural) language. Therefore it is crucial to find an automatic way to understand a text written by a human. The research field concerned with the interactions between computers and natural languages is known under the name of Natural Language Processing (NLP), one of the most studied fields of Artificial Intelligence. In this paper we show that Probabilistic Logic Programming (PLP) is a suitable approach for NLP in various scenarios. For this purpose we use \texttt{cplint} on SWISH, a web application for Probabilistic Logic Programming. \texttt{cplint} on SWISH allows users to perform inference and learning with the framework \texttt{cplint} using just a web browser, with the computation performed on the server.}
}

@inproceedings{Rig16-PLP-IW,
  author    = {Fabrizio Riguzzi},
  title     = {Deductive and Inductive Probabilistic Programming (invited talk)},
  booktitle = {Proceedings of the 3rd International Workshop on Probabilistic Logic Programming ({PLP})},
  editor    = {Arjen Hommersom and Samer Abdallah},
  year      = {2016},
  volume    = {1661},
  series    = {CEUR Workshop Proceedings},
  address   = {Aachen, Germany},
  publisher = {Sun {SITE} Central Europe},
  issn      = {1613-0073},
  pages     = {1--1},
  venue     = {London, UK},
  eventdate = {2016-09-03},
  url       = {http://ceur-ws.org/Vol-1661/#invited-01},
  pdf       = {http://ceur-ws.org/Vol-1661/invited-01.pdf},
  copyright = {by the authors},
  keywords  = {Probabilistic Logic Programming, Probabilistic Programming, Distribution Semantics},
  abstract  = {Probabilistic programming (PP) is available in two different variants: imperative/functional and logic. These two variants have complementary strengths and mostly separate communities. In this talk I will discuss how most strengths of inference for imperative/functional PP can be included in PLP. Moreover, I will show that PLP is particularly suitable for inductive reasoning.}
}

@inproceedings{AlbBelCot16-PLP-IW,
  author    = {Marco Alberti and Elena Bellodi and Giuseppe Cota and Evelina Lamma and Fabrizio Riguzzi and Riccardo Zese},
  title     = {Probabilistic Constraint Logic Theories},
  booktitle = {Proceedings of the 3rd International Workshop on Probabilistic Logic Programming ({PLP})},
  editor    = {Arjen Hommersom and Samer Abdallah},
  year      = {2016},
  volume    = {1661},
  series    = {CEUR Workshop Proceedings},
  address   = {Aachen, Germany},
  publisher = {Sun {SITE} Central Europe},
  issn      = {1613-0073},
  pages     = {15--28},
  venue     = {London, UK},
  eventdate = {2016-09-03},
  url       = {http://ceur-ws.org/Vol-1661/#paper-02},
  pdf       = {http://ceur-ws.org/Vol-1661/paper-02.pdf},
  scopus    = {2-s2.0-84987763948},
  copyright = {by the authors},
  keywords  = {Probabilistic Logic Programming, Distribution Semantics, Constraint Logic Theories},
  abstract  = {Probabilistic logic models are used ever more often to deal with the uncertain relations typical of the real world. However, these models usually require expensive inference procedures. Very recently the problem of identifying tractable languages has come to the fore. In this paper we consider the models used by the learning from interpretations ILP setting, namely sets of integrity constraints, and propose a probabilistic version of them. A semantics in the style of the distribution semantics is adopted, where each integrity constraint is annotated with a probability. These probabilistic constraint logic models assign a probability of being positive to interpretations. This probability can be computed in a time that is logarithmic in the number of ground instantiations of violated constraints. This formalism can be used as the target language in learning systems and for declaratively specifying the behavior of a system. In the latter case, inference corresponds to computing the probability of compliance of a system's behavior to the model.}
}

@inproceedings{CotZes15-AIIADC-IW,
  author    = {Giuseppe Cota and Riccardo Zese and Elena Bellodi and Evelina Lamma and Fabrizio Riguzzi},
  title     = {Learning Probabilistic Ontologies with Distributed Parameter Learning},
  booktitle = {Proceedings of the Doctoral Consortium (DC) co-located with the 14th Conference of the Italian Association for Artificial Intelligence (AI*IA 2015)},
  editor    = {Elena Bellodi and Alessio Bonfietti},
  year      = {2015},
  volume    = {1485},
  series    = {CEUR Workshop Proceedings},
  address   = {Aachen, Germany},
  publisher = {Sun {SITE} Central Europe},
  issn      = {1613-0073},
  pages     = {7--12},
  venue     = {Ferrara, Italy},
  eventdate = {2015-09-23/2015-09-24},
  pdf       = {http://ceur-ws.org/Vol-1485/paper2.pdf},
  copyright = {by the authors},
  keywords  = {Probabilistic Description Logics, Structure Learning, Parameter Learning, MapReduce, Message Passing Interface},
  abstract  = {We consider the problem of learning both the structure and the parameters of Probabilistic Description Logics under DISPONTE. DISPONTE (``DIstribution Semantics for Probabilistic ONTologiEs'') adapts the distribution semantics for Probabilistic Logic Programming to Description Logics. The system LEAP for "LEArning Probabilistic description logics" learns both the structure and the parameters of DISPONTE knowledge bases (KBs) by exploiting the algorithms CELOE and EDGE. The former stands for "Class Expression Learning for Ontology Engineering" and it is used to generate good candidate axioms to add to the KB, while the latter learns the probabilistic parameters and evaluates the KB. EDGE for "Em over bDds for description loGics paramEter learning" is an algorithm for learning the parameters of probabilistic ontologies from data. In order to contain the computational cost, a distributed version of EDGE called EDGEMR was developed. EDGEMR exploits the MapReduce (MR) strategy by means of the Message Passing Interface. In this paper we propose the system LEAPMR. It is a re-engineered version of LEAP which is able to use distributed parallel parameter learning algorithms such as EDGEMR.}
}

@inproceedings{ZesBel15-AIIADC-IW,
  author    = {Riccardo Zese and Elena Bellodi and Fabrizio Riguzzi and Evelina Lamma},
  title     = {Tableau Reasoners for Probabilistic Ontologies Exploiting Logic Programming Techniques},
  booktitle = {Proceedings of the Doctoral Consortium (DC) co-located with the 14th Conference of the Italian Association for Artificial Intelligence (AI*IA 2015)},
  editor    = {Elena Bellodi and Alessio Bonfietti},
  year      = {2015},
  volume    = {1485},
  series    = {CEUR Workshop Proceedings},
  address   = {Aachen, Germany},
  publisher = {Sun {SITE} Central Europe},
  issn      = {1613-0073},
  pages     = {1--6},
  venue     = {Ferrara, Italy},
  eventdate = {2015-09-23/24},
  pdf       = {http://ceur-ws.org/Vol-1485/paper1.pdf},
  scopus    = {2-s2.0-85009168558},
  copyright = {by the authors},
  keywords  = {Distribution Semantics, Probabilistic Semantic Web, Logic Programming, Description Logics},
  abstract  = {The adoption of Description Logics for modeling real world domains within the Semantic Web is exponentially increased in the last years, also due to the availability of a large number of reasoning algorithms. Most of them exploit the tableau algorithm which has to manage non-determinism, a feature that is not easy to handle using procedural languages such as Java or C++. Reasoning on real world domains also requires the capability of managing probabilistic and uncertain information. We thus present TRILL, for "Tableau Reasoner for descrIption Logics in proLog" and TRILLP , for "TRILL powered by Pinpointing formulas", which implement the tableau algorithm and return the probability of queries. TRILLP , instead of the set of explanations for a query, computes a Boolean formula representing them, speeding up the computation.}
}

@inproceedings{Rig15-PLP-IW,
  author    = {Fabrizio Riguzzi},
  title     = {The Distribution Semantics is Well-Defined for All Normal Programs},
  booktitle = {Proceedings of the 2nd International Workshop on Probabilistic Logic Programming (PLP)},
  editor    = {Fabrizio Riguzzi and Joost Vennekens},
  year      = {2015},
  volume    = {1413},
  series    = {CEUR Workshop Proceedings},
  address   = {Aachen, Germany},
  publisher = {Sun {SITE} Central Europe},
  issn      = {1613-0073},
  pages     = {69--84},
  venue     = {Cork, Ireland},
  eventdate = {2015-08-31},
  url       = {http://ceur-ws.org/Vol-1413/#paper-06},
  pdf       = {http://ceur-ws.org/Vol-1413/paper-06.pdf},
  copyright = {by the authors},
  keywords  = {Distribution Semantics, Function Symbols, ProbLog, Probabilistic Logic Programming},
  abstract  = {The distribution semantics is an approach for integrating logic programming and probability theory that underlies many languages and has been successfully applied in many domains. When the program has function symbols, the semantics was defined for special cases: either the program has to be definite or the queries must have a finite number of finite explanations. In this paper we show that it is possible to define the semantics for all programs.}
}

@inproceedings{WieTorRig15-IULP-IW,
  author    = {Jan Wielemaker and Torbj{\"o}rn Lager and Fabrizio Riguzzi},
  title     = {{SWISH: SWI-Prolog} for Sharing},
  booktitle = {International Workshop on User-Oriented Logic Programming {(IULP 2015)}},
  editor    = {Stefan Ellmauthaler and Claudia Schulz},
  year      = {2015},
  url       = {http://arxiv.org/abs/1511.00915},
  copyright = {by the authors},
  keywords  = {Logic Programming, World Wide Web},
  abstract  = {Recently, we see a new type of interfaces for programmers based on web technology. For example, JSFiddle, IPython Notebook and R-studio. Web technology enables cloud-based solutions, embedding in tutorial web pages, attractive rendering of results, web-scale cooperative development, etc. This article describes SWISH, a web front-end for Prolog. A public website exposes SWIProlog using SWISH, which is used to run small Prolog programs for demonstration, experimentation and education. We connected SWISH to the ClioPatria semantic web toolkit, where it allows for collaborative development of programs and queries related to a dataset as well as performing maintenance tasks on the running server and we embedded SWISH in the Learn Prolog Now! online Prolog book.}
}

@inproceedings{CotZesBel15-ECMLDC-IW,
  author    = {Giuseppe Cota and Riccardo Zese and Elena Bellodi and Evelina Lamma and Fabrizio Riguzzi},
  title     = {Structure Learning with Distributed Parameter Learning for Probabilistic Ontologies},
  booktitle = {Doctoral Consortium of the European Conference on Machine Learning and Principles and Practice of Knowledge Discovery in Databases},
  editor    = {Jaakko Hollmen and Panagiotis Papapetrou},
  year      = {2015},
  pages     = {75--84},
  isbn      = {978-952-60-6443-7},
  issn      = {1799-4896, 1799-490X},
  url       = {http://urn.fi/URN:ISBN:978-952-60-6443-7},
  pdf       = {https://aaltodoc.aalto.fi/bitstream/handle/123456789/18224/isbn9789526064437.pdf#page=79},
  copyright = {by the authors},
  keywords  = {Probabilistic Description Logics, Structure Learning, Parameter Learning, MapReduce, Message Passing Interface},
  abstract  = {We consider the problem of learning both the structure and the parameters of Probabilistic Description Logics under DISPONTE. DISPONTE ("DIstribution Semantics for Probabilistic ONTologiEs") adapts the distribution semantics for Probabilistic Logic Programming to Description Logics. The system LEAP for "LEArning Probabilistic description logics" learns both the structure and the parameters of DISPONTE knowledge bases (KBs) by exploiting the algorithms CELOE and EDGE. The former stands for "Class Expression Learning for Ontology Engineering" and it is used to generate good candidate axioms to add to the KB, while the latter learns the probabilistic parameters and evaluates the KB. EDGE for "Em over bDds for description loGics paramEter learning" is an algorithm for learning the parameters of probabilistic ontologies from data. In order to contain the computational cost, a distributed version of EDGE called EDGEMR was developed. EDGEMR exploits the MapReduce (MR) strategy by means of the Message Passing Interface. In this paper we propose the system LEAPMR. It is a re-engineered version of LEAP which is able to use distributed parallel parameter learning algorithms such as EDGEMR.}
}

@inproceedings{ZesBel15-OntoLP-IW,
  author    = {Riccardo Zese and Elena Bellodi and Evelina Lamma and Fabrizio Riguzzi},
  title     = {Logic Programming Techniques for Reasoning with Probabilistic Ontologies},
  booktitle = {Joint Ontology Workshops 2015, JOWO 2015 - Episode 1: The Argentine Winter of Ontology; Buenos Aires; Argentina; 25 July 2015 through 27 July 2015},
  editor    = {Odile Papini and Salem Benferhat and Laurent Garcia and Marie-Laure Mugnier and Eduardo Fermé and Thomas Meyer and Renata Wassermann and Torsten Hahmann and Ken Baclawski and Adila Krisnadhi and Pavel Klinov and Stefano Borgo and Oliver Kutz and Daniele Porello},
  year      = {2015},
  volume    = {1517},
  series    = {CEUR Workshop Proceedings},
  address   = {Aachen, Germany},
  publisher = {Sun {SITE} Central Europe},
  issn      = {1613-0073},
  venue     = {Buenos Aires, Argentine},
  eventdate = {2015-07-25/27},
  pdf       = {http://ceur-ws.org/Vol-1517/JOWO-15_ontolp_paper_3.pdf},
  copyright = {CC0 \url{https://creativecommons.org/publicdomain/zero/1.0/}},
  keywords  = {Description Logics, Tableau, Prolog, Semantic Web, Pinpointing Formula},
  abstract  = {The increasing popularity of the Semantic Web drove to a widespread adoption of Description Logics (DLs) for modeling real world domains. To help the diffusion of DLs a large number of reasoning algorithms have been developed. Usually these algorithms are implemented in procedural languages such as Java or C++. Most of the reasoners exploit the tableau algorithm which has to manage non-determinism, a feature that is hard to handle using such languages. Reasoning on real world domains also requires the capability of managing probabilistic and uncertain information. We thus present TRILL for ``Tableau Reasoner for descrIption Logics in proLog'' that implements a tableau algorithm and is able to return explanations for the queries and the corresponding probability, and TRILL$^P$ for ``TRILL powered by Pinpointing formulas'' which is able to compute a Boolean formula representing the set of explanations for the query. This approach can speed up the process of computing the probability. Prolog non-determinism is used for easily handling the tableau's non-deterministic expansion rules.}
}

@inproceedings{RigBelLamZes12-URSW12-IW,
  author    = {Fabrizio Riguzzi and Elena Bellodi and Evelina Lamma and Riccardo Zese},
  title     = {Epistemic and Statistical Probabilistic Ontologies},
  booktitle = {Proceedings of the 8th International Workshop on Uncertain Reasoning for the Semantic Web (URSW2012), Boston, USA, 11 November 2012},
  editor    = {Fernando Bobillo and Rommel Carvalho and da Costa, Paulo C. G. and Nicola Fanizzi and Laskey, Kathryn B. and Laskey, Kenneth J. and Thomas Lukasiewicz and Trevor Martin and Matthias Nickles and Michael Pool},
  year      = {2012},
  volume    = {900},
  series    = {CEUR Workshop Proceedings},
  address   = {Aachen, Germany},
  publisher = {Sun {SITE} Central Europe},
  issn      = {1613-0073},
  pages     = {3--14},
  pdf       = {http://ceur-ws.org/Vol-900/paper1.pdf},
  abstract  = {We present DISPONTE, a semantics for probabilistic ontologies that is based on the distribution semantics for probabilistic logic programs. In DISPONTE the axioms of a probabilistic ontology can be annotated with an epistemic or a statistical probability. The epistemic probability represents a degree of confidence in the axiom, while the statistical probability considers the populations to which the axiom is applied.}
}

@inproceedings{BelRig12-AIIADC12-IW,
  author    = {Elena Bellodi and Fabrizio Riguzzi},
  title     = {Parameter and Structure Learning Algorithms for Statistical Relational Learning},
  booktitle = {Doctoral Consortium of the 12th AI*IA Symposium on Artificial Intelligence, Proceedings of the Doctoral Consortium of the 12th Symposium of the Italian Association for Artificial Intelligence Rome, Italy, June 15, 2012},
  editor    = {Paolo Liberatore and Michele Lombardi and Floriano Scioscia},
  year      = {2012},
  volume    = {926},
  series    = {CEUR Workshop Proceedings},
  address   = {Aachen, Germany},
  publisher = {Sun {SITE} Central Europe},
  issn      = {1613-0073},
  pages     = {5--9},
  pdf       = {http://ceur-ws.org/Vol-926/paper1.pdf},
  scopus    = {2-s2.0-84891770795},
  copyright = {by the authors},
  keywords  = {Statistical relational learning, machine learning, first order logic},
  abstract  = {My research activity focuses on the field of Machine Learning. Two key challenges in most machine learning applications are uncertainty and complexity. The standard framework for handling uncertainty is probability, for complexity is first-order logic. Thus we would like to be able to learn and perform inference in representation languages that combine the two. This is the focus of the field of Statistical Relational Learning.}
}

@inproceedings{RigBelLam12-DL12-IW,
  author    = {Fabrizio Riguzzi and Elena Bellodi and Evelina Lamma},
  title     = {Probabilistic {Datalog+/-} under the Distribution Semantics},
  booktitle = {Proceedings of the 25th International Workshop on Description Logics ({DL2012}), Roma, Italy, 7-10 June 2012},
  editor    = {Yevgeny Kazakov and Domenico Lembo and Frank Wolter},
  year      = {2012},
  volume    = {846},
  series    = {CEUR Workshop Proceedings},
  address   = {Aachen, Germany},
  publisher = {Sun {SITE} Central Europe},
  issn      = {1613-0073},
  pages     = {519--529},
  url       = {http://ds.ing.unife.it/~friguzzi/Papers/RigBelLam12-DL12.pdf},
  pdf       = {http://ceur-ws.org/Vol-846/paper_25.pdf},
  copyright = {by the authors},
  abstract  = {We apply the distribution semantics for probabilistic ontologies (named DISPONTE) to the Datalog+/- language. In DISPONTE the formulas of a probabilistic ontology can be annotated with an epistemic or a statistical probability. The epistemic probability represents a degree of confidence in the formula, while the statistical probability considers the populations to which the formula is applied. The probability of a query is defined in terms of finite set of finite explanations for the query, where an explanation is a set of possibly instantiated formulas that is sufficient for entailing the query. The probability of a query is computed from the set of explanations by making them mutually exclusive. We also compare the DISPONTE approach for Datalog+/- ontologies with that of Probabilistic Datalog+/-, where an ontology is composed of a Datalog+/- theory whose formulas are associated to an assignment of values for the random variables of a companion Markov Logic Network.}
}

@inproceedings{BelLamRigAlb11-URSW11-IW,
  author    = {Elena Bellodi and Evelina Lamma and Fabrizio Riguzzi and Simone Albani},
  title     = {A Distribution Semantics for Probabilistic Ontologies},
  booktitle = {Proceedings of the 7th International Workshop on Uncertainty Reasoning for the Semantic Web, Bonn, Germany, 23 October, 2011},
  editor    = {Fernando Bobillo and Rommel Carvalho and da Costa, Paulo C. G. and d'Amato, Claudia and Nicola Fanizzi and Laskey, Kathryn B. and Laskey, Kenneth J. and Thomas Lukasiewicz and Trevor Martin and Matthias Nickles and Michael Pool},
  year      = {2011},
  volume    = {778},
  series    = {CEUR Workshop Proceedings},
  address   = {Aachen, Germany},
  publisher = {Sun {SITE} Central Europe},
  issn      = {1613-0073},
  pages     = {75--86},
  url       = {http://ds.ing.unife.it/~friguzzi/Papers/BelLamRigAlb-URSW11.pdf},
  pdf       = {http://ceur-ws.org/Vol-778/paper7.pdf},
  abstract  = {We present DISPONTE, a semantics for probabilistic ontologies that is based on the distribution semantics for probabilistic logic programs. In DISPONTE each axiom of a probabilistic ontology is annotated with a probability. The probabilistic theory defines thus a distribution over normal theories (called worlds) obtained by including an axiom in a world with a probability given by the annotation. The probability of a query is computed from this distribution with marginalization. We also present the system BUNDLE for reasoning over probabilistic OWL DL ontologies according to the DISPONTE semantics. BUNDLE is based on Pellet and uses its capability of returning explanations for a query. The explanations are encoded in a Binary Decision Diagram from which the probability of the query is computed.}
}

@inproceedings{BelRig11-MCP11-IW,
  author    = {Elena Bellodi and Fabrizio Riguzzi},
  title     = {An {Expectation Maximization} Algorithm for Probabilistic Logic Programs},
  booktitle = {Proceedings of the Workshop on Mining Complex Patterns ({MCP2011}), 17 September 2011},
  editor    = {Appice, Annalisa and Ceci, Michelangelo and Loglisci, Corrado and Manco, Giuseppe},
  year      = {2011},
  month     = sep,
  address   = {Palermo, Italy},
  pages     = {26--37},
  url       = {http://ds.ing.unife.it/~friguzzi/Papers/BelRig-MCP11.pdf},
  copyright = {by the authors},
  keywords  = {Statistical Relational Learning, Probabilistic Logic Programs, Logic Programs with Annotated Disjunction, Expectation Maximization, Binary Decision Diagrams},
  abstract  = {Recently much work in Machine Learning has concentrated on representation languages able to combine aspects of logic and probability, leading to the birth of a whole field called Statistical Relational Learning. In this paper we present a technique for parameter learning targeted to a family of formalisms where uncertainty is represented using Logic Programming tools - the so-called Probabilistic Logic Programs such as ICL, PRISM, ProbLog and LPAD. Since their equivalent Bayesian networks contain hidden variables, an EM algorithm is adopted. In order to speed the computation, expectations are computed directly on the Binary Decision Diagrams that are built for inference. The resulting system, called EMBLEM for ``EM over BDDs for probabilistic Logic programs Efficient Mining'', has been applied to a number of datasets and showed good performances both in terms of speed and memory.}
}

@inproceedings{RigSwi10-RCRA10-IW,
  author    = {Fabrizio Riguzzi and Terrance Swift},
  title     = {Tabling and Answer Subsumption for Reasoning on Logic Programs with Annotated Disjunctions},
  booktitle = {Proceedings of the 17th {RCRA} International Workshop on Experimental Evaluation of Algorithms for Solving Problems with Combinatorial Explosion, Bologna, Italy, June 10-11, 2010},
  editor    = {Marco Gavanelli and Toni Mancini},
  year      = {2010},
  volume    = {616},
  series    = {CEUR Workshop Proceedings},
  address   = {Aachen, Germany},
  publisher = {Sun {SITE} Central Europe},
  issn      = {1613-0073},
  pages     = {1--10},
  url       = {http://ceur-ws.org/Vol-616/},
  pdf       = {http://ds.ing.unife.it/~friguzzi/Papers/RigSwi-RCRA10.pdf},
  scopus    = {2-s2.0-84893638592},
  keywords  = {Probabilistic Logic Programming, Tabling, Answer Subsumption, Logic Programs with Annotated Disjunction, Program Transformation},
  abstract  = {The paper presents the algorithm ``Probabilistic Inference with Tabling and Answer subsumption'' (PITA) for computing the probability of queries from Logic Programs with Annotated Disjunctions. PITA is based on a program transformation techniques that adds an extra argument to every atom. PITA uses tabling for saving intermediate results and answer subsumption for combining different answers for the same subgoal. PITA has been implemented in XSB and compared with the ProbLog, cplint and CVE systems. The results show that in almost all cases, PITA is able to solve larger problems and is faster than competing algorithms.}
}

@inproceedings{BelRigLam09-RICERCA-RCRA-IW,
  author    = {Elena Bellodi and Fabrizio Riguzzi and Evelina Lamma},
  title     = {Mining Probabilistic Declarative Process Models},
  booktitle = {Session {R.i.C.e.R.c.A}: RCRA Incontri E Confronti of the 16th RCRA International Workshop on Experimental evaluation of algorithms for solving problems with combinatorial explosion ({RCRA} 2009) Reggio Emilia, Italy, 11-12 December 2009},
  editor    = {Marco Gavanelli and Toni Mancini},
  year      = {2009},
  url       = {http://ds.ing.unife.it/~friguzzi/Papers/BelRigLam09-RICERCA-RCRA-IW.pdf},
  keywords  = {Process Mining, Learning from Interpretations, Business Processes, Probabilistic Relational Languages},
  abstract  = {The management of business processes has recently received a lot of attention from companies, since it can support efficiency improvement. We present an approach for mining process models that first induces a model in the SCIFF logical language and then translates the model into Markov logic, a language belonging to the field of statistical relational learning. Markov logic attaches weights to first-order constraints, in order to obtain a final probabilistic classification of process traces better than the purely logical one. The data used for learning and testing belong to a real database of university students' careers.}
}

@inproceedings{BraRig09-RICERCA-RCRA-IW,
  author    = {Stefano Bragaglia and Fabrizio Riguzzi},
  title     = {Approximate Inference for Logic Programs with Annotated Disjunctions},
  booktitle = {Session {R.i.C.e.R.c.A}: RCRA Incontri E Confronti of the 16th RCRA International Workshop on Experimental evaluation of algorithms for solving problems with combinatorial explosion ({RCRA} 2009) Reggio Emilia, Italy, 11-12 December 2009},
  editor    = {Marco Gavanelli and Toni Mancini},
  year      = {2009},
  url       = {http://ds.ing.unife.it/~friguzzi/Papers/BraRig09-RICERCA-RCRA-IW.pdf},
  keywords  = {Probabilistic Reasoning, Probabilistic Logic Programming, Logic Programming, Logic Programs with Annotated Disjunctions},
  abstract  = {The paper presents two algorithms for performing approximate inference on Logic Programs with Annotated Disjunctions: k-best and Monte Carlo. The first is based on branch and bound while the second is based on a stochastic approach.}
}

@inproceedings{Rig09-RCRA-IW,
  author    = {Fabrizio Riguzzi},
  title     = {The {SLGAD} Procedure for Inference on {Logic Programs with Annotated Disjunctions}},
  booktitle = {Proceedings of the 15th {RCRA} workshop on Experimental Evaluation of Algorithms for Solving Problems with Combinatorial Explosion Udine, Italy, December 12-13, 2008},
  editor    = {Marco Gavanelli and Toni Mancini},
  year      = {2009},
  volume    = {451},
  series    = {CEUR Workshop Proceedings},
  address   = {Aachen, Germany},
  publisher = {Sun {SITE} Central Europe},
  issn      = {1613-0073},
  url       = {http://ceur-ws.org/Vol-451/paper15riguzzi.pdf},
  copyright = {by the authors}
}

@inproceedings{CheMelMon-BPI08-IW,
  title     = {Checking Compliance of Execution Traces to Business Rules},
  author    = {Federico Chesani and Paola Mello and Marco Montali and Fabrizio Riguzzi and Maurizio Sebastianis and Sergio Storari},
  booktitle = {Proceedings of the 4th Workshop on Business Process Intelligence (BPI 08)},
  year      = {2009},
  series    = {Lecture Notes in Business Information Processing},
  publisher = {Springer},
  note      = {The original publication is available at \url{http://www.springerlink.com}},
  volume    = {17},
  pages     = {129--140},
  address   = {Heidelberg, Germany},
  abstract  = {Complex and flexible business processes are critical not only because they are difficult to handle, but also because they often tend to loose their intelligibility. Verifying compliance of complex and flexible processes becomes therefore a fundamental requirement. We propose a framework for performing compliance checking of process execution traces w.r.t.~expressive reactive business rules, tailored to the MXML meta-model. Rules are mapped to Logic Programming, using Prolog to classify execution traces as compliant/non-compliant. We show how different rule templates, inspired by the ConDec language, can be easily specified and then customized in the context of a real industrial case study. We finally describe how the proposed language and its underlying a-posteriori reasoning technique have been concretely implemented as a ProM analysis plug-in.},
  pdf       = {http://ds.ing.unife.it/~friguzzi/Papers/CheMelMon-BPI08.pdf},
  doi       = {10.1007/978-3-642-00328-8_13},
  url       = {http://www.springerlink.com/content/uh46621176654767/},
  copyright = {Springer}
}

@inproceedings{LamRigStoMelMon-IPM07-IW,
  author    = {Evelina Lamma and Fabrizio Riguzzi and Sergio Storari and Paola Mello and Marco Montali},
  title     = {Learning {DecSerFlow} Models from Labeled Traces},
  booktitle = {Proceedings of the 1st International Workshop on the Induction of Process Models},
  year      = {2007},
  pdf       = {http://ds.ing.unife.it/~friguzzi/Papers/LamRigStoMelMon-IPM07.pdf},
  abstract  = {We present the system DecMiner that induces DecSerFlow models from positive and negative traces. The approach we follow consists in first inducing SCIFF constraints and then converting them into DecSerFlow ones.},
  keywords  = {Process mining, Process verification and validation, Logic Programming, DecSerFlow, Careflow}
}

@inproceedings{Rig-RCRA07-IW,
  author    = {Fabrizio Riguzzi},
  title     = {A Top Down Interpreter for {LPAD} and {CP}-logic},
  booktitle = {Proceedings of the 14th RCRA workshop Experimental Evaluation of Algorithms for Solving Problems with Combinatorial Explosion},
  year      = {2007},
  pdf       = {http://pst.istc.cnr.it/RCRA07/articoli/P19-riguzzi-RCRA07.pdf},
  abstract  = {Logic Programs with Annotated Disjunctions and CP-logic are two different but related languages for expressing probabilistic information in logic programming. The paper presents a top down interpreter for computing the probability of a query from a program in one of these two languages when the program is acyclic. The algorithm is based on the one available for ProbLog. The performances of the algorithm are compared with those of a Bayesian reasoner and with those of the ProbLog interpreter. On programs that have a small grounding, the Bayesian reasoner is more scalable, but programs with a large grounding require the top down interpreter. The comparison with ProbLog shows that, even if the added expressiveness effectively requires more computation resources, the top down interpreter can still solve problems of significant size.},
  keywords  = {Probabilistic Logic Programming, Logic Programs with Annotated Disjunctions, Probabilistic Reasoning}
}

@inproceedings{Rig-MRDM07-IW,
  author    = {Fabrizio Riguzzi},
  title     = {Learning Ground {ProbLog} Programs from Interpretations},
  booktitle = {Proceedings of the 6th Workshop on Multi-Relational Data Mining ({MRDM07})},
  year      = {2007},
  pdf       = {http://ds.ing.unife.it/~friguzzi/Papers/Rig-MRDM07.pdf},
  keywords  = {Probabilistic Logical Models, ProbLog, LPAD, Noisy Or},
  abstract  = {The relations between ProbLog and Logic Programs with Annotated Disjunctions imply that Boolean Bayesian networks can be represented as ground ProbLog programs and acyclic ground ProbLog programs can be represented as Boolean Bayesian networks. This provides a way of learning ground acyclic ProbLog programs from interpretations: first the interpretations are represented in tabular form, then a Bayesian network learning algorithm is applied and the learned network is translated into a ground ProbLog program. The program is then further analyzed in order to identify noisy or relations in it. The paper proposes an algorithm for such identification and presents an experimental analysis of its computational complexity.}
}

@inproceedings{GamLamRigStoSca-DMFG07-IW,
  author    = {Giacomo Gamberoni and Evelina Lamma and Fabrizio Riguzzi and Sergio Storari and Chiara Scapoli},
  title     = {Combining {APRIORI} and Bootstrap Techniques for Marker Analysis},
  booktitle = {Proceedings of the Workshop Data Mining in Functional Genomics and Proteomics: Current Trends and Future Directions},
  year      = {2007},
  pdf       = {http://ds.ing.unife.it/~friguzzi/Papers/GamLamRigScaSto-DMFG07.pdf},
  abstract  = {In genetic studies, complex diseases are often analyzed searching for marker patterns that play a significant role in the susceptibility to the disease. In this paper we consider a dataset regarding periodontitis, that includes the analysis of nine genetic markers for 148 individuals. We analyze these data by using a novel subgroup discovering algorithm, named APRIORI-B, that is based on APRIORI and bootstrap techniques. This algorithm can use different metrics for rule selection. Experiments conducted by using as rule metrics novelty and confirmation, confirmed some previous results published on periodontitis.},
  keywords  = {Data Mining, Functional Genomics, Marker Analysis, Periodontitis}
}

@inproceedings{GamLamRig06-DTMIB-IW,
  author    = {Giacomo Gamberoni and Evelina Lamma and Fabrizio Riguzzi and Sergio Storari and Chiara Scapoli},
  title     = {Marker Analysis with {APRIORI}-Based Algorithms},
  booktitle = {Notes from the Workshop on Data and Text Mining for Integrative Biology of the 17th European Conference on Machine Learning ({ECML}'2006) and the 10th European Conference on Principles and Practice of Knowledge Discovery in Databases ({PKDD}'2006)},
  address   = {Berlin, Germany},
  month     = sep,
  year      = {2006},
  editor    = {Melanie Hilario and Claire N{\'e}dellec},
  pages     = {61--66},
  http      = {http://www.ecmlpkdd2006.org/ws-dtib.pdf},
  pdf       = {http://ds.ing.unife.it/~friguzzi/Papers/GamLamRig-DTMIB.pdf},
  abstract  = {In genetic studies, polygenic diseases are often analyzed searching for marker patterns that play a significant role in the susceptibility to the disease. In this paper we consider a dataset regarding periodontitis, that includes the analysis of nine genetic markers for 148 patients. We analyze these data by using two APRIORI-based algorithms: APRIORI-SD and APRIORI with filtering. The discovered rules (especially those found by APRIORI with filtering) confirmed the results published on periodontitis.}
}

@inproceedings{Rig05-ILP05LateBreakingPapers-IW,
  author    = {Fabrizio Riguzzi},
  title     = {Two Results Regarding Refinement Operators},
  booktitle = {Late Breaking Papers, 15th International Workshop on Inductive Logic Programming ({ILP}05), Bonn, Germany, August 10--13, 2005},
  year      = {2005},
  editor    = {S. Kramer and B. Pfahringer},
  month     = jul,
  publisher = {Technische Universit{\"a}t M{\"u}nchen},
  note      = {Report {TUM}--{I0510}},
  address   = {M{\"u}nchen, Germany},
  pages     = {53--58},
  abstract  = {In this paper we present two results regarding refinement operators. The first is that it does not exist a refinement operator that is both complete and optimal for the theta-subsumption ordering and for the language of full clausal logic. The second regards the properties of the refinement operator implemented in Aleph's code by predicate auto\_refine/2. We think this operator is interesting for its simplicity and because it does not require the construction of a bottom-clause. In particular, the operator is useful in the cases where a bottom-clause can not be built, as for example in learning from interpretations. The properties of this operator are that it is locally finite, not proper nor complete but weakly complete. Moreover, the operator is also not optimal. However, it can be made complete by extending the specification of the language bias and by requiring that the language does not contain function symbols.},
  keywords  = {Inductive Logic Programming, Refinement Operators},
  pdf       = {http://ds.ing.unife.it/~friguzzi/Papers/Rig-ILP05.pdf}
}

@inproceedings{Rig05-ILP05DiscChall-IW,
  author    = {Fabrizio Riguzzi},
  title     = {A Simple Approach to a Multi-Label Classification Problem},
  booktitle = {Discovery Challenge, Late Breaking Papers, 15th International Workshop on Inductive Logic Programming ({ILP}05), Bonn, Germany, August 10--13, 2005},
  year      = {2005},
  editor    = {S. Kramer and B. Pfahringer},
  month     = jul,
  publisher = {Technische Universit{\"a}t M{\"u}nchen},
  note      = {Report {TUM}--{I0510}},
  address   = {M{\"u}nchen, Germany},
  pages     = {105--110},
  abstract  = {The approach to handle multiple label for each gene is to have a learning problem for each label that appears in \texttt{yeast.labelled}. In each learning problem, a gene is a positive example if it contains that label, otherwise it is a negative example. In this way we learn one classifier for each label. To label unseen genes, we run each generated classifier on the gene data and we assign the label to the gene if the classifier gives a positive answer. As a classifier, we have used Tilde for its speed and good accuracy. In order to finish the experiments before the deadline we had to consider only a subset of the available data, namely the protein secondary structure data.},
  keywords  = {Inductive Logic Programming, Multiple Label Classification, Gene Ontology},
  pdf       = {http://ds.ing.unife.it/~friguzzi/Papers/Rig-ILP05chall.pdf}
}

@inproceedings{Rig04-DISCCHALL-IW,
  author    = {Fabrizio Riguzzi},
  title     = {Classification and visualization on the hepatitis dataset},
  booktitle = {ECML/PKDD 2004 Discovery Challenge, Pisa, 20-24 September 2004},
  editor    = {Petr Berka and Bruno Cr{\'e}milleux},
  abstract  = {In this paper we address goals 2 and 3 of those proposed by the donors of the Hepatitis dataset, namely to evaluate whether it is possible to estimate the stage of liver fibrosis from the results of examinations, and to evaluate the effectiveness of the interferon therapy. Goal 2 was addressed by learning various classifiers that predict the value of fibrosis from the values of examinations other than the biopsy. Unfortunately, the best accuracy obtained was only 50.6 \%, up only 2.1 \% from the performance of the default classifier, thus showing that replacing biopsies is still very hard if not impossible. As regards goal 3, we have plotted the distribution of the values of the difference in fibrosis and in activity before and after the interferon therapy. The plots show that the therapy actually reduces the level of activity but not the level of fibrosis. Moreover, we have also plotted the distribution of the values of the difference of GOT before and after the therapy. The graph shows that a moderate reduction of GOT is obtained.},
  year      = {2004},
  month     = sep,
  keywords  = {Classification, Visualization},
  url       = {http://lisp.vse.cz/challenge/ecmlpkdd2004/final/riguzzi.ps},
  pdf       = {http://ds.ing.unife.it/~friguzzi/Papers/Rig-DISCCHALL04.pdf}
}

@inproceedings{LamManMel01-IDAMAP01-IW,
  author    = {Evelina Lamma and Marco Manservigi and Paola Mello and Annamaria Nanetti and Fabrizio Riguzzi and Sergio Storari},
  title     = {The Automatic Discovery of Alarm Rules for the Validation of Microbiological Data},
  booktitle = {6th International Workshop on Intelligent Data Analysis In Medicine And Pharmacology (IDAMAP2001)},
  year      = {2001},
  month     = sep,
  pages     = {1--7},
  address   = {London, UK},
  editor    = {Bellazzi, R. and Zupan, B. and Liu, X.},
  pdf       = {http://ds.ing.unife.it/~friguzzi/Papers/LamManMel-IDAMAP01.pdf}
}

@inproceedings{LamRigPer01a-CLIMA01-IW,
  author    = {Evelina Lamma and Fabrizio Riguzzi and Lu\'\i{}s Moniz Pereira},
  title     = {Belief Revision by Multi-Agent Genetic Search},
  booktitle = {{ICLP}01 2nd International Workshop on Computational Logic for Multi-Agent Systems ({CLIMA}01)},
  abstract  = {The revision of beliefs is an important general purpose functionality that an agent must exhibit. The agent usually needs to perform this task in cooperation with other agents, because access to knowledge and the knowledge itself are distributed in nature. In this work, we propose a new approach for performing belief revision in a society of logic-based agents, by means of a (distributed) genetic algorithm, where the revisable assumptions of each agent are coded into chromosomes as bit-strings. Each agent by itself locally performs a genetic search in the space of possible revisions of its knowledge, and exchanges genetic information by crossing its revisable chromosomes with those of other agents. We have performed experiments comparing the evolution in beliefs of a single agent informed of the whole of knowledge, to that of a society of agents, each agent accessing only part of the knowledge. In spite that the distribution of knowledge increases the difficulty of the problem, experimental results show that the solutions found in the multi-agent case are comparable in terms of accuracy to those obtained in the single agent case. The genetic algorithm we propose, besides encompassing the Darwinian operators of selection, mutation and crossover, also comprises a Lamarckian operator that mutates the genes in a chromosome as a consequence of the chromosome phenotype's individual experience obtained while solving a belief revision problem. These chromosomic mutations are directed by a logic-based belief revision procedure that relies on tracing the logical derivations leading to inconsistency of belief, so as to remove these derivations' support on the gene coded assumptions, effectively by mutating the latter. Because of the use a Lamarckian operator, and following the literature, the genes in these chromosomes that are modified by the Lamarckian operator are best dubbed ``memes'', since they code the memory of the experiences of an individual along its lifetime, besides being transmitted to its progeny. We believe our method to be important for situations where classical belief revision methods hardly apply: those where environments are non-uniform and time changing. These can be explored by distributed agents that evolve genetically to accomplish cooperative belief revision, if they use our approach.},
  year      = {2001},
  pdf       = {http://ds.ing.unife.it/~friguzzi/Papers/LamPerRig-CLIMA01a.pdf},
  month     = dec,
  keywords  = {Genetic Algorithms, Theory Revision},
  address   = {Paphos, Cyprus}
}

@inproceedings{LamRigPer01b-CLIMA01-IW,
  author    = {Evelina Lamma and Fabrizio Riguzzi and Lu\'\i{}s Moniz Pereira},
  title     = {A System for Multi-Agent Belief Revision by Genetic Search},
  booktitle = {{ICLP}01 2nd International Workshop on Computational Logic for Multi-Agent Systems ({CLIMA}01)},
  year      = {2001},
  pdf       = {http://ds.ing.unife.it/~friguzzi/Papers/LamPerRig-CLIMA01b.pdf},
  month     = dec,
  keywords  = {Genetic Algorithms, Theory Revision, Logic Programming, Belief Revision, Multi-Agent Systems},
  address   = {Paphos, Cyprus},
  abstract  = {We consider a definition of the belief revision problem that consists in removing a contradiction from an extended logic program by modifying the truth value of a selected set of literals called revisables. The program contains as well clauses with false in the head, representing integrity constraints. Any model of the program must ensure that the body of integrity constraints be false for the program to be non-contradictory. Contradiction may also arise in an extended logic program when both a literal L and its opposite $\neg$L are obtainable in the model of the program. Such a problem has been widely studied in the literature, and various solutions have been proposed that are based on abductive logic proof procedures. The system performs belief revision in a society of logic-based agents, by means of a (distributed) genetic algorithm. The problem can be modeled by means of a genetic algorithm, by assigning to each revisable of a logic program a gene in a chromosome. In the case of a two-valued revision, the gene will have the value 1 if the corresponding revisable is true and the value 0 if the revisable is false. The fitness function that is used in this case is represented in part by the percentage of integrity constraints that are satisfied by a chromosome.}
}

@inproceedings{LamPerRig00-MSL00-IW,
  title     = {Logic Aided {Lamarckian} Evolution},
  author    = {Evelina Lamma and Lu\'{i}s Moniz Pereira and Fabrizio Riguzzi},
  booktitle = {Procs. of Multi-Strategy Learning Workshop (MSL00), Guimar\~{a}es, Portugal},
  editor    = {Pavel Brazdil and Ryszard S. Michalski},
  publisher = {LIAAC - Universidade do Porto},
  address   = {Porto, Portugal},
  pages     = {59--73},
  month     = jun,
  year      = {2000},
  keywords  = {Genetic Algorithms, ILP Implementation, Theory Revision},
  abstract  = {We propose a multi-strategy genetic algorithm for performing belief revision. The algorithm implements a new evolutionary strategy which is a combination of the theories of Darwin and Lamarck. Therefore, the algorithm not only includes the Darwinian operators of selection, mutation and crossover but also a Lamarckian operator that changes the individuals so that they perform better in solving the given problem. This is achieved through belief revision directed mutations, oriented by tracing logical derivations. The algorithm, with and without the Lamarckian operator, is tested on a number of belief revision problems, and the results show that the addition of the Lamarckian operator improves the efficiency of the algorithm. We believe that the combination of Darwinian and Lamarckian operators will be useful not only for standard belief revision problems but especially for problems where the chromosomes may be exposed to different constraints and observations. In these cases, the Lamarckian and Darwinian operators would play a different role: the Lamarckian one would be used in order to bring a chromosome closer to a solution or to find an exact solution of the current belief revision problem, while Darwinian ones will have the aim of preparing chromosomes to deal with new situations by exchanging genes among them.},
  url       = {http://ds.ing.unife.it/~friguzzi/Papers/LamPerRig-MSL00.pdf}
}

@inproceedings{LamManMel00-IDAMAP00-IW,
  title     = {A System for Monitoring Nosocomial Infections},
  author    = {Evelina Lamma and Marco Manservigi and Paola Mello and Fabrizio Riguzzi and Roberto Serra and Sergio Storari},
  booktitle = {ECAI2000 Workshop on Intelligent Data Analysis in Medicine and Pharmacology ({IDAMAP}-2000), Berlin, 20-25 August 2000},
  editor    = {Nada Lavra\v{c} and Silvia Miksch and Branko Kav\v{s}ek},
  publisher = {ECAI Workshop Notes},
  month     = aug,
  year      = {2000},
  address   = {Berlin, Germany},
  pages     = {17--19},
  pdf       = {http://ds.ing.unife.it/~friguzzi/Papers/LamManMel-IDAMAP00.pdf}
}

@inproceedings{CucMelPic00-MLCV00-IW,
  title     = {An Application of Machine Learning and Statistics to Defect Detection},
  author    = {Rita Cucchiara and Paola Mello and Massimo Piccardi and Fabrizio Riguzzi},
  booktitle = {ECAI2000 Workshop on Machine Learning in Computer Vision (MLCV00), Berlin, 22 August 2000},
  editor    = {Floriana Esposito and Donato Malerba},
  publisher = {ECAI Workshop Notes},
  month     = aug,
  year      = {2000},
  address   = {Berlin, Germany},
  pdf       = {http://ds.ing.unife.it/~friguzzi/Papers/CucMelPic-MLCV00.pdf}
}

@inproceedings{LamRigPer99-ILP99LateBreakingPapers-IW,
  author    = {Evelina Lamma and Fabrizio Riguzzi and Lu\'{i}s Moniz Pereira},
  title     = {Learning Three-valued Logic Programs},
  booktitle = {Late Breaking Papers, 9th International Workshop on Inductive Logic Programming (ILP99)},
  year      = {1999},
  editor    = {S. Dzeroski and P. Flach},
  month     = jun,
  address   = {Bled, Slovenia},
  pdf       = {http://ds.ing.unife.it/~friguzzi/Papers/LamRigPer-ILP99.pdf}
}

@inproceedings{LamRigPer99-ACAI99-IW,
  author    = {Evelina Lamma and Fabrizio Riguzzi and Lu\'{i}s Moniz Pereira},
  title     = {Agents Learning in a Three-Valued Logical Setting},
  booktitle = {Workshop on Machine Learning and Intelligent Agents, Advanced Course on Artificial Intelligence 1999 (ACAI'99)},
  year      = {1999},
  address   = {Crete, Greece},
  month     = jul,
  pdf       = {http://ds.ing.unife.it/~friguzzi/Papers/LamRigPer-ACAI99.pdf}
}

@inproceedings{LamRigPer98-LPNMR98-IW,
  author    = {Evelina Lamma and Fabrizio Riguzzi and Lu\'{i}s Moniz Pereira},
  title     = {Learning with Extended Logic Programs},
  booktitle = {Proceedings of the Logic Programming track of the Seventh International Workshop on Nonmonotonic Reasoning ({LP-NMR98}), Trento, Italy, May 30 - June 1, 1998},
  year      = {1998},
  editor    = {Juergen Dix and Jorge Lobo},
  month     = may,
  publisher = {Universit{\"a}t Koblenz-Landau, Institut f{\"u}r Informatik},
  pages     = {1--9},
  address   = {Koblenz, Germany},
  pdf       = {http://ds.ing.unife.it/~friguzzi/Papers/LamRigPer98-LPNMR98.pdf},
  url       = {http://www.cs.man.ac.uk/~jdix/NMR7/SUBMISSIONS/riguzzi.ps.gz},
  abstract  = {We discuss the adoption of a three-valued setting for inductive concept learning. Distinguishing between what is true, what is false and what is unknown can be useful in situations where decisions have to be taken on the basis of scarce information. In a three-valued setting, we want to learn a definition for both the target concept and its opposite, considering positive and negative examples as instances of two disjoint classes. To this purpose, we adopt extended logic programs under a well-founded semantics as the representation formalism for learning. In this way, we are able to represent both the concept and its opposite and deal with incomplete or unknown information. We discuss various approaches to be adopted in order to handle possible inconsistencies. Default negation is used to ensure consistency and to handle exceptions to general rules. Exceptions to a positive concept are identified from negative examples, whereas exceptions to a negative concept are identified from positive examples. Exceptions can be generalized, in their turn, by learning within a hierarchy of defaults.},
  keywords  = {Inductive Logic Programming, Extended Logic Programs}
}

@inproceedings{LamRigPer98-MSL98-IW,
  author    = {Evelina Lamma and Fabrizio Riguzzi and Lu\'{i}s Moniz Pereira},
  title     = {Strategies for Learning with Extended Logic Programs},
  booktitle = {Proceedings of the Fourth International Workshop on Multistrategy Learning ({MSL98}), Desenzano del Garda, Italy, 11--13 June 1998},
  year      = {1998},
  publisher = {Dipartimento di Informatica, Universit{\`a} di Torino},
  month     = jun,
  address   = {Torino, Italy},
  pages     = {99--108},
  editor    = {Floriana Esposito and Ryszard Michalski and Lorenza Saitta}
}

@inproceedings{Rig98-COMPULOG98-IW,
  author    = {Fabrizio Riguzzi},
  title     = {Learning in a Three-valued Setting},
  booktitle = {Proceedings of the CompulogNet Area Meeting ``Computational Logic and Machine Learning'', June 1998, Bristol, UK},
  year      = {1998},
  month     = jun,
  pages     = {63--69},
  publisher = {University of Manchester},
  address   = {Manchester, UK},
  pdf       = {http://ds.ing.unife.it/~friguzzi/Papers/Rig98-COMPULOG98.pdf},
  abstract  = {We discuss the adoption of a three-valued setting for inductive concept learning. Distinguishing between what is true, what is false and what is unknown is necessary in situations where decisions have to be taken on the basis of scarce information. We propose a learning algorithm that adopts extended logic programs under a well-founded semantics as the representation formalism and learns a definition for both the target concept and its opposite, considering positive and negative examples as instances of two disjoint classes. In the target program, default negation is used to ensure consistency and to handle exceptions to general rules. Exceptions to a positive concept are identified from negative examples, whereas exceptions to a negative concept are identified from positive examples. Exceptions can be generalized, in their turn, resulting in a hierarchy of defaults.},
  keywords  = {Inductive Logic Programming, Extended Logic Programs}
}

@inproceedings{LamMelMil97-LOPSTR97-IW,
  author    = {Evelina Lamma and Paola Mello and Michela Milano and Fabrizio Riguzzi},
  title     = {Integrating Extensional and Intensional {ILP} Systems through Abduction},
  editor    = {Norbert E. Fuchs},
  booktitle = {{LOPSTR97}, Proceedings of the 7th International Workshop on Logic Program Synthesis and Transformation, Leuven, Belgium, July 10-12, 1997},
  year      = {1997},
  keywords  = {Abduction, Negation, Integrity Constraints, Inductive Logic Programming},
  month     = jul,
  address   = {Leuven, Belgium},
  pages     = {1--8},
  publisher = {Department of Computer Science, Katholieke Universiteit Leuven},
  volume    = {Report CW 253},
  pdf       = {http://ds.ing.unife.it/~friguzzi/Papers/LamMelMil-LOPSTR97.pdf},
  abstract  = {We present an hybrid extensional-intensional Inductive Logic Programming algorithm. We then show how this algorithm solves the problem of global inconsistency of intensional systems when learning multiple predicates, without incurring in the problems of incompleteness and inconsistency of extensional systems. The algorithm is obtained by modifying an intensional system for learning abductive logic programs. Extensionality is thus obtained by exploiting abduction: the training set is considered as a set of abduced literals that is taken as input by the abductive proof procedure used for the coverage of examples.}
}

@inproceedings{RIG97-COMPULOG97-IW,
  author    = {Fabrizio Riguzzi},
  title     = {Using Abductive Logic Programming as a Representation Formalism for {ILP}},
  booktitle = {Proceedings of the CompulogNet Area Meeting ``Computational Logic and Machine Learning: Representation Issues in Reasoning and Learning''},
  year      = {1997},
  month     = sep,
  keywords  = {Abduction, Knowledge Representation, Inductive Logic Programming},
  address   = {Prague, Czech Republic},
  pages     = {15--18},
  publisher = {Compulog Net},
  pdf       = {http://ds.ing.unife.it/~friguzzi/Papers/Rig-COMPULOG97.pdf},
  abstract  = {In this paper we summarize the work done by the nodes of Bologna and Cyprus on the use of Abductive Logic Programming (ALP) as a representation formalism for Inductive Logic Programming. In this case, both the background knowledge and the target program are abductive logic programs and the coverage of examples through the resolution proof procedure of Prolog is replaced by coverage through an abductive proof procedure.}
}

@inproceedings{LamMelMil97-LPKR97-IW,
  author    = {Evelina Lamma and Paola Mello and Michela Milano and Fabrizio Riguzzi},
  title     = {A System for Learning Abductive Logic Programs},
  booktitle = {Proceedings of the {ILPS97} Workshop on Logic Programming and Knowledge Representation (LPKR97), Port Jefferson, New York, USA, October 17, 1997},
  editor    = {J. Dix and L. M. Pereira and T. Przymusinski},
  year      = {1997},
  publisher = {Universit{\"a}t Koblenz-Landau, Institut f{\"u}r Informatik},
  keywords  = {Abduction, Negation, Integrity Constraints},
  month     = oct,
  address   = {Koblenz, Germany},
  pages     = {55--66},
  pdf       = {http://ds.ing.unife.it/~friguzzi/Papers/LamMelMil-LPKR97-IW.pdf},
  abstract  = {We present the system LAP for learning abductive logic programs from examples and from a background abductive theory. A new type of induction problem has been defined as an extension of the Inductive Logic Programming framework. In the new problem definition, both the background and the target theories are abductive logic programs and the coverage of examples is replaced by abductive coverage. LAP is based on a top-down learning algorithm that has been suitably extended in order to solve the new induction problem. In particular, the testing of example coverage is performed by using the abductive proof procedure defined by Kakas and Mancarella. Assumptions can be made in order to cover positive examples and rule out negative ones and these assumptions can be used as new training data. LAP can be applied for learning in presence of incomplete knowledge and for learning exceptions to classification rules.}
}

@inproceedings{EspLamMal96-IW,
  author    = {Floriana Esposito and Evelina Lamma and Donato Malerba and Paola Mello and Michela Milano and Fabrizio Riguzzi and Giovanni Semeraro},
  title     = {Learning Abductive Logic Programs},
  year      = {1996},
  editor    = {Peter A. Flach and Antonis C. Kakas},
  booktitle = {Proceedings of the {ECAI96} Workshop on Abductive and Inductive Reasoning},
  month     = aug,
  keywords  = {Abduction, Negation, Integrity Constraints},
  address   = {Budapest, Hungary},
  pages     = {23--30},
  pdf       = {http://ds.ing.unife.it/~friguzzi/Papers/EspLamMal-ABDIND96.pdf}
}

@comment{{This file was generated by bibtex2html 1.96.}}