author    Federico Igne <federico.igne@cs.ox.ac.uk>  2022-05-12 12:58:40 +0100
committer Federico Igne <federico.igne@cs.ox.ac.uk>  2022-05-12 12:58:40 +0100
commit    227e4609bba9969cb7f39e52715fa62cf93cbfbf (patch)
tree      79041ebefd7ff2763179309955d47484002b647c /src/main/scala/uk
parent    6532db6a2cc9131e8039bd0c0e53190cc30b3599 (diff)
download  ACQuA-227e4609bba9969cb7f39e52715fa62cf93cbfbf.tar.gz
          ACQuA-227e4609bba9969cb7f39e52715fa62cf93cbfbf.zip
Add initial implementation of ACQuA reasoner
Diffstat (limited to 'src/main/scala/uk')
-rw-r--r--  src/main/scala/uk/ac/ox/cs/acqua/reasoner/AcquaQueryReasoner.scala | 489
1 file changed, 489 insertions, 0 deletions
diff --git a/src/main/scala/uk/ac/ox/cs/acqua/reasoner/AcquaQueryReasoner.scala b/src/main/scala/uk/ac/ox/cs/acqua/reasoner/AcquaQueryReasoner.scala
new file mode 100644
index 0000000..3e65218
--- /dev/null
+++ b/src/main/scala/uk/ac/ox/cs/acqua/reasoner/AcquaQueryReasoner.scala
@@ -0,0 +1,489 @@
/*
 * Copyright 2021,2022 KRR Oxford
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package uk.ac.ox.cs.acqua.reasoner

import org.semanticweb.karma2.profile.ELHOProfile;
import org.semanticweb.owlapi.model.OWLOntology;
// import org.semanticweb.owlapi.model.parameters.Imports;
// import uk.ac.ox.cs.JRDFox.JRDFStoreException;
import uk.ac.ox.cs.pagoda.multistage.MultiStageQueryEngine
// import uk.ac.ox.cs.pagoda.owl.EqualitiesEliminator;
// import uk.ac.ox.cs.pagoda.owl.OWLHelper;
// import uk.ac.ox.cs.pagoda.query.AnswerTuples;
// import uk.ac.ox.cs.pagoda.query.GapByStore4ID;
// import uk.ac.ox.cs.pagoda.query.GapByStore4ID2;
import uk.ac.ox.cs.pagoda.query.QueryRecord
// import uk.ac.ox.cs.pagoda.query.QueryRecord.Step;
import uk.ac.ox.cs.pagoda.reasoner.{ConsistencyManager,MyQueryReasoner,QueryReasoner}
import uk.ac.ox.cs.pagoda.reasoner.light.{KarmaQueryEngine,BasicQueryEngine}
import uk.ac.ox.cs.pagoda.rules.DatalogProgram
// import uk.ac.ox.cs.pagoda.summary.HermitSummaryFilter;
// import uk.ac.ox.cs.pagoda.tracking.QueryTracker;
// import uk.ac.ox.cs.pagoda.tracking.TrackingRuleEncoder;
// import uk.ac.ox.cs.pagoda.tracking.TrackingRuleEncoderDisjVar1;
// import uk.ac.ox.cs.pagoda.tracking.TrackingRuleEncoderWithGap;
// import uk.ac.ox.cs.pagoda.util.ExponentialInterpolation;
// import uk.ac.ox.cs.pagoda.util.PagodaProperties;
import uk.ac.ox.cs.pagoda.util.Timer;
import uk.ac.ox.cs.pagoda.util.Utility
// import uk.ac.ox.cs.pagoda.util.disposable.DisposedException;
// import uk.ac.ox.cs.pagoda.util.tuples.Tuple;
import uk.ac.ox.cs.rsacomb.ontology.Ontology

// import java.util.Collection;
// import java.util.LinkedList;

class AcquaQueryReasoner(var ontology: Ontology)
    extends QueryReasoner {

// OWLOntology ontology;
// OWLOntology elho_ontology;
// DatalogProgram program;

  private var lazyUpperStore: Option[MultiStageQueryEngine] = None;
// TrackingRuleEncoder encoder;


// private Collection<String> predicatesWithGap = null;
//// private int relevantOntologiesCounter = 0;
  private val timer: Timer = new Timer();

  private var _isConsistent: ConsistencyStatus = StatusUnchecked
  // TODO: explicit casting to MyQueryReasoner makes no sense. Find
  // another solution. Probably requires changing PAGOdA source code.
  private val consistencyManager: ConsistencyManager = new ConsistencyManager(this.asInstanceOf[MyQueryReasoner])

  private val rlLowerStore: BasicQueryEngine = new BasicQueryEngine("rl-lower-bound")
  private val elLowerStore: KarmaQueryEngine = new KarmaQueryEngine("elho-lower-bound")

  private val trackingStore = new MultiStageQueryEngine("tracking", false);

  /* Load ontology into PAGOdA */
  private val datalog = new DatalogProgram(ontology.origin);
  //datalog.getGeneral().save();
  if (!datalog.getGeneral().isHorn())
    lazyUpperStore = Some(new MultiStageQueryEngine("lazy-upper-bound", true))
  importData(datalog.getAdditionalDataFile());
  private val elhoOntology: OWLOntology = new ELHOProfile().getFragment(ontology.origin);
  elLowerStore.processOntology(elhoOntology);

  /** Does nothing.
   *
   * Loading of the ontology is performed at instance creation to avoid
   * unnecessary complexity (see main class constructor).
   *
   * @note Implemented for compatibility with other reasoners.
   */
  def loadOntology(ontology: OWLOntology): Unit = { }

  def preprocess(): Boolean = {
    ???
  }

  /** Returns the consistency status of the ontology.
   *
   * Performs a consistency check if the current status is undefined.
   * Some logging is performed as well.
   *
   * @return true if the ontology is consistent, false otherwise.
   */
  def isConsistent(): Boolean = {
    if (_isConsistent == StatusUnchecked) {
      _isConsistent = consistencyManager.check()
      Utility logDebug s"time for satisfiability checking: ${timer.duration()}"
    }
    Utility logInfo s"The ontology is ${_isConsistent}!"
    return _isConsistent.asBoolean
  }

  def evaluate(query: QueryRecord): Unit = {
    ???
  }

  def evaluateUpper(record: QueryRecord): Unit = ???

// public Collection<String> getPredicatesWithGap() {
//     if(isDisposed()) throw new DisposedException();
//     return predicatesWithGap;
// }

// @Override
// public boolean preprocess() {
//     if(isDisposed()) throw new DisposedException();
//
//     t.reset();
//     Utility.logInfo("Preprocessing (and checking satisfiability)...");
//
//     String name = "data", datafile = getImportedData();
//     rlLowerStore.importRDFData(name, datafile);
//     rlLowerStore.materialise("lower program", program.getLower().toString());
////     program.getLower().save();
//     if(!consistency.checkRLLowerBound()) {
//         Utility.logDebug("time for satisfiability checking: " + t.duration());
//         isConsistent = ConsistencyStatus.INCONSISTENT;
//         return false;
//     }
//     Utility.logDebug("The number of sameAs assertions in RL lower store: " + rlLowerStore.getSameAsNumber());
//
//     String originalMarkProgram = OWLHelper.getOriginalMarkProgram(ontology);
//
//     elLowerStore.importRDFData(name, datafile);
//     elLowerStore.materialise("saturate named individuals", originalMarkProgram);
//     elLowerStore.materialise("lower program", program.getLower().toString());
//     elLowerStore.initialiseKarma();
//     if(!consistency.checkELLowerBound()) {
//         Utility.logDebug("time for satisfiability checking: " + t.duration());
//         isConsistent = ConsistencyStatus.INCONSISTENT;
//         return false;
//     }
//
//     if(lazyUpperStore != null) {
//         lazyUpperStore.importRDFData(name, datafile);
//         lazyUpperStore.materialise("saturate named individuals", originalMarkProgram);
//         int tag = lazyUpperStore.materialiseRestrictedly(program, null);
//         if(tag == -1) {
//             Utility.logDebug("time for satisfiability checking: " + t.duration());
//             isConsistent = ConsistencyStatus.INCONSISTENT;
//             return false;
//         }
//         else if(tag != 1) {
//             lazyUpperStore.dispose();
//             lazyUpperStore = null;
//         }
//     }
//     if(consistency.checkUpper(lazyUpperStore)) {
//         isConsistent = ConsistencyStatus.CONSISTENT;
//         Utility.logDebug("time for satisfiability checking: " + t.duration());
//     }
//
//     trackingStore.importRDFData(name, datafile);
//     trackingStore.materialise("saturate named individuals", originalMarkProgram);
//
////     materialiseFullUpper();
////     GapByStore4ID gap = new GapByStore4ID(trackingStore);
//     GapByStore4ID gap = new GapByStore4ID2(trackingStore, rlLowerStore);
//     trackingStore.materialiseFoldedly(program, gap);
//     predicatesWithGap = gap.getPredicatesWithGap();
//     gap.clear();
//
//     if(program.getGeneral().isHorn())
//         encoder = new TrackingRuleEncoderWithGap(program.getUpper(), trackingStore);
//     else
//         encoder = new TrackingRuleEncoderDisjVar1(program.getUpper(), trackingStore);
////     encoder = new TrackingRuleEncoderDisj1(program.getUpper(), trackingStore);
////     encoder = new TrackingRuleEncoderDisjVar2(program.getUpper(), trackingStore);
////     encoder = new TrackingRuleEncoderDisj2(program.getUpper(), trackingStore);
//
//     // TODO? add consistency check by Skolem-upper-bound
//
//     if(!isConsistent())
//         return false;
//
//     consistency.extractBottomFragment();
//
//     return true;
// }
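
// A possible Scala rendering of the first steps of the reference
// implementation above (an untested sketch, not the final port; it
// renames the Java fields to the Scala members declared in this class:
// t ~> timer, program ~> datalog, consistency ~> consistencyManager,
// isConsistent ~> _isConsistent):
//
//   def preprocess(): Boolean = {
//     timer.reset()
//     Utility logInfo "Preprocessing (and checking satisfiability)..."
//     val name = "data"
//     val datafile = getImportedData()
//     rlLowerStore.importRDFData(name, datafile)
//     rlLowerStore.materialise("lower program", datalog.getLower().toString)
//     if (!consistencyManager.checkRLLowerBound()) {
//       _isConsistent = StatusInconsistent
//       return false
//     }
//     /* ...EL lower bound, upper stores and tracking store as above... */
//     isConsistent()
//   }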

// @Override
// public void evaluate(QueryRecord queryRecord) {
//     if(isDisposed()) throw new DisposedException();
//
//     if(queryLowerAndUpperBounds(queryRecord))
//         return;
//
//     OWLOntology relevantOntologySubset = extractRelevantOntologySubset(queryRecord);
//
////     queryRecord.saveRelevantOntology("/home/alessandro/Desktop/test-relevant-ontology-"+relevantOntologiesCounter+".owl");
////     relevantOntologiesCounter++;
//
//     if(properties.getSkolemUpperBound() == PagodaProperties.SkolemUpperBoundOptions.BEFORE_SUMMARISATION
//             && querySkolemisedRelevantSubset(relevantOntologySubset, queryRecord)) {
//         return;
//     }
//
//     Utility.logInfo(">> Summarisation <<");
//     HermitSummaryFilter summarisedChecker = new HermitSummaryFilter(queryRecord, properties.getToCallHermiT());
//     if(summarisedChecker.check(queryRecord.getGapAnswers()) == 0) {
//         summarisedChecker.dispose();
//         return;
//     }
//
//     if(properties.getSkolemUpperBound() == PagodaProperties.SkolemUpperBoundOptions.AFTER_SUMMARISATION
//             && querySkolemisedRelevantSubset(relevantOntologySubset, queryRecord)) {
//         summarisedChecker.dispose();
//         return;
//     }
//
//     Utility.logInfo(">> Full reasoning <<");
//     Timer t = new Timer();
//     summarisedChecker.checkByFullReasoner(queryRecord.getGapAnswers());
//     Utility.logDebug("Total time for full reasoner: " + t.duration());
//
//     if(properties.getToCallHermiT())
//         queryRecord.markAsProcessed();
//     summarisedChecker.dispose();
// }

// @Override
// public void evaluateUpper(QueryRecord queryRecord) {
//     if(isDisposed()) throw new DisposedException();
//     // TODO? add new upper store
//     AnswerTuples rlAnswer = null;
//     boolean useFull = queryRecord.isBottom() || lazyUpperStore == null;
//     try {
//         rlAnswer =
//                 (useFull ? trackingStore : lazyUpperStore).evaluate(queryRecord.getQueryText(), queryRecord.getAnswerVariables());
//         queryRecord.updateUpperBoundAnswers(rlAnswer, true);
//     } finally {
//         if(rlAnswer != null) rlAnswer.dispose();
//     }
// }

// @Override
// public void dispose() {
//     super.dispose();
//
//     if(encoder != null) encoder.dispose();
//     if(rlLowerStore != null) rlLowerStore.dispose();
//     if(lazyUpperStore != null) lazyUpperStore.dispose();
//     if(elLowerStore != null) elLowerStore.dispose();
//     if(trackingStore != null) trackingStore.dispose();
//     if(consistency != null) consistency.dispose();
//     if(program != null) program.dispose();
// }

// protected void internal_importDataFile(String name, String datafile) {
////     addDataFile(datafile);
//     rlLowerStore.importRDFData(name, datafile);
//     if(lazyUpperStore != null)
//         lazyUpperStore.importRDFData(name, datafile);
//     elLowerStore.importRDFData(name, datafile);
//     trackingStore.importRDFData(name, datafile);
// }

// /**
//  * It deals with blank nodes differently from variables
//  * according to SPARQL semantics for OWL2 Entailment Regime.
//  * <p>
//  * In particular variables are matched only against named individuals,
//  * and blank nodes against named and anonymous individuals.
//  */
// private boolean queryUpperStore(BasicQueryEngine upperStore, QueryRecord queryRecord,
//                                 Tuple<String> extendedQuery, Step step) {
//     t.reset();
//
//     Utility.logDebug("First query type");
//     queryUpperBound(upperStore, queryRecord, queryRecord.getQueryText(), queryRecord.getAnswerVariables());
//     if(!queryRecord.isProcessed() && !queryRecord.getQueryText().equals(extendedQuery.get(0))) {
//         Utility.logDebug("Second query type");
//         queryUpperBound(upperStore, queryRecord, extendedQuery.get(0), queryRecord.getAnswerVariables());
//     }
//     if(!queryRecord.isProcessed() && queryRecord.hasNonAnsDistinguishedVariables()) {
//         Utility.logDebug("Third query type");
//         queryUpperBound(upperStore, queryRecord, extendedQuery.get(1), queryRecord.getDistinguishedVariables());
//     }
//
//     queryRecord.addProcessingTime(step, t.duration());
//     if(queryRecord.isProcessed()) {
//         queryRecord.setDifficulty(step);
//         return true;
//     }
//     return false;
// }
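
// To illustrate the distinction documented above (hypothetical query,
// illustrative prefixes): under the OWL 2 Entailment Regime the two
// queries below may return different answers, because the variable ?y
// binds only named individuals, while the blank node _:b can also be
// matched by anonymous individuals introduced by existential axioms.
//
//   SELECT ?x WHERE { ?x :hasParent ?y }
//   SELECT ?x WHERE { ?x :hasParent _:b }
//
// This is also why the method above tries up to three query variants,
// stopping as soon as the query record is fully processed.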

// /**
//  * Returns the part of the ontology relevant for Hermit, while computing the bound answers.
//  */
// private boolean queryLowerAndUpperBounds(QueryRecord queryRecord) {
//
//     Utility.logInfo(">> Base bounds <<");
//
//     AnswerTuples rlAnswer = null, elAnswer = null;
//
//     t.reset();
//     try {
//         rlAnswer = rlLowerStore.evaluate(queryRecord.getQueryText(), queryRecord.getAnswerVariables());
//         Utility.logDebug(t.duration());
//         queryRecord.updateLowerBoundAnswers(rlAnswer);
//     } finally {
//         if(rlAnswer != null) rlAnswer.dispose();
//     }
//     queryRecord.addProcessingTime(Step.LOWER_BOUND, t.duration());
//
//     Tuple<String> extendedQueryTexts = queryRecord.getExtendedQueryText();
//
//     if(properties.getUseAlwaysSimpleUpperBound() || lazyUpperStore == null) {
//         Utility.logDebug("Tracking store");
//         if(queryUpperStore(trackingStore, queryRecord, extendedQueryTexts, Step.SIMPLE_UPPER_BOUND))
//             return true;
//     }
//
//     if(!queryRecord.isBottom()) {
//         Utility.logDebug("Lazy store");
//         if(lazyUpperStore != null && queryUpperStore(lazyUpperStore, queryRecord, extendedQueryTexts, Step.LAZY_UPPER_BOUND))
//             return true;
//     }
//
//     t.reset();
//     try {
//         elAnswer = elLowerStore.evaluate(extendedQueryTexts.get(0),
//                                          queryRecord.getAnswerVariables(),
//                                          queryRecord.getLowerBoundAnswers());
//         Utility.logDebug(t.duration());
//         queryRecord.updateLowerBoundAnswers(elAnswer);
//     } finally {
//         if(elAnswer != null) elAnswer.dispose();
//     }
//     queryRecord.addProcessingTime(Step.EL_LOWER_BOUND, t.duration());
//
//     if(queryRecord.isProcessed()) {
//         queryRecord.setDifficulty(Step.EL_LOWER_BOUND);
//         return true;
//     }
//
//     return false;
// }
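
// (In PAGOdA's scheme, a query record counts as processed once its
// upper and lower bound answers coincide, i.e. no "gap" answers are
// left to verify by summarisation or full reasoning; returning true
// above means exactly that.)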

// private OWLOntology extractRelevantOntologySubset(QueryRecord queryRecord) {
//     Utility.logInfo(">> Relevant ontology-subset extraction <<");
//
//     t.reset();
//
//     QueryTracker tracker = new QueryTracker(encoder, rlLowerStore, queryRecord);
//     OWLOntology relevantOntologySubset = tracker.extract(trackingStore, consistency.getQueryRecords(), true);
//
//     queryRecord.addProcessingTime(Step.FRAGMENT, t.duration());
//
//     int numOfABoxAxioms = relevantOntologySubset.getABoxAxioms(Imports.INCLUDED).size();
//     int numOfTBoxAxioms = relevantOntologySubset.getAxiomCount() - numOfABoxAxioms;
//     Utility.logInfo("Relevant ontology-subset has been extracted: |ABox|="
//             + numOfABoxAxioms + ", |TBox|=" + numOfTBoxAxioms);
//
//     return relevantOntologySubset;
// }

// private void queryUpperBound(BasicQueryEngine upperStore, QueryRecord queryRecord, String queryText, String[] answerVariables) {
//     AnswerTuples rlAnswer = null;
//     try {
//         Utility.logDebug(queryText);
//         rlAnswer = upperStore.evaluate(queryText, answerVariables);
//         Utility.logDebug(t.duration());
//         queryRecord.updateUpperBoundAnswers(rlAnswer);
//     } finally {
//         if(rlAnswer != null) rlAnswer.dispose();
//     }
// }

// private boolean querySkolemisedRelevantSubset(OWLOntology relevantSubset, QueryRecord queryRecord) {
//     Utility.logInfo(">> Semi-Skolemisation <<");
//     t.reset();
//
//     DatalogProgram relevantProgram = new DatalogProgram(relevantSubset);
//
//     MultiStageQueryEngine relevantStore =
//             new MultiStageQueryEngine("Relevant-store", true); // checkValidity is true
//
//     relevantStore.importDataFromABoxOf(relevantSubset);
//     String relevantOriginalMarkProgram = OWLHelper.getOriginalMarkProgram(relevantSubset);
//
//     relevantStore.materialise("Mark original individuals", relevantOriginalMarkProgram);
//
//     boolean isFullyProcessed = false;
//     LinkedList<Tuple<Long>> lastTwoTriplesCounts = new LinkedList<>();
//     for (int currentMaxTermDepth = 1; !isFullyProcessed; currentMaxTermDepth++) {
//
//         if(currentMaxTermDepth > properties.getSkolemDepth()) {
//             Utility.logInfo("Maximum term depth reached");
//             break;
//         }
//
//         if(lastTwoTriplesCounts.size() == 2) {
//             if(lastTwoTriplesCounts.get(0).get(1).equals(lastTwoTriplesCounts.get(1).get(1)))
//                 break;
//
//             ExponentialInterpolation interpolation = new ExponentialInterpolation(lastTwoTriplesCounts.get(0).get(0),
//                     lastTwoTriplesCounts.get(0).get(1),
//                     lastTwoTriplesCounts.get(1).get(0),
//                     lastTwoTriplesCounts.get(1).get(1));
//             double triplesEstimate = interpolation.computeValue(currentMaxTermDepth);
//
//             Utility.logDebug("Estimate of the number of triples:" + triplesEstimate);
//
//             // exit condition if the query is not fully answered
//             if(triplesEstimate > properties.getMaxTriplesInSkolemStore()) {
//                 Utility.logInfo("Interrupting Semi-Skolemisation because of triples count limit");
//                 break;
//             }
//         }
//
//         Utility.logInfo("Trying with maximum depth " + currentMaxTermDepth);
//
//         int materialisationTag = relevantStore.materialiseSkolemly(relevantProgram, null,
//                 currentMaxTermDepth);
//         queryRecord.addProcessingTime(Step.SKOLEM_UPPER_BOUND, t.duration());
//         if(materialisationTag == -1) {
//             relevantStore.dispose();
//             throw new Error("A consistent ontology has turned out to be " +
//                     "inconsistent in the Skolemises-relevant-upper-store");
//         }
//         else if(materialisationTag != 1) {
//             Utility.logInfo("Semi-Skolemised relevant upper store cannot be employed");
//             break;
//         }
//
//         Utility.logInfo("Querying semi-Skolemised upper store...");
//         isFullyProcessed = queryUpperStore(relevantStore, queryRecord,
//                 queryRecord.getExtendedQueryText(),
//                 Step.SKOLEM_UPPER_BOUND);
//
//         try {
//             lastTwoTriplesCounts.add
//                     (new Tuple<>((long) currentMaxTermDepth, relevantStore.getStoreSize()));
//         } catch (JRDFStoreException e) {
//             e.printStackTrace();
//             break;
//         }
//         if(lastTwoTriplesCounts.size() > 2)
//             lastTwoTriplesCounts.remove();
//
//         Utility.logDebug("Last two triples counts:" + lastTwoTriplesCounts);
//     }
//
//     relevantStore.dispose();
//     Utility.logInfo("Semi-Skolemised relevant upper store has been evaluated");
//     return isFullyProcessed;
// }
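
// A rough worked example of the exit condition above (illustrative
// numbers, and assuming ExponentialInterpolation fits c(d) = a * b^d
// through the last two (depth, triples) pairs): if depth 1 yielded
// 1000 triples and depth 2 yielded 4000, then b = 4 and the estimate
// for depth 3 is 16000; if that exceeds
// properties.getMaxTriplesInSkolemStore(), materialisation at depth 3
// is never attempted.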

  private sealed trait ConsistencyStatus {
    val asBoolean = false
  }
  private case object StatusConsistent extends ConsistencyStatus {
    override val asBoolean = true
    override def toString(): String = "consistent"
  }
  private case object StatusInconsistent extends ConsistencyStatus {
    override def toString(): String = "inconsistent"
  }
  private case object StatusUnchecked extends ConsistencyStatus {
    override def toString(): String = "N/A"
  }
  private implicit def boolean2consistencyStatus(b: Boolean): ConsistencyStatus = {
    if (b) StatusConsistent else StatusInconsistent
  }
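
  // The implicit conversion above is what allows isConsistent() to
  // assign a plain Boolean to the status field, e.g.:
  //
  //   _isConsistent = consistencyManager.check()  // Boolean ~> ConsistencyStatus
  //
  // The reverse direction is explicit, via ConsistencyStatus.asBoolean.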

}