1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
|
/*
* Copyright 2021,2022 KRR Oxford
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package uk.ac.ox.cs.acqua.reasoner
import java.util.LinkedList;
import scala.collection.JavaConverters._
import org.semanticweb.karma2.profile.ELHOProfile
import org.semanticweb.owlapi.model.OWLOntology
import org.semanticweb.owlapi.model.parameters.Imports
import uk.ac.ox.cs.JRDFox.JRDFStoreException;
import uk.ac.ox.cs.pagoda.multistage.MultiStageQueryEngine
import uk.ac.ox.cs.pagoda.owl.OWLHelper
import uk.ac.ox.cs.pagoda.query.{
AnswerTuples,
GapByStore4ID,
GapByStore4ID2,
QueryRecord,
}
import uk.ac.ox.cs.pagoda.query.QueryRecord.Step
import uk.ac.ox.cs.pagoda.reasoner.{
MyQueryReasoner,
QueryReasoner
}
import uk.ac.ox.cs.pagoda.reasoner.light.{KarmaQueryEngine,BasicQueryEngine}
import uk.ac.ox.cs.pagoda.rules.DatalogProgram
import uk.ac.ox.cs.pagoda.summary.HermitSummaryFilter;
import uk.ac.ox.cs.pagoda.tracking.{
QueryTracker,
TrackingRuleEncoder,
TrackingRuleEncoderDisjVar1,
TrackingRuleEncoderWithGap,
}
import uk.ac.ox.cs.pagoda.util.{
ExponentialInterpolation,
PagodaProperties,
Timer,
Utility
}
import uk.ac.ox.cs.pagoda.util.tuples.Tuple;
import uk.ac.ox.cs.rsacomb.ontology.Ontology
import uk.ac.ox.cs.rsacomb.approximation.{Lowerbound,Upperbound}
/** Hybrid conjunctive-query reasoner combining PAGOdA-style bound
 * computation with RSAComb-based RSA approximations.
 *
 * Lower bounds come from RL and ELHO materialisation plus an RSA
 * lower approximation; upper bounds from tracking/lazy multi-stage
 * stores plus an RSA upper approximation. Queries not settled by the
 * bounds fall back to relevant-subset extraction, (semi-)Skolemised
 * evaluation, summarisation, and ultimately full reasoning.
 *
 * @param ontology RSAComb wrapper around the original OWL ontology.
 *
 * @note the primary constructor performs non-trivial work: it loads
 * the ontology into PAGOdA, imports additional data, and extracts and
 * processes the ELHO fragment.
 */
class AcquaQueryReasoner(val ontology: Ontology)
extends QueryReasoner {
/** Compatibility conversions between PAGOdA and RSAComb */
import uk.ac.ox.cs.acqua.implicits.PagodaConverters._
/** Tracking-rule encoder; populated during [[preprocess]]. */
var encoder: Option[TrackingRuleEncoder] = None
/** Upper-bound store used only when the datalog program is non-Horn. */
private var lazyUpperStore: Option[MultiStageQueryEngine] = None;
/** Shared timer for logging phase durations. */
private val timer: Timer = new Timer();
/** Cached consistency status; resolved lazily by [[isConsistent]]. */
private var _isConsistent: ConsistencyStatus = StatusUnchecked
// TODO: explicit casting to MyQueryReasoner makes no sense. Find
// another solution. Probably requires changing PAGOdA source code.
private val consistencyManager: ConsistencyManager = new ConsistencyManager(this)
/** OWL 2 RL lower-bound engine. */
val rlLowerStore: BasicQueryEngine = new BasicQueryEngine("rl-lower-bound")
/** ELHO (Karma) lower-bound engine. */
val elLowerStore: KarmaQueryEngine = new KarmaQueryEngine("elho-lower-bound")
/* RSAComb engines over the lower/upper RSA approximations. Declared
 * lazy so the (potentially expensive) upper approximation is only
 * computed if a query actually needs it. */
private lazy val lowerRSAEngine = new RSACombQueryReasoner(ontology, new Lowerbound)
private lazy val upperRSAEngine = new RSACombQueryReasoner(ontology, new Upperbound)
/** Upper-bound tracking store; also used for relevant-subset extraction. */
val trackingStore = new MultiStageQueryEngine("tracking", false);
/** Predicates with a lower/upper gap; populated during [[preprocess]]. */
var predicatesWithGap: Seq[String] = Seq.empty
/* Load ontology into PAGOdA */
val datalog = new DatalogProgram(ontology.origin);
//datalog.getGeneral().save();
if (!datalog.getGeneral().isHorn())
lazyUpperStore = Some(new MultiStageQueryEngine("lazy-upper-bound", true))
importData(datalog.getAdditionalDataFile())
private val elhoOntology: OWLOntology = new ELHOProfile().getFragment(ontology.origin);
elLowerStore processOntology elhoOntology
/** Performs nothing.
 *
 * Loading of the ontology is performed at instance creation to avoid
 * unnecessary complexity (see main class constructor).
 *
 * @note Implemented for compatibility with other reasoners.
 */
def loadOntology(ontology: OWLOntology): Unit = { }
/** Preprocessing of input ontology.
 *
 * This is mostly PAGOdA related. Note that, while most of the
 * computation in RSAComb is performed "on-demand", we are forcing
 * the approximation from below of the input ontology to RSA, and
 * the computation of its canonical model, to make timing
 * measurements more consistent.
 *
 * Materialisation order matters here: data import, mark-program
 * saturation, lower-program materialisation, then consistency checks.
 *
 * @return whether the input ontology is found consistent after the
 * preprocessing phase.
 */
def preprocess(): Boolean = {
timer.reset();
Utility logInfo "Preprocessing (and checking satisfiability)..."
val name = "data"
val datafile = getImportedData()
/* RL lower-bound check */
rlLowerStore.importRDFData(name, datafile);
rlLowerStore.materialise("lower program", datalog.getLower.toString);
if (!consistencyManager.checkRLLowerBound) {
Utility logDebug s"time for satisfiability checking: ${timer.duration()}"
_isConsistent = StatusInconsistent
return false
}
Utility logDebug s"The number of 'sameAs' assertions in RL lower store: ${rlLowerStore.getSameAsNumber}"
/* ELHO lower bound check */
val originalMarkProgram = OWLHelper.getOriginalMarkProgram(ontology.origin)
elLowerStore.importRDFData(name, datafile);
elLowerStore.materialise("saturate named individuals", originalMarkProgram);
elLowerStore.materialise("lower program", datalog.getLower.toString);
elLowerStore.initialiseKarma();
if (!consistencyManager.checkELLowerBound) {
Utility logDebug s"time for satisfiability checking: ${timer.duration()}"
_isConsistent = StatusInconsistent
return false
}
/* Lazy upper store: materialise only if present (non-Horn case);
 * a tag of -1 signals inconsistency detected during materialisation. */
val tag = lazyUpperStore.map(store => {
store.importRDFData(name, datafile)
store.materialise("saturate named individuals", originalMarkProgram)
store.materialiseRestrictedly(datalog, null)
}).getOrElse(1)
if (tag == -1) {
Utility logDebug s"time for satisfiability checking: ${timer.duration()}"
_isConsistent = StatusInconsistent
return false
}
// NOTE(review): this disposes the store but does NOT reset
// `lazyUpperStore` to None, so dispose() below will call dispose()
// on it again — assuming engine disposal is idempotent; verify.
lazyUpperStore.flatMap(store => { store.dispose(); None })
/* Tracking store: folded materialisation computes the predicates
 * with a gap between the lower and upper bounds. */
trackingStore.importRDFData(name, datafile)
trackingStore.materialise("saturate named individuals", originalMarkProgram)
val gap: GapByStore4ID = new GapByStore4ID2(trackingStore, rlLowerStore);
trackingStore.materialiseFoldedly(datalog, gap);
this.predicatesWithGap = gap.getPredicatesWithGap.asScala.toSeq;
gap.clear();
/* Pick the tracking-rule encoder matching the program class. */
if (datalog.getGeneral.isHorn)
encoder = Some(new TrackingRuleEncoderWithGap(datalog.getUpper, trackingStore))
else
encoder = Some(new TrackingRuleEncoderDisjVar1(datalog.getUpper, trackingStore))
/* Perform consistency checking if not already inconsistent */
if (!isConsistent()) return false
consistencyManager.extractBottomFragment();
/* Force computation of lower RSA approximations and its canonical
* model. We wait to process the upperbound since it might not be
* necessary after all. */
lowerRSAEngine.preprocess()
//upperRSAEngine.preprocess()
true
}
/** Returns the consistency status of the ontology.
 *
 * Performs a consistency check if the current status is undefined
 * and caches the result. Some logging is performed as well.
 *
 * @return true if the ontology is consistent, false otherwise.
 */
def isConsistent(): Boolean = {
if (_isConsistent == StatusUnchecked) {
_isConsistent = consistencyManager.check
Utility logDebug s"time for satisfiability checking: ${timer.duration()}"
}
Utility logInfo s"The ontology is ${_isConsistent}!"
return _isConsistent.asBoolean
}
/** Evaluate a query against this reasoner.
 *
 * This is the main entry to compute the answers to a query.
 * By the end of the computation, the query record passed as input
 * will contain the answers found during the answering process.
 * This behaves conservatively and will try very hard not to perform
 * unnecessary computation: cheap bounds first, then relevant-subset
 * extraction, Skolemisation, summarisation, and full reasoning only
 * as a last resort.
 *
 * @param query the query record to evaluate.
 */
def evaluate(query: QueryRecord): Unit = {
queryLowerAndUpperBounds(query)
// Short-circuits: RSA upper bound is only tried if the RSA lower
// bound did not already settle the query.
val processed =
queryRSALowerBound(query) ||
queryRSAUpperBound(query)
if (!processed) {
val relevantOntologySubset: OWLOntology =
extractRelevantOntologySubset(query)
if (properties.getSkolemUpperBound == PagodaProperties.SkolemUpperBoundOptions.BEFORE_SUMMARISATION &&
querySkolemisedRelevantSubset(relevantOntologySubset, query)
) return;
Utility logInfo ">> Summarisation <<"
val summarisedChecker: HermitSummaryFilter =
new HermitSummaryFilter(query, properties.getToCallHermiT)
if(summarisedChecker.check(query.getGapAnswers) == 0) {
summarisedChecker.dispose()
return;
}
if (properties.getSkolemUpperBound == PagodaProperties.SkolemUpperBoundOptions.AFTER_SUMMARISATION &&
querySkolemisedRelevantSubset(relevantOntologySubset, query)
) {
summarisedChecker.dispose()
return;
}
Utility logInfo ">> Full reasoning <<"
timer.reset()
summarisedChecker checkByFullReasoner query.getGapAnswers
Utility logDebug s"Total time for full reasoner: ${timer.duration()}"
if (properties.getToCallHermiT) query.markAsProcessed()
summarisedChecker.dispose()
}
}
/** Only compute the upperbound for a query.
 *
 * @note this is not supported at the moment (always throws
 * [[scala.NotImplementedError]]). Look at
 * [[uk.ac.ox.cs.pagoda.reasoner.MyQueryReasoner]] for an example
 * implementation.
 */
def evaluateUpper(record: QueryRecord): Unit = ???
/** Clean up the query reasoner, disposing all underlying engines. */
override def dispose(): Unit = {
super.dispose()
if(encoder.isDefined) encoder.get.dispose()
if(rlLowerStore != null) rlLowerStore.dispose();
// NOTE(review): the lazy upper store may already have been disposed
// in preprocess() (the Option is not cleared there); assuming
// dispose() is safe to call twice — verify against PAGOdA.
if(lazyUpperStore.isDefined) lazyUpperStore.get.dispose();
if(elLowerStore != null) elLowerStore.dispose();
if(trackingStore != null) trackingStore.dispose();
if(consistencyManager != null) consistencyManager.dispose();
if(datalog != null) datalog.dispose();
}
/** Perform CQ answering for a specific upper bound engine.
 *
 * Evaluates the query text against the store and merges the result
 * into the record's upper-bound answers; the answer-tuple iterator
 * is always disposed, even on failure.
 *
 * @param store upper bound engine to be used in the computation.
 * @param query query record.
 * @param queryText actual text of the query to be executed.
 * @param answerVariables answer variables for the query.
 */
private def queryUpperBound(
store: BasicQueryEngine,
query: QueryRecord,
queryText: String,
answerVariables: Array[String]
): Unit = {
var rlAnswer: AnswerTuples = null
try {
Utility logDebug queryText
rlAnswer = store.evaluate(queryText, answerVariables)
Utility logDebug timer.duration()
query updateUpperBoundAnswers rlAnswer
} finally {
if (rlAnswer != null) rlAnswer.dispose()
}
}
/** Perform CQ answering for a specific upper bound engine.
 *
 * Runs up to three query variants (original, extended, and the
 * distinguished-variable form) until the record is fully processed.
 *
 * @param upperStore upper bound engine to be used in the computation.
 * @param query query record.
 * @param extendedQuery extended version of the query.
 * @param step difficulty of the current step.
 * @return whether the query has been fully answered, i.e., the
 * bounds computed so far coincide.
 *
 * @note It deals with blanks nodes differently from variables
 * according to SPARQL semantics for OWL2 Entailment Regime. In
 * particular variables are matched only against named individuals,
 * and blank nodes against named and anonymous individuals.
 */
private def queryUpperStore(
upperStore: BasicQueryEngine,
query: QueryRecord,
extendedQuery: Tuple[String],
step: Step
): Boolean = {
timer.reset();
Utility logDebug "First query type"
queryUpperBound(upperStore, query, query.getQueryText, query.getAnswerVariables)
if (!query.isProcessed() && !query.getQueryText().equals(extendedQuery.get(0))) {
Utility logDebug "Second query type"
queryUpperBound(upperStore, query, extendedQuery.get(0), query.getAnswerVariables)
}
if (!query.isProcessed() && query.hasNonAnsDistinguishedVariables()) {
Utility logDebug "Third query type"
queryUpperBound(upperStore, query, extendedQuery.get(1), query.getDistinguishedVariables)
}
query.addProcessingTime(step, timer.duration())
if (query.isProcessed()) query.setDifficulty(step)
query.isProcessed()
}
/** Computes the bounds to the answers for a query.
 *
 * Both the lower (RL + ELHO) and upper bounds are computed here.
 *
 * @param query the query to be executed
 * @return whether the query has been fully answered, i.e., the
 * bounds computed so far coincide.
 */
private def queryLowerAndUpperBounds(query: QueryRecord): Boolean = {
Utility logInfo ">> Base bounds <<"
val extendedQueryTexts: Tuple[String] = query.getExtendedQueryText()
var rlAnswer: AnswerTuples = null
var elAnswer: AnswerTuples = null
/* Compute RL lower bound answers */
timer.reset();
try {
rlAnswer = rlLowerStore.evaluate(query.getQueryText, query.getAnswerVariables)
Utility logDebug timer.duration()
query updateLowerBoundAnswers rlAnswer
} finally {
if (rlAnswer != null) rlAnswer.dispose()
}
query.addProcessingTime(Step.LOWER_BOUND, timer.duration());
/* Compute upper bound answers */
if(properties.getUseAlwaysSimpleUpperBound() || lazyUpperStore.isEmpty) {
Utility logDebug "Tracking store"
if (queryUpperStore(trackingStore, query, extendedQueryTexts, Step.SIMPLE_UPPER_BOUND))
return true;
}
// Bottom (unsatisfiability) queries skip the lazy store.
if (!query.isBottom) {
Utility logDebug "Lazy store"
if (lazyUpperStore.isDefined && queryUpperStore(lazyUpperStore.get, query, extendedQueryTexts, Step.LAZY_UPPER_BOUND))
return true
}
timer.reset()
/* Compute ELHO lower bound answers */
try {
elAnswer = elLowerStore.evaluate(
extendedQueryTexts.get(0),
query.getAnswerVariables,
query.getLowerBoundAnswers
)
Utility logDebug timer.duration()
query updateLowerBoundAnswers elAnswer
} finally {
if (elAnswer != null) elAnswer.dispose()
}
query.addProcessingTime(Step.EL_LOWER_BOUND, timer.duration())
if (query.isProcessed()) query.setDifficulty(Step.EL_LOWER_BOUND)
query.isProcessed()
}
/** Compute lower bound using RSAComb.
 *
 * @param query query record to update.
 * @return true if the query is fully answered.
 */
private def queryRSALowerBound(query: QueryRecord): Boolean = {
lowerRSAEngine evaluate query
query.isProcessed
}
/** Compute upper bound using RSAComb.
 *
 * @note first use forces preprocessing of the (lazy) upper
 * RSA approximation.
 *
 * @param query query record to update.
 * @return true if the query is fully answered.
 */
private def queryRSAUpperBound(query: QueryRecord): Boolean = {
upperRSAEngine evaluate query
query.isProcessed
}
/** Extract a subset of the ontology relevant to the query.
 *
 * Uses the tracking store and the consistency manager's query
 * records to isolate the axioms relevant to answering the query.
 *
 * @param query query record for which the subset ontology is computed.
 * @return an [[OWLOntology]] subset of the input ontology.
 */
private def extractRelevantOntologySubset(query: QueryRecord): OWLOntology = {
Utility logInfo ">> Relevant ontology-subset extraction <<"
timer.reset()
val tracker: QueryTracker =
new QueryTracker(encoder.get, rlLowerStore, query)
val relevantOntologySubset: OWLOntology =
tracker.extract( trackingStore, consistencyManager.getQueryRecords, true)
query.addProcessingTime(Step.FRAGMENT, timer.duration())
val numOfABoxAxioms: Int = relevantOntologySubset.getABoxAxioms(Imports.INCLUDED).size
val numOfTBoxAxioms: Int = relevantOntologySubset.getAxiomCount() - numOfABoxAxioms
Utility logInfo s"Relevant ontology-subset has been extracted: |ABox|=$numOfABoxAxioms, |TBox|=$numOfTBoxAxioms"
return relevantOntologySubset
}
/** Query the skolemized ontology subset relevant to a query record.
 *
 * Iteratively deepens the maximum Skolem term depth, stopping when
 * the query is fully answered, the configured depth limit is hit,
 * the triple count stops growing, or an extrapolation of the last
 * two triple counts predicts the store size limit would be exceeded.
 *
 * @param relevantSubset the relevant ontology subset.
 * @param query the query to be answered.
 * @return true if the query has been fully answered.
 *
 * TODO: the code has been adapted from [[uk.ac.ox.cs.pagoda.reasoner.MyQueryReasoner]]
 * and ported to Scala. There are better, more Scala-esque ways of
 * dealing with the big `while` in this function, but this should work
 * for now.
 */
private def querySkolemisedRelevantSubset(
relevantSubset: OWLOntology,
query: QueryRecord
): Boolean = {
Utility logInfo ">> Semi-Skolemisation <<"
timer.reset()
val relevantProgram: DatalogProgram = new DatalogProgram(relevantSubset)
val relevantStore: MultiStageQueryEngine =
new MultiStageQueryEngine("Relevant-store", true)
relevantStore importDataFromABoxOf relevantSubset
val relevantOriginalMarkProgram: String =
OWLHelper getOriginalMarkProgram relevantSubset
relevantStore.materialise("Mark original individuals", relevantOriginalMarkProgram)
var isFullyProcessed = false
// Sliding window of the last two (depth, tripleCount) pairs used to
// extrapolate the store size for the next depth.
val lastTwoTriplesCounts: LinkedList[Tuple[Long]] = new LinkedList()
var currentMaxTermDepth = 1
var keepGoing = true
while (!isFullyProcessed && keepGoing) {
if (currentMaxTermDepth > properties.getSkolemDepth) {
Utility logInfo "Maximum term depth reached"
keepGoing = false
} else if (
// Stop if the triple count plateaued (fixpoint reached) or the
// extrapolated count would exceed the configured store limit.
lastTwoTriplesCounts.size() == 2 && (
lastTwoTriplesCounts.get(0).get(1).equals(lastTwoTriplesCounts.get(1).get(1)) ||
{
val interpolation: ExponentialInterpolation =
new ExponentialInterpolation(
lastTwoTriplesCounts.get(0).get(0),
lastTwoTriplesCounts.get(0).get(1),
lastTwoTriplesCounts.get(1).get(0),
lastTwoTriplesCounts.get(1).get(1)
)
val triplesEstimate: Double =
interpolation computeValue currentMaxTermDepth
Utility logDebug s"Estimate of the number of triples: $triplesEstimate"
if (triplesEstimate > properties.getMaxTriplesInSkolemStore)
Utility logInfo "Interrupting Semi-Skolemisation because of triples count limit"
triplesEstimate > properties.getMaxTriplesInSkolemStore
}
)
) {
keepGoing = false
} else {
Utility logInfo s"Trying with maximum depth $currentMaxTermDepth"
val materialisationTag: Int =
relevantStore.materialiseSkolemly(relevantProgram, null, currentMaxTermDepth)
query.addProcessingTime(Step.SKOLEM_UPPER_BOUND, timer.duration())
if (materialisationTag == -1) {
relevantStore.dispose()
throw new Error("A consistent ontology has turned out to be inconsistent in the Skolemises-relevant-upper-store")
}
if (materialisationTag != 1) {
Utility logInfo "Semi-Skolemised relevant upper store cannot be employed"
keepGoing = false
} else {
Utility logInfo "Querying semi-Skolemised upper store..."
isFullyProcessed = queryUpperStore(
relevantStore, query, query.getExtendedQueryText(), Step.SKOLEM_UPPER_BOUND
)
try {
lastTwoTriplesCounts.add(new Tuple(currentMaxTermDepth, relevantStore.getStoreSize))
if (lastTwoTriplesCounts.size() > 2)
lastTwoTriplesCounts.remove()
Utility logDebug s"Last two triples counts: $lastTwoTriplesCounts"
currentMaxTermDepth += 1
} catch {
case e: JRDFStoreException => {
e.printStackTrace()
keepGoing = false
}
}
}
}
}
relevantStore.dispose()
Utility logInfo "Semi-Skolemised relevant upper store has been evaluated"
isFullyProcessed
}
/** Consistency status of the ontology.
 *
 * Three-valued: consistent, inconsistent, or not yet checked.
 * `asBoolean` is true only for the consistent case.
 */
private sealed trait ConsistencyStatus {
val asBoolean = false
}
private case object StatusConsistent extends ConsistencyStatus {
override val asBoolean = true
override def toString(): String = "consistent"
}
private case object StatusInconsistent extends ConsistencyStatus {
override def toString(): String = "inconsistent"
}
private case object StatusUnchecked extends ConsistencyStatus {
override def toString(): String = "N/A"
}
/** Lifts a boolean check result into a [[ConsistencyStatus]]. */
private implicit def boolean2consistencyStatus(b: Boolean): ConsistencyStatus = {
if (b) StatusConsistent else StatusInconsistent
}
}
|