author     Federico Igne <federico.igne@cs.ox.ac.uk>  2022-05-15 19:28:02 +0100
committer  Federico Igne <federico.igne@cs.ox.ac.uk>  2022-05-15 19:28:02 +0100
commit     2ebd0c8c4fd421dd676004e559b69ed8e5c9bb49 (patch)
tree       8ff4094587baa1b7c3eecb533762f123383fe17b /src/main/scala/uk/ac/ox/cs/acqua/reasoner
parent     d99c80db73e8456c969b262a4b99714bb693bfe0 (diff)
Finalise implementation of ACQuA query reasoner
Diffstat (limited to 'src/main/scala/uk/ac/ox/cs/acqua/reasoner')
-rw-r--r--  src/main/scala/uk/ac/ox/cs/acqua/reasoner/AcquaQueryReasoner.scala                                                  | 593
-rw-r--r--  src/main/scala/uk/ac/ox/cs/acqua/reasoner/RSACombQueryReasoner.scala (renamed from RSAQueryReasoner.scala)          |  16
2 files changed, 334 insertions, 275 deletions
diff --git a/src/main/scala/uk/ac/ox/cs/acqua/reasoner/AcquaQueryReasoner.scala b/src/main/scala/uk/ac/ox/cs/acqua/reasoner/AcquaQueryReasoner.scala
index 4fe32d8..0aa5ff2 100644
--- a/src/main/scala/uk/ac/ox/cs/acqua/reasoner/AcquaQueryReasoner.scala
+++ b/src/main/scala/uk/ac/ox/cs/acqua/reasoner/AcquaQueryReasoner.scala
@@ -16,13 +16,14 @@
 
 package uk.ac.ox.cs.acqua.reasoner
 
+import java.util.LinkedList;
+
 import scala.collection.JavaConverters._
 import org.semanticweb.karma2.profile.ELHOProfile
 import org.semanticweb.owlapi.model.OWLOntology
-// import org.semanticweb.owlapi.model.parameters.Imports;
-// import uk.ac.ox.cs.JRDFox.JRDFStoreException;
+import org.semanticweb.owlapi.model.parameters.Imports
+import uk.ac.ox.cs.JRDFox.JRDFStoreException;
 import uk.ac.ox.cs.pagoda.multistage.MultiStageQueryEngine
-// import uk.ac.ox.cs.pagoda.owl.EqualitiesEliminator;
 import uk.ac.ox.cs.pagoda.owl.OWLHelper
 import uk.ac.ox.cs.pagoda.query.{
   AnswerTuples,
@@ -30,7 +31,7 @@ import uk.ac.ox.cs.pagoda.query.{
   GapByStore4ID2,
   QueryRecord,
 }
-import uk.ac.ox.cs.pagoda.query.QueryRecord.Step;
+import uk.ac.ox.cs.pagoda.query.QueryRecord.Step
 import uk.ac.ox.cs.pagoda.reasoner.{
   ConsistencyManager,
   MyQueryReasoner,
@@ -38,28 +39,29 @@ import uk.ac.ox.cs.pagoda.reasoner.{
 }
 import uk.ac.ox.cs.pagoda.reasoner.light.{KarmaQueryEngine,BasicQueryEngine}
 import uk.ac.ox.cs.pagoda.rules.DatalogProgram
-// import uk.ac.ox.cs.pagoda.summary.HermitSummaryFilter;
-// import uk.ac.ox.cs.pagoda.tracking.QueryTracker;
+import uk.ac.ox.cs.pagoda.summary.HermitSummaryFilter;
 import uk.ac.ox.cs.pagoda.tracking.{
+  QueryTracker,
   TrackingRuleEncoder,
   TrackingRuleEncoderDisjVar1,
   TrackingRuleEncoderWithGap,
 }
-// import uk.ac.ox.cs.pagoda.util.ExponentialInterpolation;
-// import uk.ac.ox.cs.pagoda.util.PagodaProperties;
-import uk.ac.ox.cs.pagoda.util.Timer
-import uk.ac.ox.cs.pagoda.util.Utility
-// import uk.ac.ox.cs.pagoda.util.disposable.DisposedException;
+import uk.ac.ox.cs.pagoda.util.{
+  ExponentialInterpolation,
+  PagodaProperties,
+  Timer,
+  Utility
+}
 import uk.ac.ox.cs.pagoda.util.tuples.Tuple;
 import uk.ac.ox.cs.rsacomb.ontology.Ontology
 import uk.ac.ox.cs.rsacomb.approximation.{Lowerbound,Upperbound}
 
-// import java.util.Collection;
-// import java.util.LinkedList;
-
 class AcquaQueryReasoner(val ontology: Ontology)
   extends QueryReasoner {
 
+  /** Compatibility conversions between PAGOdA and RSAComb */
+  import uk.ac.ox.cs.acqua.implicits.PagodaConverters._
+
   private var encoder: Option[TrackingRuleEncoder] = None
   private var lazyUpperStore: Option[MultiStageQueryEngine] = None;
 
@@ -167,9 +169,11 @@ class AcquaQueryReasoner(val ontology: Ontology)
     if (!isConsistent()) return false
     consistencyManager.extractBottomFragment();
 
-    /* Force computation of lower/upper RSA approximations */
-    lowerRSAOntology//.computeCanonicalModel()
-    upperRSAOntology//.computeCanonicalModel()
+    /* Force computation of the lower RSA approximation and its canonical
+     * model. We wait to process the upper bound since it might not be
+     * necessary after all. */
+    lowerRSAOntology.computeCanonicalModel()
+    //upperRSAOntology.computeCanonicalModel()
 
     true
   }
@@ -190,127 +194,152 @@ class AcquaQueryReasoner(val ontology: Ontology)
     return _isConsistent.asBoolean
   }
 
+  /** Evaluate a query against this reasoner.
+    *
+    * This is the main entry point to compute the answers to a query.
+    * By the end of the computation, the query record passed as input
+    * will contain the answers found during the answering process.
+    * This behaves conservatively and will try very hard not to perform
+    * unnecessary computation.
+    *
+    * @param query the query record to evaluate.
+    */
   def evaluate(query: QueryRecord): Unit = {
-    if(queryLowerAndUpperBounds(query))
-      return;
-
-//        OWLOntology relevantOntologySubset = extractRelevantOntologySubset(queryRecord);
-
-////        queryRecord.saveRelevantOntology("/home/alessandro/Desktop/test-relevant-ontology-"+relevantOntologiesCounter+".owl");
-////        relevantOntologiesCounter++;
-
-//        if(properties.getSkolemUpperBound() == PagodaProperties.SkolemUpperBoundOptions.BEFORE_SUMMARISATION
-//                && querySkolemisedRelevantSubset(relevantOntologySubset, queryRecord)) {
-//            return;
-//        }
-
-//        Utility.logInfo(">> Summarisation <<");
-//        HermitSummaryFilter summarisedChecker = new HermitSummaryFilter(queryRecord, properties.getToCallHermiT());
-//        if(summarisedChecker.check(queryRecord.getGapAnswers()) == 0) {
-//            summarisedChecker.dispose();
-//            return;
-//        }
-
-//        if(properties.getSkolemUpperBound() == PagodaProperties.SkolemUpperBoundOptions.AFTER_SUMMARISATION
-//                && querySkolemisedRelevantSubset(relevantOntologySubset, queryRecord)) {
-//            summarisedChecker.dispose();
-//            return;
-//        }
-
-//        Utility.logInfo(">> Full reasoning <<");
-//        Timer t = new Timer();
-//        summarisedChecker.checkByFullReasoner(queryRecord.getGapAnswers());
-//        Utility.logDebug("Total time for full reasoner: " + t.duration());
-
-//        if(properties.getToCallHermiT())
-//            queryRecord.markAsProcessed();
-//        summarisedChecker.dispose();
-    ???
+    val processed =
+      queryLowerAndUpperBounds(query) ||
+      queryRSALowerBound(query) ||
+      queryRSAUpperBound(query)
+    if (!processed) {
+      val relevantOntologySubset: OWLOntology =
+        extractRelevantOntologySubset(query)
+
+      if (properties.getSkolemUpperBound == PagodaProperties.SkolemUpperBoundOptions.BEFORE_SUMMARISATION &&
+          querySkolemisedRelevantSubset(relevantOntologySubset, query)
+      ) return;
+
+      Utility logInfo ">> Summarisation <<"
+      val summarisedChecker: HermitSummaryFilter =
+        new HermitSummaryFilter(query, properties.getToCallHermiT)
+      if(summarisedChecker.check(query.getGapAnswers) == 0) {
+        summarisedChecker.dispose()
+        return;
+      }
+
+      if (properties.getSkolemUpperBound == PagodaProperties.SkolemUpperBoundOptions.AFTER_SUMMARISATION &&
+          querySkolemisedRelevantSubset(relevantOntologySubset, query)
+      ) {
+        summarisedChecker.dispose()
+        return;
+      }
+
+      Utility logInfo ">> Full reasoning <<"
+      timer.reset()
+      summarisedChecker checkByFullReasoner query.getGapAnswers
+      Utility logDebug s"Total time for full reasoner: ${timer.duration()}"
+
+      if (properties.getToCallHermiT) query.markAsProcessed()
+
+      summarisedChecker.dispose()
+    }
+  }
+
+  /** Only compute the upper bound for a query.
+    *
+    * @note this is not supported at the moment. Look at
+    * [[uk.ac.ox.cs.pagoda.reasoner.MyQueryReasoner]] for an example
+    * implementation.
+    */
+  def evaluateUpper(record: QueryRecord): Unit = ???
+
+  /** Clean up the query reasoner */
+  override def dispose(): Unit = {
+    super.dispose()
+    if(encoder.isDefined) encoder.get.dispose()
+    if(rlLowerStore != null) rlLowerStore.dispose();
+    if(lazyUpperStore.isDefined) lazyUpperStore.get.dispose();
+    if(elLowerStore != null) elLowerStore.dispose();
+    if(trackingStore != null) trackingStore.dispose();
+    if(consistencyManager != null) consistencyManager.dispose();
+    if(datalog != null) datalog.dispose();
+  }
+
+  /** Perform CQ answering for a specific upper bound engine.
+    *
+    * @param store upper bound engine to be used in the computation.
+    * @param query query record.
+    * @param queryText actual text of the query to be executed.
+    * @param answerVariables answer variables for the query.
+    */
+  private def queryUpperBound(
+    store: BasicQueryEngine,
+    query: QueryRecord,
+    queryText: String,
+    answerVariables: Array[String]
+  ): Unit = {
+    var rlAnswer: AnswerTuples = null
+    try {
+      Utility logDebug queryText
+      rlAnswer = store.evaluate(queryText, answerVariables)
+      Utility logDebug timer.duration()
+      query updateUpperBoundAnswers rlAnswer
+    } finally {
+      if (rlAnswer != null) rlAnswer.dispose()
+    }
   }
 
-  def evaluateUpper(record: QueryRecord): Unit= ???
-
-//    @Override
-//    public void evaluate(QueryRecord queryRecord) {
-//    }
-
-//    @Override
-//    public void evaluateUpper(QueryRecord queryRecord) {
-//        if(isDisposed()) throw new DisposedException();
-//        // TODO? add new upper store
-//        AnswerTuples rlAnswer = null;
-//        boolean useFull = queryRecord.isBottom() || lazyUpperStore == null;
-//        try {
-//            rlAnswer =
-//                (useFull ? trackingStore : lazyUpperStore).evaluate(queryRecord.getQueryText(), queryRecord.getAnswerVariables());
-//            queryRecord.updateUpperBoundAnswers(rlAnswer, true);
-//        } finally {
-//            if(rlAnswer != null) rlAnswer.dispose();
-//        }
-//    }
-
-//    @Override
-//    public void dispose() {
-//        super.dispose();
-
-//        if(encoder != null) encoder.dispose();
-//        if(rlLowerStore != null) rlLowerStore.dispose();
-//        if(lazyUpperStore != null) lazyUpperStore.dispose();
-//        if(elLowerStore != null) elLowerStore.dispose();
-//        if(trackingStore != null) trackingStore.dispose();
-//        if(consistency != null) consistency.dispose();
-//        if(program != null) program.dispose();
-//    }
-
-//    protected void internal_importDataFile(String name, String datafile) {
-////        addDataFile(datafile);
-//        rlLowerStore.importRDFData(name, datafile);
-//        if(lazyUpperStore != null)
-//            lazyUpperStore.importRDFData(name, datafile);
-//        elLowerStore.importRDFData(name, datafile);
-//        trackingStore.importRDFData(name, datafile);
-//    }
-
-//    /**
-//     * It deals with blanks nodes differently from variables
-//     * according to SPARQL semantics for OWL2 Entailment Regime.
-//     * <p>
-//     * In particular variables are matched only against named individuals,
-//     * and blank nodes against named and anonymous individuals.
-//     */
-//    private boolean queryUpperStore(BasicQueryEngine upperStore, QueryRecord queryRecord,
-//                                    Tuple<String> extendedQuery, Step step) {
-//        t.reset();
-
-//        Utility.logDebug("First query type");
-//        queryUpperBound(upperStore, queryRecord, queryRecord.getQueryText(), queryRecord.getAnswerVariables());
-//        if(!queryRecord.isProcessed() && !queryRecord.getQueryText().equals(extendedQuery.get(0))) {
-//            Utility.logDebug("Second query type");
-//            queryUpperBound(upperStore, queryRecord, extendedQuery.get(0), queryRecord.getAnswerVariables());
-//        }
-//        if(!queryRecord.isProcessed() && queryRecord.hasNonAnsDistinguishedVariables()) {
-//            Utility.logDebug("Third query type");
-//            queryUpperBound(upperStore, queryRecord, extendedQuery.get(1), queryRecord.getDistinguishedVariables());
-//        }
-
-//        queryRecord.addProcessingTime(step, t.duration());
-//        if(queryRecord.isProcessed()) {
-//            queryRecord.setDifficulty(step);
-//            return true;
-//        }
-//        return false;
-//    }
-
-  /**
-   * Returns the part of the ontology relevant for Hermit, while computing the bound answers.
-   */
+  /** Perform CQ answering for a specific upper bound engine.
+    *
+    * @param upperStore upper bound engine to be used in the computation.
+    * @param query query record.
+    * @param extendedQuery extended version of the query.
+    * @param step difficulty of the current step.
+    * @returns whether the query has been fully answered, i.e., the
+    * bounds computed so far coincide.
+    *
+    * @note It deals with blank nodes differently from variables
+    * according to SPARQL semantics for the OWL2 Entailment Regime. In
+    * particular, variables are matched only against named individuals,
+    * and blank nodes against named and anonymous individuals.
+    */
+  private def queryUpperStore(
+    upperStore: BasicQueryEngine,
+    query: QueryRecord,
+    extendedQuery: Tuple[String],
+    step: Step
+  ): Boolean = {
+    timer.reset();
+
+    Utility logDebug "First query type"
+    queryUpperBound(upperStore, query, query.getQueryText, query.getAnswerVariables)
+    if (!query.isProcessed() && !query.getQueryText().equals(extendedQuery.get(0))) {
+      Utility logDebug "Second query type"
+      queryUpperBound(upperStore, query, extendedQuery.get(0), query.getAnswerVariables)
+    }
+    if (!query.isProcessed() && query.hasNonAnsDistinguishedVariables()) {
+      Utility logDebug "Third query type"
+      queryUpperBound(upperStore, query, extendedQuery.get(1), query.getDistinguishedVariables)
+    }
+
+    query.addProcessingTime(step, timer.duration())
+    if (query.isProcessed()) query.setDifficulty(step)
+    query.isProcessed()
+  }
+
+  /** Computes the bounds to the answers for a query.
+    *
+    * Both the lower (RL + ELHO) and upper bounds are computed here.
+    *
+    * @param query the query to be executed
+    * @returns whether the query has been fully answered, i.e., the
+    * bounds computed so far coincide.
+    */
   private def queryLowerAndUpperBounds(query: QueryRecord): Boolean = {
     Utility logInfo ">> Base bounds <<"
     val extendedQueryTexts: Tuple[String] = query.getExtendedQueryText()
     var rlAnswer: AnswerTuples = null
     var elAnswer: AnswerTuples = null
 
-    /* Computing RL lower bound answers */
+    /* Compute RL lower bound answers */
     timer.reset();
     try {
       rlAnswer = rlLowerStore.evaluate(query.getQueryText, query.getAnswerVariables)
@@ -321,148 +350,182 @@ class AcquaQueryReasoner(val ontology: Ontology)
     }
     query.addProcessingTime(Step.LOWER_BOUND, timer.duration());
 
+    /* Compute upper bound answers */
     if(properties.getUseAlwaysSimpleUpperBound() || lazyUpperStore.isEmpty) {
       Utility logDebug "Tracking store"
-      // if (queryUpperStore(trackingStore, query, extendedQueryTexts, Step.SIMPLE_UPPER_BOUND))
-      //   return true;
+      if (queryUpperStore(trackingStore, query, extendedQueryTexts, Step.SIMPLE_UPPER_BOUND))
+        return true;
+    }
+    if (!query.isBottom) {
+      Utility logDebug "Lazy store"
+      if (lazyUpperStore.isDefined && queryUpperStore(lazyUpperStore.get, query, extendedQueryTexts, Step.LAZY_UPPER_BOUND))
+        return true
     }
 
-//        if(!queryRecord.isBottom()) {
-//            Utility.logDebug("Lazy store");
-//            if(lazyUpperStore != null && queryUpperStore(lazyUpperStore, queryRecord, extendedQueryTexts, Step.LAZY_UPPER_BOUND))
-//                return true;
-//        }
-
-//        t.reset();
-//        try {
-//            elAnswer = elLowerStore.evaluate(extendedQueryTexts.get(0),
-//                    queryRecord.getAnswerVariables(),
-//                    queryRecord.getLowerBoundAnswers());
-//            Utility.logDebug(t.duration());
-//            queryRecord.updateLowerBoundAnswers(elAnswer);
-//        } finally {
-//            if(elAnswer != null) elAnswer.dispose();
-//        }
-//        queryRecord.addProcessingTime(Step.EL_LOWER_BOUND, t.duration());
-
-//        if(queryRecord.isProcessed()) {
-//            queryRecord.setDifficulty(Step.EL_LOWER_BOUND);
-//            return true;
-//        }
-
-    return false;
+    timer.reset()
+    /* Compute ELHO lower bound answers */
+    try {
+      elAnswer = elLowerStore.evaluate(
+        extendedQueryTexts.get(0),
+        query.getAnswerVariables,
+        query.getLowerBoundAnswers
+      )
+      Utility logDebug timer.duration()
+      query updateLowerBoundAnswers elAnswer
+    } finally {
+      if (elAnswer != null) elAnswer.dispose()
+    }
+    query.addProcessingTime(Step.EL_LOWER_BOUND, timer.duration())
+
+    if (query.isProcessed()) query.setDifficulty(Step.EL_LOWER_BOUND)
+    query.isProcessed()
+  }
+
+  /** Compute lower bound using RSAComb.
+    *
+    * @param query query record to update.
+    * @returns true if the query is fully answered.
+    */
+  private def queryRSALowerBound(query: QueryRecord): Boolean = {
+    import uk.ac.ox.cs.acqua.implicits.RSACombAnswerTuples._
+    val answers = lowerRSAOntology ask query
+    query updateLowerBoundAnswers answers
+    query.isProcessed
   }
 
-//    private OWLOntology extractRelevantOntologySubset(QueryRecord queryRecord) {
-//        Utility.logInfo(">> Relevant ontology-subset extraction <<");
-
-//        t.reset();
-
-//        QueryTracker tracker = new QueryTracker(encoder, rlLowerStore, queryRecord);
-//        OWLOntology relevantOntologySubset = tracker.extract(trackingStore, consistency.getQueryRecords(), true);
-
-//        queryRecord.addProcessingTime(Step.FRAGMENT, t.duration());
-
-//        int numOfABoxAxioms = relevantOntologySubset.getABoxAxioms(Imports.INCLUDED).size();
-//        int numOfTBoxAxioms = relevantOntologySubset.getAxiomCount() - numOfABoxAxioms;
-//        Utility.logInfo("Relevant ontology-subset has been extracted: |ABox|="
-//                + numOfABoxAxioms + ", |TBox|=" + numOfTBoxAxioms);
-
-//        return relevantOntologySubset;
-//    }
-
-//    private void queryUpperBound(BasicQueryEngine upperStore, QueryRecord queryRecord, String queryText, String[] answerVariables) {
-//        AnswerTuples rlAnswer = null;
-//        try {
-//            Utility.logDebug(queryText);
-//            rlAnswer = upperStore.evaluate(queryText, answerVariables);
-//            Utility.logDebug(t.duration());
-//            queryRecord.updateUpperBoundAnswers(rlAnswer);
-//        } finally {
-//            if(rlAnswer != null) rlAnswer.dispose();
-//        }
-//    }
-
-//    private boolean querySkolemisedRelevantSubset(OWLOntology relevantSubset, QueryRecord queryRecord) {
-//        Utility.logInfo(">> Semi-Skolemisation <<");
-//        t.reset();
-
-//        DatalogProgram relevantProgram = new DatalogProgram(relevantSubset);
-
-//        MultiStageQueryEngine relevantStore =
-//                new MultiStageQueryEngine("Relevant-store", true); // checkValidity is true
-
-//        relevantStore.importDataFromABoxOf(relevantSubset);
-//        String relevantOriginalMarkProgram = OWLHelper.getOriginalMarkProgram(relevantSubset);
-
-//        relevantStore.materialise("Mark original individuals", relevantOriginalMarkProgram);
-
-//        boolean isFullyProcessed = false;
-//        LinkedList<Tuple<Long>> lastTwoTriplesCounts = new LinkedList<>();
-//        for (int currentMaxTermDepth = 1; !isFullyProcessed; currentMaxTermDepth++) {
-
-//            if(currentMaxTermDepth > properties.getSkolemDepth()) {
-//                Utility.logInfo("Maximum term depth reached");
-//                break;
-//            }
-
-//            if(lastTwoTriplesCounts.size() == 2) {
-//                if(lastTwoTriplesCounts.get(0).get(1).equals(lastTwoTriplesCounts.get(1).get(1)))
-//                    break;
-
-//                ExponentialInterpolation interpolation = new ExponentialInterpolation(lastTwoTriplesCounts.get(0).get(0),
-//                        lastTwoTriplesCounts.get(0).get(1),
-//                        lastTwoTriplesCounts.get(1).get(0),
-//                        lastTwoTriplesCounts.get(1).get(1));
-//                double triplesEstimate = interpolation.computeValue(currentMaxTermDepth);
-
-//                Utility.logDebug("Estimate of the number of triples:" + triplesEstimate);
-
-//                // exit condition if the query is not fully answered
-//                if(triplesEstimate > properties.getMaxTriplesInSkolemStore()) {
-//                    Utility.logInfo("Interrupting Semi-Skolemisation because of triples count limit");
-//                    break;
-//                }
-//            }
-
-//            Utility.logInfo("Trying with maximum depth " + currentMaxTermDepth);
-
-//            int materialisationTag = relevantStore.materialiseSkolemly(relevantProgram, null,
-//                    currentMaxTermDepth);
-//            queryRecord.addProcessingTime(Step.SKOLEM_UPPER_BOUND, t.duration());
-//            if(materialisationTag == -1) {
-//                relevantStore.dispose();
-//                throw new Error("A consistent ontology has turned out to be " +
-//                        "inconsistent in the Skolemises-relevant-upper-store");
-//            }
-//            else if(materialisationTag != 1) {
-//                Utility.logInfo("Semi-Skolemised relevant upper store cannot be employed");
-//                break;
-//            }
-
-//            Utility.logInfo("Querying semi-Skolemised upper store...");
-//            isFullyProcessed = queryUpperStore(relevantStore, queryRecord,
-//                    queryRecord.getExtendedQueryText(),
-//                    Step.SKOLEM_UPPER_BOUND);
-
-//            try {
-//                lastTwoTriplesCounts.add
-//                        (new Tuple<>((long) currentMaxTermDepth, relevantStore.getStoreSize()));
-//            } catch (JRDFStoreException e) {
-//                e.printStackTrace();
-//                break;
-//            }
-//            if(lastTwoTriplesCounts.size() > 2)
-//                lastTwoTriplesCounts.remove();
-
-//            Utility.logDebug("Last two triples counts:" + lastTwoTriplesCounts);
-//        }
-
-//        relevantStore.dispose();
-//        Utility.logInfo("Semi-Skolemised relevant upper store has been evaluated");
-//        return isFullyProcessed;
-//    }
+  /** Compute upper bound using RSAComb.
+    *
+    * @param query query record to update.
+    * @returns true if the query is fully answered.
+    */
+  private def queryRSAUpperBound(query: QueryRecord): Boolean = {
+    import uk.ac.ox.cs.acqua.implicits.RSACombAnswerTuples._
+    val answers = upperRSAOntology ask query
+    query updateUpperBoundAnswers answers
+    query.isProcessed
+  }
+
+  /** Extract a subset of the ontology relevant to the query.
+    *
+    * @param query query record for which the subset ontology is computed.
+    * @returns an [[OWLOntology]] subset of the input ontology.
+    */
+  private def extractRelevantOntologySubset(query: QueryRecord): OWLOntology = {
+    Utility logInfo ">> Relevant ontology-subset extraction <<"
+
+    timer.reset()
+
+    val tracker: QueryTracker =
+      new QueryTracker(encoder.get, rlLowerStore, query)
+    val relevantOntologySubset: OWLOntology =
+      tracker.extract(trackingStore, consistencyManager.getQueryRecords, true)
+
+    query.addProcessingTime(Step.FRAGMENT, timer.duration())
+
+    val numOfABoxAxioms: Int = relevantOntologySubset.getABoxAxioms(Imports.INCLUDED).size
+    val numOfTBoxAxioms: Int = relevantOntologySubset.getAxiomCount() - numOfABoxAxioms
+    Utility logInfo s"Relevant ontology-subset has been extracted: |ABox|=$numOfABoxAxioms, |TBox|=$numOfTBoxAxioms"
+
+    return relevantOntologySubset
+  }
+
+  /** Query the skolemized ontology subset relevant to a query record.
+    *
+    * @param relevantSubset the relevant ontology subset.
+    * @param query the query to be answered.
+    * @returns true if the query has been fully answered.
+    *
+    * TODO: the code has been adapted from [[uk.ac.ox.cs.pagoda.reasoner.MyQueryReasoner]]
+    * and ported to Scala. There are better, more Scala-esque ways of
+    * dealing with the big `while` in this function, but this should work
+    * for now.
+    */
+  private def querySkolemisedRelevantSubset(
+    relevantSubset: OWLOntology,
+    query: QueryRecord
+  ): Boolean = {
+    Utility logInfo ">> Semi-Skolemisation <<"
+    timer.reset()
+
+    val relevantProgram: DatalogProgram = new DatalogProgram(relevantSubset)
+    val relevantStore: MultiStageQueryEngine =
+      new MultiStageQueryEngine("Relevant-store", true)
+    relevantStore importDataFromABoxOf relevantSubset
+    val relevantOriginalMarkProgram: String =
+      OWLHelper getOriginalMarkProgram relevantSubset
+    relevantStore.materialise("Mark original individuals", relevantOriginalMarkProgram)
+
+    var isFullyProcessed = false
+    val lastTwoTriplesCounts: LinkedList[Tuple[Long]] = new LinkedList()
+    var currentMaxTermDepth = 1
+    var keepGoing = true
+    while (!isFullyProcessed && keepGoing) {
+      if (currentMaxTermDepth > properties.getSkolemDepth) {
+        Utility logInfo "Maximum term depth reached"
+        keepGoing = false
+      } else if (
+        lastTwoTriplesCounts.size() == 2 && (
+          lastTwoTriplesCounts.get(0).get(1).equals(lastTwoTriplesCounts.get(1).get(1)) ||
+          {
+            val interpolation: ExponentialInterpolation =
+              new ExponentialInterpolation(
+                lastTwoTriplesCounts.get(0).get(0),
+                lastTwoTriplesCounts.get(0).get(1),
+                lastTwoTriplesCounts.get(1).get(0),
+                lastTwoTriplesCounts.get(1).get(1)
+              )
+            val triplesEstimate: Double =
+              interpolation computeValue currentMaxTermDepth
+            Utility logDebug s"Estimate of the number of triples: $triplesEstimate"
+            if (triplesEstimate > properties.getMaxTriplesInSkolemStore)
+              Utility logInfo "Interrupting Semi-Skolemisation because of triples count limit"
+            triplesEstimate > properties.getMaxTriplesInSkolemStore
+          }
+        )
+      ) {
+        keepGoing = false
+      } else {
+        Utility logInfo s"Trying with maximum depth $currentMaxTermDepth"
+
+        val materialisationTag: Int =
+          relevantStore.materialiseSkolemly(relevantProgram, null, currentMaxTermDepth)
+        query.addProcessingTime(Step.SKOLEM_UPPER_BOUND, timer.duration())
+        if (materialisationTag == -1) {
+          relevantStore.dispose()
+          throw new Error("A consistent ontology has turned out to be inconsistent in the Skolemises-relevant-upper-store")
+        }
+
+        if (materialisationTag != 1) {
+          Utility logInfo "Semi-Skolemised relevant upper store cannot be employed"
+          keepGoing = false
+        } else {
+          Utility logInfo "Querying semi-Skolemised upper store..."
+          isFullyProcessed = queryUpperStore(
+            relevantStore, query, query.getExtendedQueryText(), Step.SKOLEM_UPPER_BOUND
+          )
+
+          try {
+            lastTwoTriplesCounts.add(new Tuple(currentMaxTermDepth, relevantStore.getStoreSize))
+            if (lastTwoTriplesCounts.size() > 2)
+              lastTwoTriplesCounts.remove()
+            Utility logDebug s"Last two triples counts: $lastTwoTriplesCounts"
+            currentMaxTermDepth += 1
+          } catch {
+            case e: JRDFStoreException => {
+              e.printStackTrace()
+              keepGoing = false
+            }
+          }
+        }
+      }
+    }
+
+    relevantStore.dispose()
+    Utility logInfo "Semi-Skolemised relevant upper store has been evaluated"
+    isFullyProcessed
+  }
 
+  /** Consistency status of the ontology */
   private sealed trait ConsistencyStatus {
     val asBoolean = false
   }
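
Note: the Semi-Skolemisation loop added above stops early when an exponential extrapolation of the store size predicts that the next term depth would exceed the configured triple limit. A minimal, self-contained Scala sketch of that estimate (my own reconstruction from the two (depth, triples) samples kept in lastTwoTriplesCounts; PAGOdA's ExponentialInterpolation may differ in detail):

    // Fit y = a * b^x through the samples (x1, y1) and (x2, y2), then
    // evaluate the fit at x. This mirrors the stopping rule that guards
    // materialiseSkolemly in querySkolemisedRelevantSubset.
    def estimateTriples(x1: Long, y1: Long, x2: Long, y2: Long)(x: Double): Double = {
      val b = math.pow(y2.toDouble / y1.toDouble, 1.0 / (x2 - x1)) // growth base
      val a = y1 / math.pow(b, x1.toDouble)                        // scale factor
      a * math.pow(b, x)
    }

    // Example: 10^4 triples at depth 1 and 10^5 at depth 2 predict 10^6 at
    // depth 3; if getMaxTriplesInSkolemStore is below that, the loop stops.
    estimateTriples(1, 10000L, 2, 100000L)(3.0) // ≈ 1.0e6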
diff --git a/src/main/scala/uk/ac/ox/cs/acqua/reasoner/RSAQueryReasoner.scala b/src/main/scala/uk/ac/ox/cs/acqua/reasoner/RSACombQueryReasoner.scala
index a6c5276..6d89b7b 100644
--- a/src/main/scala/uk/ac/ox/cs/acqua/reasoner/RSAQueryReasoner.scala
+++ b/src/main/scala/uk/ac/ox/cs/acqua/reasoner/RSACombQueryReasoner.scala
@@ -20,24 +20,20 @@ import java.util.Collection;
20import scala.collection.JavaConverters._ 20import scala.collection.JavaConverters._
21 21
22import org.semanticweb.owlapi.model.OWLOntology 22import org.semanticweb.owlapi.model.OWLOntology
23import uk.ac.ox.cs.rsacomb.ontology.RSAOntology
24import uk.ac.ox.cs.rsacomb.approximation.{Approximation,Lowerbound} 23import uk.ac.ox.cs.rsacomb.approximation.{Approximation,Lowerbound}
25import uk.ac.ox.cs.rsacomb.ontology.Ontology 24import uk.ac.ox.cs.rsacomb.ontology.{Ontology,RSAOntology}
26import uk.ac.ox.cs.pagoda.query.QueryRecord 25import uk.ac.ox.cs.pagoda.query.QueryRecord
27import uk.ac.ox.cs.pagoda.reasoner.QueryReasoner 26import uk.ac.ox.cs.pagoda.reasoner.QueryReasoner
27import uk.ac.ox.cs.acqua.approximation.Noop
28 28
29class RSAQueryReasoner(val origin: Ontology) extends QueryReasoner { 29class RSACombQueryReasoner(
30 val origin: Ontology,
31 val toRSA: Approximation[RSAOntology] = Noop
32) extends QueryReasoner {
30 33
31 /* Implicit compatibility between PAGOdA and RSAComb types */ 34 /* Implicit compatibility between PAGOdA and RSAComb types */
32 import uk.ac.ox.cs.acqua.implicits.PagodaConverters._ 35 import uk.ac.ox.cs.acqua.implicits.PagodaConverters._
33 36
34 /** This class is instantiated when the input ontology is RSA.
35 * Approximation (via any algorithm with RSAOntology as target)
36 * doesn't perform anything, but is useful to turn a generic
37 * [[uk.ac.ox.cs.rsacomb.ontology.Ontology]] into an
38 * [[uk.ac.ox.cs.rsacomb.RSAOntology]].
39 */
40 private val toRSA: Approximation[RSAOntology] = new Lowerbound
41 val rsa: RSAOntology = origin approximate toRSA 37 val rsa: RSAOntology = origin approximate toRSA
42 38
43 /** Doesn't perform any action. 39 /** Doesn't perform any action.
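
For orientation, a minimal driver sketch based only on the signatures visible in this commit (AcquaQueryReasoner(ontology), RSACombQueryReasoner(origin, toRSA = Noop), and QueryReasoner's evaluate/dispose); the ontologyIsRSA flag is a hypothetical stand-in for whatever RSA check the surrounding codebase performs, and any loading/preprocessing the QueryReasoner base class requires is elided:

    import uk.ac.ox.cs.rsacomb.ontology.Ontology
    import uk.ac.ox.cs.pagoda.query.QueryRecord
    import uk.ac.ox.cs.acqua.reasoner.{AcquaQueryReasoner, RSACombQueryReasoner}

    def answer(ontology: Ontology, query: QueryRecord, ontologyIsRSA: Boolean): Unit = {
      // RSACombQueryReasoner defaults to the Noop approximation, so it is
      // the natural choice when the input is already RSA; AcquaQueryReasoner
      // runs the full lower/upper bound cascade otherwise.
      val reasoner =
        if (ontologyIsRSA) new RSACombQueryReasoner(ontology)
        else new AcquaQueryReasoner(ontology)
      reasoner evaluate query // answers are accumulated inside `query`
      reasoner.dispose()
    }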