diff options
Diffstat (limited to 'src')
| -rw-r--r-- | src/main/java/uk/ac/ox/cs/pagoda/query/QueryRecord.java | 16 | ||||
| -rw-r--r-- | src/main/scala/uk/ac/ox/cs/acqua/Main.scala | 11 | ||||
| -rw-r--r-- | src/main/scala/uk/ac/ox/cs/acqua/approximation/Noop.scala | 34 | ||||
| -rw-r--r-- | src/main/scala/uk/ac/ox/cs/acqua/implicits/RSACombAnswerTuples.scala | 35 | ||||
| -rw-r--r-- | src/main/scala/uk/ac/ox/cs/acqua/reasoner/AcquaQueryReasoner.scala | 593 | ||||
| -rw-r--r-- | src/main/scala/uk/ac/ox/cs/acqua/reasoner/RSACombQueryReasoner.scala (renamed from src/main/scala/uk/ac/ox/cs/acqua/reasoner/RSAQueryReasoner.scala) | 16 |
6 files changed, 400 insertions, 305 deletions
diff --git a/src/main/java/uk/ac/ox/cs/pagoda/query/QueryRecord.java b/src/main/java/uk/ac/ox/cs/pagoda/query/QueryRecord.java index 5fa1b23..1fb4ed7 100644 --- a/src/main/java/uk/ac/ox/cs/pagoda/query/QueryRecord.java +++ b/src/main/java/uk/ac/ox/cs/pagoda/query/QueryRecord.java | |||
| @@ -668,13 +668,13 @@ public class QueryRecord extends Disposable { | |||
| 668 | } | 668 | } |
| 669 | 669 | ||
| 670 | public boolean updateUpperBoundAnswers(AnswerTuples answerTuples, boolean toCheckAux) { | 670 | public boolean updateUpperBoundAnswers(AnswerTuples answerTuples, boolean toCheckAux) { |
| 671 | RDFoxAnswerTuples rdfAnswerTuples; | 671 | // RDFoxAnswerTuples rdfAnswerTuples; |
| 672 | if(answerTuples instanceof RDFoxAnswerTuples) | 672 | // if(answerTuples instanceof RDFoxAnswerTuples) |
| 673 | rdfAnswerTuples = (RDFoxAnswerTuples) answerTuples; | 673 | // rdfAnswerTuples = (RDFoxAnswerTuples) answerTuples; |
| 674 | else { | 674 | // else { |
| 675 | Utility.logError("The upper bound must be computed by RDFox!"); | 675 | // Utility.logError("The upper bound must be computed by RDFox!"); |
| 676 | return false; | 676 | // return false; |
| 677 | } | 677 | // } |
| 678 | 678 | ||
| 679 | if(soundAnswerTuples.size() > 0) { | 679 | if(soundAnswerTuples.size() > 0) { |
| 680 | int number = 0; | 680 | int number = 0; |
| @@ -697,7 +697,7 @@ public class QueryRecord extends Disposable { | |||
| 697 | Set<AnswerTuple> tupleSet = new HashSet<AnswerTuple>(); | 697 | Set<AnswerTuple> tupleSet = new HashSet<AnswerTuple>(); |
| 698 | AnswerTuple tuple, extendedTuple; | 698 | AnswerTuple tuple, extendedTuple; |
| 699 | for(; answerTuples.isValid(); answerTuples.moveNext()) { | 699 | for(; answerTuples.isValid(); answerTuples.moveNext()) { |
| 700 | extendedTuple = rdfAnswerTuples.getTuple(); | 700 | extendedTuple = answerTuples.getTuple(); |
| 701 | if(isBottom() || !extendedTuple.hasAnonymousIndividual()) { | 701 | if(isBottom() || !extendedTuple.hasAnonymousIndividual()) { |
| 702 | tuple = AnswerTuple.create(extendedTuple, answerVariables[0].length); | 702 | tuple = AnswerTuple.create(extendedTuple, answerVariables[0].length); |
| 703 | if((!toCheckAux || !tuple.hasAuxPredicate()) && !soundAnswerTuples.contains(tuple)) { | 703 | if((!toCheckAux || !tuple.hasAuxPredicate()) && !soundAnswerTuples.contains(tuple)) { |
diff --git a/src/main/scala/uk/ac/ox/cs/acqua/Main.scala b/src/main/scala/uk/ac/ox/cs/acqua/Main.scala index 62cf87c..dfb9630 100644 --- a/src/main/scala/uk/ac/ox/cs/acqua/Main.scala +++ b/src/main/scala/uk/ac/ox/cs/acqua/Main.scala | |||
| @@ -25,7 +25,10 @@ import uk.ac.ox.cs.pagoda.reasoner.{ELHOQueryReasoner,MyQueryReasoner,QueryReaso | |||
| 25 | import uk.ac.ox.cs.pagoda.util.PagodaProperties; | 25 | import uk.ac.ox.cs.pagoda.util.PagodaProperties; |
| 26 | import uk.ac.ox.cs.pagoda.util.Utility; | 26 | import uk.ac.ox.cs.pagoda.util.Utility; |
| 27 | 27 | ||
| 28 | import uk.ac.ox.cs.acqua.reasoner.RSAQueryReasoner | 28 | import uk.ac.ox.cs.acqua.reasoner.{ |
| 29 | AcquaQueryReasoner, | ||
| 30 | RSACombQueryReasoner | ||
| 31 | } | ||
| 29 | import uk.ac.ox.cs.acqua.util.AcquaConfig | 32 | import uk.ac.ox.cs.acqua.util.AcquaConfig |
| 30 | 33 | ||
| 31 | object Acqua extends App { | 34 | object Acqua extends App { |
| @@ -45,11 +48,9 @@ object Acqua extends App { | |||
| 45 | } else if (OWLHelper.isInELHO(ontology.origin)) { | 48 | } else if (OWLHelper.isInELHO(ontology.origin)) { |
| 46 | new ELHOQueryReasoner(); | 49 | new ELHOQueryReasoner(); |
| 47 | } else if (ontology.isRSA) { | 50 | } else if (ontology.isRSA) { |
| 48 | new RSAQueryReasoner(ontology) | 51 | new RSACombQueryReasoner(ontology) |
| 49 | } else { | 52 | } else { |
| 50 | // Return ACQuA reasoner | 53 | new AcquaQueryReasoner(ontology) |
| 51 | // new MyQueryReasoner(performMultiStages, considerEqualities); | ||
| 52 | ??? | ||
| 53 | } | 54 | } |
| 54 | 55 | ||
| 55 | /* Preprocessing */ | 56 | /* Preprocessing */ |
diff --git a/src/main/scala/uk/ac/ox/cs/acqua/approximation/Noop.scala b/src/main/scala/uk/ac/ox/cs/acqua/approximation/Noop.scala new file mode 100644 index 0000000..69489ac --- /dev/null +++ b/src/main/scala/uk/ac/ox/cs/acqua/approximation/Noop.scala | |||
| @@ -0,0 +1,34 @@ | |||
| 1 | /* | ||
| 2 | * Copyright 2021,2022 KRR Oxford | ||
| 3 | * | ||
| 4 | * Licensed under the Apache License, Version 2.0 (the "License"); | ||
| 5 | * you may not use this file except in compliance with the License. | ||
| 6 | * You may obtain a copy of the License at | ||
| 7 | * | ||
| 8 | * http://www.apache.org/licenses/LICENSE-2.0 | ||
| 9 | * | ||
| 10 | * Unless required by applicable law or agreed to in writing, software | ||
| 11 | * distributed under the License is distributed on an "AS IS" BASIS, | ||
| 12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | ||
| 13 | * See the License for the specific language governing permissions and | ||
| 14 | * limitations under the License. | ||
| 15 | */ | ||
| 16 | |||
| 17 | package uk.ac.ox.cs.acqua.approximation | ||
| 18 | |||
| 19 | import uk.ac.ox.cs.rsacomb.ontology.{Ontology,RSAOntology} | ||
| 20 | import uk.ac.ox.cs.rsacomb.approximation.Approximation | ||
| 21 | |||
| 22 | /** Dummy approximation without any effect. | ||
| 23 | * | ||
| 24 | * @note this is only useful to convert an already RSA | ||
| 25 | * [[uk.ac.ox.cs.rsacomb.ontology.Ontology]] into an | ||
| 26 | * [[uk.ac.ox.cs.rsacomb.ontology.RSAOntology]]. | ||
| 27 | */ | ||
| 28 | object Noop extends Approximation[RSAOntology] { | ||
| 29 | |||
| 30 | def approximate(ontology: Ontology): RSAOntology = | ||
| 31 | RSAOntology(ontology.origin, ontology.axioms, ontology.datafiles) | ||
| 32 | |||
| 33 | } | ||
| 34 | |||
diff --git a/src/main/scala/uk/ac/ox/cs/acqua/implicits/RSACombAnswerTuples.scala b/src/main/scala/uk/ac/ox/cs/acqua/implicits/RSACombAnswerTuples.scala index 4f19e62..d0bba72 100644 --- a/src/main/scala/uk/ac/ox/cs/acqua/implicits/RSACombAnswerTuples.scala +++ b/src/main/scala/uk/ac/ox/cs/acqua/implicits/RSACombAnswerTuples.scala | |||
| @@ -44,20 +44,20 @@ object RSACombAnswerTuples { | |||
| 44 | val answers: ConjunctiveQueryAnswers | 44 | val answers: ConjunctiveQueryAnswers |
| 45 | ) extends AnswerTuples { | 45 | ) extends AnswerTuples { |
| 46 | 46 | ||
| 47 | /* TODO: this might not be the best choice, since the internal | 47 | /* Iterator simulated using an index over an [[IndexedSeq]] |
| 48 | * iterator in a collection is a single traverse iterator. | 48 | * |
| 49 | * We might be messing with internal state. | 49 | * This might not be the best solution, but at least it offers |
| 50 | * better flexibility than using the internal [[Seq]] iterator. | ||
| 51 | * On top of this, indexed access is guaranteed to be efficient. | ||
| 50 | */ | 52 | */ |
| 51 | private var iter = answers.answers.iterator | 53 | private var iter = answers.answers.map(_._2).toIndexedSeq |
| 54 | private var idx: Int = 0 | ||
| 52 | 55 | ||
| 53 | /** Reset the iterator over the answers. | 56 | /** Reset the iterator over the answers. */ |
| 54 | * | 57 | def reset(): Unit = idx = 0 |
| 55 | * @note this operation is currently not supported. | ||
| 56 | */ | ||
| 57 | def reset(): Unit = ??? | ||
| 58 | 58 | ||
| 59 | /** True if the iterator can provide more items. */ | 59 | /** True if the iterator can provide more items. */ |
| 60 | def isValid: Boolean = iter.hasNext | 60 | def isValid: Boolean = idx < iter.length |
| 61 | 61 | ||
| 62 | /** Get arity of answer variables. */ | 62 | /** Get arity of answer variables. */ |
| 63 | def getArity: Int = answers.query.answer.length | 63 | def getArity: Int = answers.query.answer.length |
| @@ -67,30 +67,31 @@ object RSACombAnswerTuples { | |||
| 67 | answers.query.answer.map(_.getName).toArray | 67 | answers.query.answer.map(_.getName).toArray |
| 68 | 68 | ||
| 69 | /** Advance iterator state */ | 69 | /** Advance iterator state */ |
| 70 | def moveNext(): Unit = { } | 70 | def moveNext(): Unit = idx += 1 |
| 71 | 71 | ||
| 72 | /** Get next [[uk.ac.ox.cs.pagoda.query.AnswerTuple]] from the iterator */ | 72 | /** Get next [[uk.ac.ox.cs.pagoda.query.AnswerTuple]] from the iterator */ |
| 73 | def getTuple: AnswerTuple = iter.next() | 73 | def getTuple: AnswerTuple = iter(idx) |
| 74 | 74 | ||
| 75 | /** Return true if the input tuple is part of this collection. | 75 | /** Return true if the input tuple is part of this collection. |
| 76 | * | 76 | * |
| 77 | * @param tuple the answer to be checked. | 77 | * @param tuple the answer to be checked. |
| 78 | * | ||
| 79 | * @note this operation is currently not supported. | ||
| 78 | */ | 80 | */ |
| 79 | def contains(tuple: AnswerTuple): Boolean = | 81 | def contains(tuple: AnswerTuple): Boolean = ??? |
| 80 | answers.contains(tuple) | ||
| 81 | 82 | ||
| 82 | /** Skip one item in the iterator. | 83 | /** Skip one item in the iterator. |
| 83 | * | 84 | * |
| 84 | * @note that the semantic of this method is not clear to the | 85 | * @note that the semantic of this method is not clear to the |
| 85 | * author and the description is just an assumption. | 86 | * author and the description is just an assumption. |
| 86 | */ | 87 | */ |
| 87 | def remove(): Unit = iter.next() | 88 | def remove(): Unit = moveNext() |
| 88 | } | 89 | } |
| 89 | 90 | ||
| 90 | /** Implicit conversion from RSAComb-style answers to [[uk.ac.ox.cs.pagoda.query.AnswerTuple]] */ | 91 | /** Implicit conversion from RSAComb-style answers to [[uk.ac.ox.cs.pagoda.query.AnswerTuple]] */
| 91 | private implicit def asAnswerTuple( | 92 | private implicit def asAnswerTuple( |
| 92 | answer: (Long,Seq[Resource]) | 93 | answer: Seq[Resource] |
| 93 | ): AnswerTuple = new AnswerTuple(answer._2.map(res => | 94 | ): AnswerTuple = new AnswerTuple(answer.map(res => |
| 94 | res match { | 95 | res match { |
| 95 | case r: IRI => OldIndividual.create(r.getIRI) | 96 | case r: IRI => OldIndividual.create(r.getIRI) |
| 96 | case r: BlankNode => OldBlankNode.create(r.getID) | 97 | case r: BlankNode => OldBlankNode.create(r.getID) |
diff --git a/src/main/scala/uk/ac/ox/cs/acqua/reasoner/AcquaQueryReasoner.scala b/src/main/scala/uk/ac/ox/cs/acqua/reasoner/AcquaQueryReasoner.scala index 4fe32d8..0aa5ff2 100644 --- a/src/main/scala/uk/ac/ox/cs/acqua/reasoner/AcquaQueryReasoner.scala +++ b/src/main/scala/uk/ac/ox/cs/acqua/reasoner/AcquaQueryReasoner.scala | |||
| @@ -16,13 +16,14 @@ | |||
| 16 | 16 | ||
| 17 | package uk.ac.ox.cs.acqua.reasoner | 17 | package uk.ac.ox.cs.acqua.reasoner |
| 18 | 18 | ||
| 19 | import java.util.LinkedList; | ||
| 20 | |||
| 19 | import scala.collection.JavaConverters._ | 21 | import scala.collection.JavaConverters._ |
| 20 | import org.semanticweb.karma2.profile.ELHOProfile | 22 | import org.semanticweb.karma2.profile.ELHOProfile |
| 21 | import org.semanticweb.owlapi.model.OWLOntology | 23 | import org.semanticweb.owlapi.model.OWLOntology |
| 22 | // import org.semanticweb.owlapi.model.parameters.Imports; | 24 | import org.semanticweb.owlapi.model.parameters.Imports |
| 23 | // import uk.ac.ox.cs.JRDFox.JRDFStoreException; | 25 | import uk.ac.ox.cs.JRDFox.JRDFStoreException; |
| 24 | import uk.ac.ox.cs.pagoda.multistage.MultiStageQueryEngine | 26 | import uk.ac.ox.cs.pagoda.multistage.MultiStageQueryEngine |
| 25 | // import uk.ac.ox.cs.pagoda.owl.EqualitiesEliminator; | ||
| 26 | import uk.ac.ox.cs.pagoda.owl.OWLHelper | 27 | import uk.ac.ox.cs.pagoda.owl.OWLHelper |
| 27 | import uk.ac.ox.cs.pagoda.query.{ | 28 | import uk.ac.ox.cs.pagoda.query.{ |
| 28 | AnswerTuples, | 29 | AnswerTuples, |
| @@ -30,7 +31,7 @@ import uk.ac.ox.cs.pagoda.query.{ | |||
| 30 | GapByStore4ID2, | 31 | GapByStore4ID2, |
| 31 | QueryRecord, | 32 | QueryRecord, |
| 32 | } | 33 | } |
| 33 | import uk.ac.ox.cs.pagoda.query.QueryRecord.Step; | 34 | import uk.ac.ox.cs.pagoda.query.QueryRecord.Step |
| 34 | import uk.ac.ox.cs.pagoda.reasoner.{ | 35 | import uk.ac.ox.cs.pagoda.reasoner.{ |
| 35 | ConsistencyManager, | 36 | ConsistencyManager, |
| 36 | MyQueryReasoner, | 37 | MyQueryReasoner, |
| @@ -38,28 +39,29 @@ import uk.ac.ox.cs.pagoda.reasoner.{ | |||
| 38 | } | 39 | } |
| 39 | import uk.ac.ox.cs.pagoda.reasoner.light.{KarmaQueryEngine,BasicQueryEngine} | 40 | import uk.ac.ox.cs.pagoda.reasoner.light.{KarmaQueryEngine,BasicQueryEngine} |
| 40 | import uk.ac.ox.cs.pagoda.rules.DatalogProgram | 41 | import uk.ac.ox.cs.pagoda.rules.DatalogProgram |
| 41 | // import uk.ac.ox.cs.pagoda.summary.HermitSummaryFilter; | 42 | import uk.ac.ox.cs.pagoda.summary.HermitSummaryFilter; |
| 42 | // import uk.ac.ox.cs.pagoda.tracking.QueryTracker; | ||
| 43 | import uk.ac.ox.cs.pagoda.tracking.{ | 43 | import uk.ac.ox.cs.pagoda.tracking.{ |
| 44 | QueryTracker, | ||
| 44 | TrackingRuleEncoder, | 45 | TrackingRuleEncoder, |
| 45 | TrackingRuleEncoderDisjVar1, | 46 | TrackingRuleEncoderDisjVar1, |
| 46 | TrackingRuleEncoderWithGap, | 47 | TrackingRuleEncoderWithGap, |
| 47 | } | 48 | } |
| 48 | // import uk.ac.ox.cs.pagoda.util.ExponentialInterpolation; | 49 | import uk.ac.ox.cs.pagoda.util.{ |
| 49 | // import uk.ac.ox.cs.pagoda.util.PagodaProperties; | 50 | ExponentialInterpolation, |
| 50 | import uk.ac.ox.cs.pagoda.util.Timer | 51 | PagodaProperties, |
| 51 | import uk.ac.ox.cs.pagoda.util.Utility | 52 | Timer, |
| 52 | // import uk.ac.ox.cs.pagoda.util.disposable.DisposedException; | 53 | Utility |
| 54 | } | ||
| 53 | import uk.ac.ox.cs.pagoda.util.tuples.Tuple; | 55 | import uk.ac.ox.cs.pagoda.util.tuples.Tuple; |
| 54 | import uk.ac.ox.cs.rsacomb.ontology.Ontology | 56 | import uk.ac.ox.cs.rsacomb.ontology.Ontology |
| 55 | import uk.ac.ox.cs.rsacomb.approximation.{Lowerbound,Upperbound} | 57 | import uk.ac.ox.cs.rsacomb.approximation.{Lowerbound,Upperbound} |
| 56 | 58 | ||
| 57 | // import java.util.Collection; | ||
| 58 | // import java.util.LinkedList; | ||
| 59 | |||
| 60 | class AcquaQueryReasoner(val ontology: Ontology) | 59 | class AcquaQueryReasoner(val ontology: Ontology) |
| 61 | extends QueryReasoner { | 60 | extends QueryReasoner { |
| 62 | 61 | ||
| 62 | /** Compatibility convertions between PAGOdA and RSAComb */ | ||
| 63 | import uk.ac.ox.cs.acqua.implicits.PagodaConverters._ | ||
| 64 | |||
| 63 | private var encoder: Option[TrackingRuleEncoder] = None | 65 | private var encoder: Option[TrackingRuleEncoder] = None |
| 64 | private var lazyUpperStore: Option[MultiStageQueryEngine] = None; | 66 | private var lazyUpperStore: Option[MultiStageQueryEngine] = None; |
| 65 | 67 | ||
| @@ -167,9 +169,11 @@ class AcquaQueryReasoner(val ontology: Ontology) | |||
| 167 | if (!isConsistent()) return false | 169 | if (!isConsistent()) return false |
| 168 | consistencyManager.extractBottomFragment(); | 170 | consistencyManager.extractBottomFragment(); |
| 169 | 171 | ||
| 170 | /* Force computation of lower/upper RSA approximations */ | 172 | /* Force computation of lower RSA approximations and its canonical |
| 171 | lowerRSAOntology//.computeCanonicalModel() | 173 | * model. We wait to process the upperbound since it might not be |
| 172 | upperRSAOntology//.computeCanonicalModel() | 174 | * necessary after all. */ |
| 175 | lowerRSAOntology.computeCanonicalModel() | ||
| 176 | //upperRSAOntology.computeCanonicalModel() | ||
| 173 | 177 | ||
| 174 | true | 178 | true |
| 175 | } | 179 | } |
| @@ -190,127 +194,152 @@ class AcquaQueryReasoner(val ontology: Ontology) | |||
| 190 | return _isConsistent.asBoolean | 194 | return _isConsistent.asBoolean |
| 191 | } | 195 | } |
| 192 | 196 | ||
| 197 | /** Evaluate a query against this reasoner. | ||
| 198 | * | ||
| 199 | * This is the main entry to compute the answers to a query. | ||
| 200 | * By the end of the computation, the query record passed as input | ||
| 201 | * will contain the answers found during the answering process. | ||
| 202 | * This behaves conservatively and will try very hard not to perform
| 203 | * unnecessary computation. | ||
| 204 | * | ||
| 205 | * @param query the query record to evaluate. | ||
| 206 | */ | ||
| 193 | def evaluate(query: QueryRecord): Unit = { | 207 | def evaluate(query: QueryRecord): Unit = { |
| 194 | if(queryLowerAndUpperBounds(query)) | 208 | val processed = |
| 195 | return; | 209 | queryLowerAndUpperBounds(query) || |
| 196 | 210 | queryRSALowerBound(query) || | |
| 197 | // OWLOntology relevantOntologySubset = extractRelevantOntologySubset(queryRecord); | 211 | queryRSAUpperBound(query) |
| 198 | 212 | if (!processed) { | |
| 199 | //// queryRecord.saveRelevantOntology("/home/alessandro/Desktop/test-relevant-ontology-"+relevantOntologiesCounter+".owl"); | 213 | val relevantOntologySubset: OWLOntology = |
| 200 | //// relevantOntologiesCounter++; | 214 | extractRelevantOntologySubset(query) |
| 201 | 215 | ||
| 202 | // if(properties.getSkolemUpperBound() == PagodaProperties.SkolemUpperBoundOptions.BEFORE_SUMMARISATION | 216 | if (properties.getSkolemUpperBound == PagodaProperties.SkolemUpperBoundOptions.BEFORE_SUMMARISATION && |
| 203 | // && querySkolemisedRelevantSubset(relevantOntologySubset, queryRecord)) { | 217 | querySkolemisedRelevantSubset(relevantOntologySubset, query) |
| 204 | // return; | 218 | ) return; |
| 205 | // } | 219 | |
| 206 | 220 | Utility logInfo ">> Summarisation <<" | |
| 207 | // Utility.logInfo(">> Summarisation <<"); | 221 | val summarisedChecker: HermitSummaryFilter = |
| 208 | // HermitSummaryFilter summarisedChecker = new HermitSummaryFilter(queryRecord, properties.getToCallHermiT()); | 222 | new HermitSummaryFilter(query, properties.getToCallHermiT) |
| 209 | // if(summarisedChecker.check(queryRecord.getGapAnswers()) == 0) { | 223 | if(summarisedChecker.check(query.getGapAnswers) == 0) { |
| 210 | // summarisedChecker.dispose(); | 224 | summarisedChecker.dispose() |
| 211 | // return; | 225 | return; |
| 212 | // } | 226 | } |
| 213 | 227 | ||
| 214 | // if(properties.getSkolemUpperBound() == PagodaProperties.SkolemUpperBoundOptions.AFTER_SUMMARISATION | 228 | if (properties.getSkolemUpperBound == PagodaProperties.SkolemUpperBoundOptions.AFTER_SUMMARISATION && |
| 215 | // && querySkolemisedRelevantSubset(relevantOntologySubset, queryRecord)) { | 229 | querySkolemisedRelevantSubset(relevantOntologySubset, query) |
| 216 | // summarisedChecker.dispose(); | 230 | ) { |
| 217 | // return; | 231 | summarisedChecker.dispose() |
| 218 | // } | 232 | return; |
| 219 | 233 | } | |
| 220 | // Utility.logInfo(">> Full reasoning <<"); | 234 | |
| 221 | // Timer t = new Timer(); | 235 | Utility logInfo ">> Full reasoning <<" |
| 222 | // summarisedChecker.checkByFullReasoner(queryRecord.getGapAnswers()); | 236 | timer.reset() |
| 223 | // Utility.logDebug("Total time for full reasoner: " + t.duration()); | 237 | summarisedChecker checkByFullReasoner query.getGapAnswers |
| 224 | 238 | Utility logDebug s"Total time for full reasoner: ${timer.duration()}" | |
| 225 | // if(properties.getToCallHermiT()) | 239 | |
| 226 | // queryRecord.markAsProcessed(); | 240 | if (properties.getToCallHermiT) query.markAsProcessed() |
| 227 | // summarisedChecker.dispose(); | 241 | |
| 228 | ??? | 242 | summarisedChecker.dispose() |
| 243 | } | ||
| 244 | } | ||
| 245 | |||
| 246 | /** Only compute the upperbound for a query. | ||
| 247 | * | ||
| 248 | * @note this is not supported at the moment. Look at | ||
| 249 | * [[uk.ac.ox.cs.pagoda.reasoner.MyQueryReasoner]] for an example | ||
| 250 | * implementation. | ||
| 251 | */ | ||
| 252 | def evaluateUpper(record: QueryRecord): Unit = ??? | ||
| 253 | |||
| 254 | /** Clean up the query reasoner */ | ||
| 255 | override def dispose(): Unit = { | ||
| 256 | super.dispose() | ||
| 257 | if(encoder.isDefined) encoder.get.dispose() | ||
| 258 | if(rlLowerStore != null) rlLowerStore.dispose(); | ||
| 259 | if(lazyUpperStore.isDefined) lazyUpperStore.get.dispose(); | ||
| 260 | if(elLowerStore != null) elLowerStore.dispose(); | ||
| 261 | if(trackingStore != null) trackingStore.dispose(); | ||
| 262 | if(consistencyManager != null) consistencyManager.dispose(); | ||
| 263 | if(datalog != null) datalog.dispose(); | ||
| 264 | } | ||
| 265 | |||
| 266 | /** Perform CQ answering for a specific upper bound engine. | ||
| 267 | * | ||
| 268 | * @param store upper bound engine to be used in the computation. | ||
| 269 | * @param query query record. | ||
| 270 | * @param queryText actual text of the query to be executed. | ||
| 271 | * @param answerVariables answer variables for the query. | ||
| 272 | */ | ||
| 273 | private def queryUpperBound( | ||
| 274 | store: BasicQueryEngine, | ||
| 275 | query: QueryRecord, | ||
| 276 | queryText: String, | ||
| 277 | answerVariables: Array[String] | ||
| 278 | ): Unit = { | ||
| 279 | var rlAnswer: AnswerTuples = null | ||
| 280 | try { | ||
| 281 | Utility logDebug queryText | ||
| 282 | rlAnswer = store.evaluate(queryText, answerVariables) | ||
| 283 | Utility logDebug timer.duration() | ||
| 284 | query updateUpperBoundAnswers rlAnswer | ||
| 285 | } finally { | ||
| 286 | if (rlAnswer != null) rlAnswer.dispose() | ||
| 287 | } | ||
| 229 | } | 288 | } |
| 230 | 289 | ||
| 231 | def evaluateUpper(record: QueryRecord): Unit= ??? | 290 | /** Perform CQ answering for a specific upper bound engine. |
| 232 | 291 | * | |
| 233 | // @Override | 292 | * @param store upper bound engine to be used in the computation. |
| 234 | // public void evaluate(QueryRecord queryRecord) { | 293 | * @param query query record. |
| 235 | // } | 294 | * @param extendedQuery extended version of the query. |
| 236 | 295 | * @param step difficulty of the current step. | |
| 237 | // @Override | 296 | * @returns whether the query has been fully answered, i.e., the |
| 238 | // public void evaluateUpper(QueryRecord queryRecord) { | 297 | * bounds computed so far coincide. |
| 239 | // if(isDisposed()) throw new DisposedException(); | 298 | * |
| 240 | // // TODO? add new upper store | 299 | * @note It deals with blanks nodes differently from variables |
| 241 | // AnswerTuples rlAnswer = null; | 300 | * according to SPARQL semantics for OWL2 Entailment Regime. In |
| 242 | // boolean useFull = queryRecord.isBottom() || lazyUpperStore == null; | 301 | * particular variables are matched only against named individuals, |
| 243 | // try { | 302 | * and blank nodes against named and anonymous individuals. |
| 244 | // rlAnswer = | 303 | */ |
| 245 | // (useFull ? trackingStore : lazyUpperStore).evaluate(queryRecord.getQueryText(), queryRecord.getAnswerVariables()); | 304 | private def queryUpperStore( |
| 246 | // queryRecord.updateUpperBoundAnswers(rlAnswer, true); | 305 | upperStore: BasicQueryEngine, |
| 247 | // } finally { | 306 | query: QueryRecord, |
| 248 | // if(rlAnswer != null) rlAnswer.dispose(); | 307 | extendedQuery: Tuple[String], |
| 249 | // } | 308 | step: Step |
| 250 | // } | 309 | ): Boolean = { |
| 251 | 310 | timer.reset(); | |
| 252 | // @Override | 311 | |
| 253 | // public void dispose() { | 312 | Utility logDebug "First query type" |
| 254 | // super.dispose(); | 313 | queryUpperBound(upperStore, query, query.getQueryText, query.getAnswerVariables) |
| 255 | 314 | if (!query.isProcessed() && !query.getQueryText().equals(extendedQuery.get(0))) { | |
| 256 | // if(encoder != null) encoder.dispose(); | 315 | Utility logDebug "Second query type" |
| 257 | // if(rlLowerStore != null) rlLowerStore.dispose(); | 316 | queryUpperBound(upperStore, query, extendedQuery.get(0), query.getAnswerVariables) |
| 258 | // if(lazyUpperStore != null) lazyUpperStore.dispose(); | 317 | } |
| 259 | // if(elLowerStore != null) elLowerStore.dispose(); | 318 | if (!query.isProcessed() && query.hasNonAnsDistinguishedVariables()) { |
| 260 | // if(trackingStore != null) trackingStore.dispose(); | 319 | Utility logDebug "Third query type" |
| 261 | // if(consistency != null) consistency.dispose(); | 320 | queryUpperBound(upperStore, query, extendedQuery.get(1), query.getDistinguishedVariables) |
| 262 | // if(program != null) program.dispose(); | 321 | } |
| 263 | // } | 322 | |
| 264 | 323 | query.addProcessingTime(step, timer.duration()) | |
| 265 | // protected void internal_importDataFile(String name, String datafile) { | 324 | if (query.isProcessed()) query.setDifficulty(step) |
| 266 | //// addDataFile(datafile); | 325 | query.isProcessed() |
| 267 | // rlLowerStore.importRDFData(name, datafile); | 326 | } |
| 268 | // if(lazyUpperStore != null) | 327 | |
| 269 | // lazyUpperStore.importRDFData(name, datafile); | 328 | /** Computes the bounds to the answers for a query. |
| 270 | // elLowerStore.importRDFData(name, datafile); | 329 | * |
| 271 | // trackingStore.importRDFData(name, datafile); | 330 | * Both the lower (RL + ELHO) and upper bounds are computed here. |
| 272 | // } | 331 | * |
| 273 | 332 | * @param query the query to be executed | |
| 274 | // /** | 333 | * @returns whether the query has been fully answered, i.e., the |
| 275 | // * It deals with blanks nodes differently from variables | 334 | * bounds computed so far coincide. |
| 276 | // * according to SPARQL semantics for OWL2 Entailment Regime. | 335 | */ |
| 277 | // * <p> | ||
| 278 | // * In particular variables are matched only against named individuals, | ||
| 279 | // * and blank nodes against named and anonymous individuals. | ||
| 280 | // */ | ||
| 281 | // private boolean queryUpperStore(BasicQueryEngine upperStore, QueryRecord queryRecord, | ||
| 282 | // Tuple<String> extendedQuery, Step step) { | ||
| 283 | // t.reset(); | ||
| 284 | |||
| 285 | // Utility.logDebug("First query type"); | ||
| 286 | // queryUpperBound(upperStore, queryRecord, queryRecord.getQueryText(), queryRecord.getAnswerVariables()); | ||
| 287 | // if(!queryRecord.isProcessed() && !queryRecord.getQueryText().equals(extendedQuery.get(0))) { | ||
| 288 | // Utility.logDebug("Second query type"); | ||
| 289 | // queryUpperBound(upperStore, queryRecord, extendedQuery.get(0), queryRecord.getAnswerVariables()); | ||
| 290 | // } | ||
| 291 | // if(!queryRecord.isProcessed() && queryRecord.hasNonAnsDistinguishedVariables()) { | ||
| 292 | // Utility.logDebug("Third query type"); | ||
| 293 | // queryUpperBound(upperStore, queryRecord, extendedQuery.get(1), queryRecord.getDistinguishedVariables()); | ||
| 294 | // } | ||
| 295 | |||
| 296 | // queryRecord.addProcessingTime(step, t.duration()); | ||
| 297 | // if(queryRecord.isProcessed()) { | ||
| 298 | // queryRecord.setDifficulty(step); | ||
| 299 | // return true; | ||
| 300 | // } | ||
| 301 | // return false; | ||
| 302 | // } | ||
| 303 | |||
| 304 | /** | ||
| 305 | * Returns the part of the ontology relevant for Hermit, while computing the bound answers. | ||
| 306 | */ | ||
| 307 | private def queryLowerAndUpperBounds(query: QueryRecord): Boolean = { | 336 | private def queryLowerAndUpperBounds(query: QueryRecord): Boolean = { |
| 308 | Utility logInfo ">> Base bounds <<" | 337 | Utility logInfo ">> Base bounds <<" |
| 309 | val extendedQueryTexts: Tuple[String] = query.getExtendedQueryText() | 338 | val extendedQueryTexts: Tuple[String] = query.getExtendedQueryText() |
| 310 | var rlAnswer: AnswerTuples = null | 339 | var rlAnswer: AnswerTuples = null |
| 311 | var elAnswer: AnswerTuples = null | 340 | var elAnswer: AnswerTuples = null |
| 312 | 341 | ||
| 313 | /* Computing RL lower bound answers */ | 342 | /* Compute RL lower bound answers */ |
| 314 | timer.reset(); | 343 | timer.reset(); |
| 315 | try { | 344 | try { |
| 316 | rlAnswer = rlLowerStore.evaluate(query.getQueryText, query.getAnswerVariables) | 345 | rlAnswer = rlLowerStore.evaluate(query.getQueryText, query.getAnswerVariables) |
| @@ -321,148 +350,182 @@ class AcquaQueryReasoner(val ontology: Ontology) | |||
| 321 | } | 350 | } |
| 322 | query.addProcessingTime(Step.LOWER_BOUND, timer.duration()); | 351 | query.addProcessingTime(Step.LOWER_BOUND, timer.duration()); |
| 323 | 352 | ||
| 353 | /* Compute upper bound answers */ | ||
| 324 | if(properties.getUseAlwaysSimpleUpperBound() || lazyUpperStore.isEmpty) { | 354 | if(properties.getUseAlwaysSimpleUpperBound() || lazyUpperStore.isEmpty) { |
| 325 | Utility logDebug "Tracking store" | 355 | Utility logDebug "Tracking store" |
| 326 | // if (queryUpperStore(trackingStore, query, extendedQueryTexts, Step.SIMPLE_UPPER_BOUND)) | 356 | if (queryUpperStore(trackingStore, query, extendedQueryTexts, Step.SIMPLE_UPPER_BOUND)) |
| 327 | // return true; | 357 | return true; |
| 358 | } | ||
| 359 | if (!query.isBottom) { | ||
| 360 | Utility logDebug "Lazy store" | ||
| 361 | if (lazyUpperStore.isDefined && queryUpperStore(lazyUpperStore.get, query, extendedQueryTexts, Step.LAZY_UPPER_BOUND)) | ||
| 362 | return true | ||
| 328 | } | 363 | } |
| 329 | 364 | ||
| 330 | // if(!queryRecord.isBottom()) { | 365 | timer.reset() |
| 331 | // Utility.logDebug("Lazy store"); | 366 | /* Compute ELHO lower bound answers */ |
| 332 | // if(lazyUpperStore != null && queryUpperStore(lazyUpperStore, queryRecord, extendedQueryTexts, Step.LAZY_UPPER_BOUND)) | 367 | try { |
| 333 | // return true; | 368 | elAnswer = elLowerStore.evaluate( |
| 334 | // } | 369 | extendedQueryTexts.get(0), |
| 335 | 370 | query.getAnswerVariables, | |
| 336 | // t.reset(); | 371 | query.getLowerBoundAnswers |
| 337 | // try { | 372 | ) |
| 338 | // elAnswer = elLowerStore.evaluate(extendedQueryTexts.get(0), | 373 | Utility logDebug timer.duration() |
| 339 | // queryRecord.getAnswerVariables(), | 374 | query updateLowerBoundAnswers elAnswer |
| 340 | // queryRecord.getLowerBoundAnswers()); | 375 | } finally { |
| 341 | // Utility.logDebug(t.duration()); | 376 | if (elAnswer != null) elAnswer.dispose() |
| 342 | // queryRecord.updateLowerBoundAnswers(elAnswer); | 377 | } |
| 343 | // } finally { | 378 | query.addProcessingTime(Step.EL_LOWER_BOUND, timer.duration()) |
| 344 | // if(elAnswer != null) elAnswer.dispose(); | 379 | |
| 345 | // } | 380 | if (query.isProcessed()) query.setDifficulty(Step.EL_LOWER_BOUND) |
| 346 | // queryRecord.addProcessingTime(Step.EL_LOWER_BOUND, t.duration()); | 381 | query.isProcessed() |
| 347 | 382 | } | |
| 348 | // if(queryRecord.isProcessed()) { | 383 | |
| 349 | // queryRecord.setDifficulty(Step.EL_LOWER_BOUND); | 384 | /** Compute lower bound using RSAComb. |
| 350 | // return true; | 385 | * |
| 351 | // } | 386 | * @param query query record to update. |
| 352 | 387 | * @returns true if the query is fully answered. | |
| 353 | return false; | 388 | */ |
| 389 | private def queryRSALowerBound(query: QueryRecord): Boolean = { | ||
| 390 | import uk.ac.ox.cs.acqua.implicits.RSACombAnswerTuples._ | ||
| 391 | val answers = lowerRSAOntology ask query | ||
| 392 | query updateLowerBoundAnswers answers | ||
| 393 | query.isProcessed | ||
| 354 | } | 394 | } |
| 355 | 395 | ||
| 356 | // private OWLOntology extractRelevantOntologySubset(QueryRecord queryRecord) { | 396 | /** Compute upper bound using RSAComb. |
| 357 | // Utility.logInfo(">> Relevant ontology-subset extraction <<"); | 397 | * |
| 358 | 398 | * @param query query record to update. | |
| 359 | // t.reset(); | 399 | * @returns true if the query is fully answered. |
| 360 | 400 | */ | |
| 361 | // QueryTracker tracker = new QueryTracker(encoder, rlLowerStore, queryRecord); | 401 | private def queryRSAUpperBound(query: QueryRecord): Boolean = { |
| 362 | // OWLOntology relevantOntologySubset = tracker.extract(trackingStore, consistency.getQueryRecords(), true); | 402 | import uk.ac.ox.cs.acqua.implicits.RSACombAnswerTuples._ |
| 363 | 403 | val answers = upperRSAOntology ask query | |
| 364 | // queryRecord.addProcessingTime(Step.FRAGMENT, t.duration()); | 404 | query updateUpperBoundAnswers answers |
| 365 | 405 | query.isProcessed | |
| 366 | // int numOfABoxAxioms = relevantOntologySubset.getABoxAxioms(Imports.INCLUDED).size(); | 406 | } |
| 367 | // int numOfTBoxAxioms = relevantOntologySubset.getAxiomCount() - numOfABoxAxioms; | 407 | |
| 368 | // Utility.logInfo("Relevant ontology-subset has been extracted: |ABox|=" | 408 | /** Extract a subset of the ontology relevant to the query. |
| 369 | // + numOfABoxAxioms + ", |TBox|=" + numOfTBoxAxioms); | 409 | * |
| 370 | 410 | * @param query query record for which the subset ontology is computed. | |
| 371 | // return relevantOntologySubset; | 411 | * @return an [[OWLOntology]] subset of the input ontology. |
| 372 | // } | 412 | */ |
| 373 | 413 | private def extractRelevantOntologySubset(query: QueryRecord): OWLOntology = { | |
| 374 | // private void queryUpperBound(BasicQueryEngine upperStore, QueryRecord queryRecord, String queryText, String[] answerVariables) { | 414 | Utility logInfo ">> Relevant ontology-subset extraction <<" |
| 375 | // AnswerTuples rlAnswer = null; | 415 | |
| 376 | // try { | 416 | timer.reset() |
| 377 | // Utility.logDebug(queryText); | 417 | |
| 378 | // rlAnswer = upperStore.evaluate(queryText, answerVariables); | 418 | val tracker: QueryTracker = |
| 379 | // Utility.logDebug(t.duration()); | 419 | new QueryTracker(encoder.get, rlLowerStore, query) |
| 380 | // queryRecord.updateUpperBoundAnswers(rlAnswer); | 420 | val relevantOntologySubset: OWLOntology = |
| 381 | // } finally { | 421 | tracker.extract( trackingStore, consistencyManager.getQueryRecords, true) |
| 382 | // if(rlAnswer != null) rlAnswer.dispose(); | 422 | |
| 383 | // } | 423 | query.addProcessingTime(Step.FRAGMENT, timer.duration()) |
| 384 | // } | 424 | |
| 385 | 425 | val numOfABoxAxioms: Int = relevantOntologySubset.getABoxAxioms(Imports.INCLUDED).size | |
| 386 | // private boolean querySkolemisedRelevantSubset(OWLOntology relevantSubset, QueryRecord queryRecord) { | 426 | val numOfTBoxAxioms: Int = relevantOntologySubset.getAxiomCount() - numOfABoxAxioms |
| 387 | // Utility.logInfo(">> Semi-Skolemisation <<"); | 427 | Utility logInfo s"Relevant ontology-subset has been extracted: |ABox|=$numOfABoxAxioms, |TBox|=$numOfTBoxAxioms" |
| 388 | // t.reset(); | 428 | |
| 389 | 429 | return relevantOntologySubset | |
| 390 | // DatalogProgram relevantProgram = new DatalogProgram(relevantSubset); | 430 | } |
| 391 | 431 | ||
| 392 | // MultiStageQueryEngine relevantStore = | 432 | /** Query the skolemized ontology subset relevant to a query record. |
| 393 | // new MultiStageQueryEngine("Relevant-store", true); // checkValidity is true | 433 | * |
| 394 | 434 | * @param relevantSubset the relevant ontology subset. | |
| 395 | // relevantStore.importDataFromABoxOf(relevantSubset); | 435 | * @param query the query to be answered. |
| 396 | // String relevantOriginalMarkProgram = OWLHelper.getOriginalMarkProgram(relevantSubset); | 436 | * @returns true if the query has been fully answered. |
| 397 | 437 | * | |
| 398 | // relevantStore.materialise("Mark original individuals", relevantOriginalMarkProgram); | 438 | * TODO: the code has been adapted from [[uk.ac.ox.cs.pagoda.reasoner.MyQueryReasoner]] |
| 399 | 439 | * and ported to Scala. There are better, more Scala-esque ways of | |
| 400 | // boolean isFullyProcessed = false; | 440 | * dealing with the big `while` in this function, but this should work |
| 401 | // LinkedList<Tuple<Long>> lastTwoTriplesCounts = new LinkedList<>(); | 441 | * for now. |
| 402 | // for (int currentMaxTermDepth = 1; !isFullyProcessed; currentMaxTermDepth++) { | 442 | */ |
| 403 | 443 | private def querySkolemisedRelevantSubset( | |
| 404 | // if(currentMaxTermDepth > properties.getSkolemDepth()) { | 444 | relevantSubset: OWLOntology, |
| 405 | // Utility.logInfo("Maximum term depth reached"); | 445 | query: QueryRecord |
| 406 | // break; | 446 | ): Boolean = { |
| 407 | // } | 447 | Utility logInfo ">> Semi-Skolemisation <<" |
| 408 | 448 | timer.reset() | |
| 409 | // if(lastTwoTriplesCounts.size() == 2) { | 449 | |
| 410 | // if(lastTwoTriplesCounts.get(0).get(1).equals(lastTwoTriplesCounts.get(1).get(1))) | 450 | val relevantProgram: DatalogProgram = new DatalogProgram(relevantSubset) |
| 411 | // break; | 451 | val relevantStore: MultiStageQueryEngine = |
| 412 | 452 | new MultiStageQueryEngine("Relevant-store", true) | |
| 413 | // ExponentialInterpolation interpolation = new ExponentialInterpolation(lastTwoTriplesCounts.get(0).get(0), | 453 | relevantStore importDataFromABoxOf relevantSubset |
| 414 | // lastTwoTriplesCounts.get(0).get(1), | 454 | val relevantOriginalMarkProgram: String = |
| 415 | // lastTwoTriplesCounts.get(1).get(0), | 455 | OWLHelper getOriginalMarkProgram relevantSubset |
| 416 | // lastTwoTriplesCounts.get(1).get(1)); | 456 | relevantStore.materialise("Mark original individuals", relevantOriginalMarkProgram) |
| 417 | // double triplesEstimate = interpolation.computeValue(currentMaxTermDepth); | 457 | |
| 418 | 458 | var isFullyProcessed = false | |
| 419 | // Utility.logDebug("Estimate of the number of triples:" + triplesEstimate); | 459 | val lastTwoTriplesCounts: LinkedList[Tuple[Long]] = new LinkedList() |
| 420 | 460 | var currentMaxTermDepth = 1 | |
| 421 | // // exit condition if the query is not fully answered | 461 | var keepGoing = true |
| 422 | // if(triplesEstimate > properties.getMaxTriplesInSkolemStore()) { | 462 | while (!isFullyProcessed && keepGoing) { |
| 423 | // Utility.logInfo("Interrupting Semi-Skolemisation because of triples count limit"); | 463 | if (currentMaxTermDepth > properties.getSkolemDepth) { |
| 424 | // break; | 464 | Utility logInfo "Maximum term depth reached" |
| 425 | // } | 465 | keepGoing = false |
| 426 | // } | 466 | } else if ( |
| 427 | 467 | lastTwoTriplesCounts.size() == 2 && ( | |
| 428 | // Utility.logInfo("Trying with maximum depth " + currentMaxTermDepth); | 468 | lastTwoTriplesCounts.get(0).get(1).equals(lastTwoTriplesCounts.get(1).get(1)) || |
| 429 | 469 | { | |
| 430 | // int materialisationTag = relevantStore.materialiseSkolemly(relevantProgram, null, | 470 | val interpolation: ExponentialInterpolation = |
| 431 | // currentMaxTermDepth); | 471 | new ExponentialInterpolation( |
| 432 | // queryRecord.addProcessingTime(Step.SKOLEM_UPPER_BOUND, t.duration()); | 472 | lastTwoTriplesCounts.get(0).get(0), |
| 433 | // if(materialisationTag == -1) { | 473 | lastTwoTriplesCounts.get(0).get(1), |
| 434 | // relevantStore.dispose(); | 474 | lastTwoTriplesCounts.get(1).get(0), |
| 435 | // throw new Error("A consistent ontology has turned out to be " + | 475 | lastTwoTriplesCounts.get(1).get(1) |
| 436 | // "inconsistent in the Skolemises-relevant-upper-store"); | 476 | ) |
| 437 | // } | 477 | val triplesEstimate: Double = |
| 438 | // else if(materialisationTag != 1) { | 478 | interpolation computeValue currentMaxTermDepth |
| 439 | // Utility.logInfo("Semi-Skolemised relevant upper store cannot be employed"); | 479 | Utility logDebug s"Estimate of the number of triples: $triplesEstimate" |
| 440 | // break; | 480 | if (triplesEstimate > properties.getMaxTriplesInSkolemStore) |
| 441 | // } | 481 | Utility logInfo "Interrupting Semi-Skolemisation because of triples count limit" |
| 442 | 482 | triplesEstimate > properties.getMaxTriplesInSkolemStore | |
| 443 | // Utility.logInfo("Querying semi-Skolemised upper store..."); | 483 | } |
| 444 | // isFullyProcessed = queryUpperStore(relevantStore, queryRecord, | 484 | ) |
| 445 | // queryRecord.getExtendedQueryText(), | 485 | ) { |
| 446 | // Step.SKOLEM_UPPER_BOUND); | 486 | keepGoing = false |
| 447 | 487 | } else { | |
| 448 | // try { | 488 | Utility logInfo s"Trying with maximum depth $currentMaxTermDepth" |
| 449 | // lastTwoTriplesCounts.add | 489 | |
| 450 | // (new Tuple<>((long) currentMaxTermDepth, relevantStore.getStoreSize())); | 490 | val materialisationTag: Int = |
| 451 | // } catch (JRDFStoreException e) { | 491 | relevantStore.materialiseSkolemly(relevantProgram, null, currentMaxTermDepth) |
| 452 | // e.printStackTrace(); | 492 | query.addProcessingTime(Step.SKOLEM_UPPER_BOUND, timer.duration()) |
| 453 | // break; | 493 | if (materialisationTag == -1) { |
| 454 | // } | 494 | relevantStore.dispose() |
| 455 | // if(lastTwoTriplesCounts.size() > 2) | 495 | throw new Error("A consistent ontology has turned out to be inconsistent in the Skolemises-relevant-upper-store") |
| 456 | // lastTwoTriplesCounts.remove(); | 496 | } |
| 457 | 497 | ||
| 458 | // Utility.logDebug("Last two triples counts:" + lastTwoTriplesCounts); | 498 | if (materialisationTag != 1) { |
| 459 | // } | 499 | Utility logInfo "Semi-Skolemised relevant upper store cannot be employed" |
| 460 | 500 | keepGoing = false | |
| 461 | // relevantStore.dispose(); | 501 | } else { |
| 462 | // Utility.logInfo("Semi-Skolemised relevant upper store has been evaluated"); | 502 | Utility logInfo "Querying semi-Skolemised upper store..." |
| 463 | // return isFullyProcessed; | 503 | isFullyProcessed = queryUpperStore( |
| 464 | // } | 504 | relevantStore, query, query.getExtendedQueryText(), Step.SKOLEM_UPPER_BOUND |
| 505 | ) | ||
| 506 | |||
| 507 | try { | ||
| 508 | lastTwoTriplesCounts.add(new Tuple(currentMaxTermDepth, relevantStore.getStoreSize)) | ||
| 509 | if (lastTwoTriplesCounts.size() > 2) | ||
| 510 | lastTwoTriplesCounts.remove() | ||
| 511 | Utility logDebug s"Last two triples counts: $lastTwoTriplesCounts" | ||
| 512 | currentMaxTermDepth += 1 | ||
| 513 | } catch { | ||
| 514 | case e: JRDFStoreException => { | ||
| 515 | e.printStackTrace() | ||
| 516 | keepGoing = false | ||
| 517 | } | ||
| 518 | } | ||
| 519 | } | ||
| 520 | } | ||
| 521 | } | ||
| 522 | |||
| 523 | relevantStore.dispose() | ||
| 524 | Utility logInfo "Semi-Skolemised relevant upper store has been evaluated" | ||
| 525 | isFullyProcessed | ||
| 526 | } | ||
| 465 | 527 | ||
| 528 | /** Consistency status of the ontology */ | ||
| 466 | private sealed trait ConsistencyStatus { | 529 | private sealed trait ConsistencyStatus { |
| 467 | val asBoolean = false | 530 | val asBoolean = false |
| 468 | } | 531 | } |
diff --git a/src/main/scala/uk/ac/ox/cs/acqua/reasoner/RSAQueryReasoner.scala b/src/main/scala/uk/ac/ox/cs/acqua/reasoner/RSACombQueryReasoner.scala index a6c5276..6d89b7b 100644 --- a/src/main/scala/uk/ac/ox/cs/acqua/reasoner/RSAQueryReasoner.scala +++ b/src/main/scala/uk/ac/ox/cs/acqua/reasoner/RSACombQueryReasoner.scala | |||
| @@ -20,24 +20,20 @@ import java.util.Collection; | |||
| 20 | import scala.collection.JavaConverters._ | 20 | import scala.collection.JavaConverters._ |
| 21 | 21 | ||
| 22 | import org.semanticweb.owlapi.model.OWLOntology | 22 | import org.semanticweb.owlapi.model.OWLOntology |
| 23 | import uk.ac.ox.cs.rsacomb.ontology.RSAOntology | ||
| 24 | import uk.ac.ox.cs.rsacomb.approximation.{Approximation,Lowerbound} | 23 | import uk.ac.ox.cs.rsacomb.approximation.{Approximation,Lowerbound} |
| 25 | import uk.ac.ox.cs.rsacomb.ontology.Ontology | 24 | import uk.ac.ox.cs.rsacomb.ontology.{Ontology,RSAOntology} |
| 26 | import uk.ac.ox.cs.pagoda.query.QueryRecord | 25 | import uk.ac.ox.cs.pagoda.query.QueryRecord |
| 27 | import uk.ac.ox.cs.pagoda.reasoner.QueryReasoner | 26 | import uk.ac.ox.cs.pagoda.reasoner.QueryReasoner |
| 27 | import uk.ac.ox.cs.acqua.approximation.Noop | ||
| 28 | 28 | ||
| 29 | class RSAQueryReasoner(val origin: Ontology) extends QueryReasoner { | 29 | class RSACombQueryReasoner( |
| 30 | val origin: Ontology, | ||
| 31 | val toRSA: Approximation[RSAOntology] = Noop | ||
| 32 | ) extends QueryReasoner { | ||
| 30 | 33 | ||
| 31 | /* Implicit compatibility between PAGOdA and RSAComb types */ | 34 | /* Implicit compatibility between PAGOdA and RSAComb types */ |
| 32 | import uk.ac.ox.cs.acqua.implicits.PagodaConverters._ | 35 | import uk.ac.ox.cs.acqua.implicits.PagodaConverters._ |
| 33 | 36 | ||
| 34 | /** This class is instantiated when the input ontology is RSA. | ||
| 35 | * Approximation (via any algorithm with RSAOntology as target) | ||
| 36 | * doesn't perform anything, but is useful to turn a generic | ||
| 37 | * [[uk.ac.ox.cs.rsacomb.ontology.Ontology]] into an | ||
| 38 | * [[uk.ac.ox.cs.rsacomb.RSAOntology]]. | ||
| 39 | */ | ||
| 40 | private val toRSA: Approximation[RSAOntology] = new Lowerbound | ||
| 41 | val rsa: RSAOntology = origin approximate toRSA | 37 | val rsa: RSAOntology = origin approximate toRSA |
| 42 | 38 | ||
| 43 | /** Doesn't perform any action. | 39 | /** Doesn't perform any action. |
