|
1 |
| -# from ontolearn.semantic_caching import run_semantic_cache, run_non_semantic_cache |
2 |
| -# |
3 |
| - |
4 |
| -# class TestSemanticCache: |
5 |
| -# def setup_method(self): |
6 |
| -# self.path_kg = "KGs/Family/father.owl" #path to the father datasets |
7 |
| -# self.path_kge = None |
8 |
| -# self.symbolic_reasoner = "HermiT" |
9 |
| -# self.neural_reasoner = "EBR" |
10 |
| -# self.num_concepts = 800 |
11 |
| -# self.cache_size = 0.8*self.num_concepts |
12 |
| -# self.eviction = "LRU" |
13 |
| -# self.cache_type = "cold" |
14 |
| -# |
15 |
| -# def run_cache_tests(self, cache_semantic, cache_non_semantic): |
16 |
| -# assert cache_semantic["hit_ratio"] >= cache_non_semantic["hit_ratio"], f"Expected semantic caching to have higher hit ratio, but got {cache_semantic['hit_ratio']} vs {cache_non_semantic['hit_ratio']}" |
17 |
| -# assert cache_semantic["miss_ratio"] <= cache_non_semantic["miss_ratio"], f"Expected semantic caching to have lower miss ratio, but got {cache_semantic['miss_ratio']} vs {cache_non_semantic['miss_ratio']}" |
18 |
| -# |
19 |
| -# def test_jaccard(self): |
20 |
| -# |
21 |
| -# cache_neural,_ = run_semantic_cache(self.path_kg, self.path_kge, self.cache_size, self.neural_reasoner, self.eviction, 0, self.cache_type, True) |
22 |
| -# cache_symbolic,_ = run_semantic_cache(self.path_kg, self.path_kge, self.cache_size, self.symbolic_reasoner, self.eviction, 0, self.cache_type, True) |
23 |
| -# |
24 |
| -# assert float(cache_neural["avg_jaccard"]) >= float(cache_neural["avg_jaccard_reas"]), "Expected average Jaccard similarity to be at least as good as reasoner-based retrieval." |
25 |
| -# assert float(cache_symbolic["avg_jaccard"]) >= float(cache_symbolic["avg_jaccard_reas"]), "Expected average Jaccard similarity to be at least as good as reasoner-based retrieval." |
26 |
| -# |
27 |
| -# |
28 |
| -# def test_cache_methods(self): |
29 |
| -# for reasoner in [self.neural_reasoner, self.symbolic_reasoner]: |
30 |
| -# cache_semantic,_ = run_semantic_cache(self.path_kg, self.path_kge, self.cache_size, reasoner, self.eviction, 0, self.cache_type, True) |
31 |
| -# cache_non_semantic,_ = run_non_semantic_cache(self.path_kg, self.path_kge, self.cache_size, reasoner, True) |
32 |
| -# self.run_cache_tests(cache_semantic, cache_non_semantic) |
33 |
| -# |
34 |
| -# def test_cache_size(self): |
35 |
| -# cache_large,_ = run_semantic_cache(self.path_kg, self.path_kge, self.cache_size, self.neural_reasoner, self.eviction, 0, self.cache_type, True) |
36 |
| -# |
37 |
| -# for k in [0.1, 0.2]: |
38 |
| -# cache_small,_ = run_semantic_cache(self.path_kg, self.path_kge, k * self.num_concepts, self.neural_reasoner, self.eviction, 0, self.cache_type, True) |
39 |
| -# assert cache_small["hit_ratio"] <= cache_large["hit_ratio"], f"Expected hit ratio to increase with cache size, but got {cache_small['hit_ratio']} vs {cache_large['hit_ratio']}" |
40 |
| -# assert cache_small["miss_ratio"] >= cache_large["miss_ratio"], f"Expected miss ratio to decrease with cache size, but got {cache_small['miss_ratio']} vs {cache_large['miss_ratio']}" |
41 |
| -# |
42 |
| -# def test_eviction_strategy(self): |
43 |
| -# eviction_strategies = ["LRU", "FIFO", "LIFO", "MRU", "RP"] |
44 |
| -# results = {strategy: float(run_semantic_cache(self.path_kg, self.path_kge, self.cache_size, self.neural_reasoner, strategy, 10, self.cache_type, True)[0]["hit_ratio"]) for strategy in eviction_strategies} |
45 |
| -# |
46 |
| -# for strategy, hit_ratio in results.items(): |
47 |
| -# assert isinstance(hit_ratio, float), f"Hit ratio for {strategy} should be a float, but got {type(hit_ratio)}" |
48 |
| -# |
49 |
| -# best_strategy = max(results, key=results.get) |
50 |
| -# assert best_strategy == "LRU", f"Expected LRU to be the best, but got {best_strategy}" |
51 |
| -# |
52 |
| -# assert results, "No results were generated, possibly due to a failure in the cache evaluation process." |
53 |
| -# for strategy, hit_ratio in results.items(): |
54 |
| -# assert 0.0 <= hit_ratio <= 1.0, f"Hit ratio for {strategy} is out of bounds: {hit_ratio}" |
| 1 | +import os |
| 2 | + |
# Default to the first GPU unless the caller already pinned devices;
# this must happen before torch is imported to take effect.
os.environ.setdefault("CUDA_VISIBLE_DEVICES", "0")
| 6 | +import torch |
| 7 | +from ontolearn.semantic_caching import run_semantic_cache, run_non_semantic_cache |
| 8 | + |
| 9 | + |
def check_cuda():
    """Print whether a CUDA-capable GPU is visible to torch.

    Purely informational: it does not change any device configuration
    (CUDA_VISIBLE_DEVICES is handled at module import time).
    """
    gpu_available = torch.cuda.is_available()
    if not gpu_available:
        print("No GPU detected. Running on CPU.")
    else:
        print("GPU detected. Setting CUDA_VISIBLE_DEVICES=0")


# Report the device situation once, when the test module is imported.
check_cuda()
| 17 | + |
class TestSemanticCache:
    """Exercises semantic vs. non-semantic caching over the father ontology.

    Compares hit/miss ratios and Jaccard-based retrieval quality across
    reasoners, cache sizes, and eviction strategies.
    """

    def setup_method(self):
        """Shared fixture: dataset path, reasoner names, and cache configuration."""
        self.path_kg = "KGs/Family/father.owl"  # path to the father dataset
        self.path_kge = None  # no pretrained KGE model
        self.symbolic_reasoner = "HermiT"
        self.neural_reasoner = "EBR"
        self.num_concepts = 800
        self.cache_size = 0.8 * self.num_concepts  # cache holds 80% of concepts
        self.eviction = "LRU"
        self.cache_type = "cold"

    def run_cache_tests(self, cache_semantic, cache_non_semantic):
        """Assert the semantic cache is at least as effective as the plain cache."""
        assert cache_semantic["hit_ratio"] >= cache_non_semantic["hit_ratio"], f"Expected semantic caching to have higher hit ratio, but got {cache_semantic['hit_ratio']} vs {cache_non_semantic['hit_ratio']}"
        assert cache_semantic["miss_ratio"] <= cache_non_semantic["miss_ratio"], f"Expected semantic caching to have lower miss ratio, but got {cache_semantic['miss_ratio']} vs {cache_non_semantic['miss_ratio']}"

    def test_jaccard(self):
        """Cached retrieval should match or beat reasoner-based retrieval on Jaccard."""
        caches = []
        for reasoner in (self.neural_reasoner, self.symbolic_reasoner):
            cache, _ = run_semantic_cache(self.path_kg, self.path_kge, self.cache_size, reasoner, self.eviction, 0, self.cache_type, True)
            caches.append(cache)

        for cache in caches:
            assert float(cache["avg_jaccard"]) >= float(cache["avg_jaccard_reas"]), "Expected average Jaccard similarity to be at least as good as reasoner-based retrieval."

    def test_cache_methods(self):
        """Semantic caching must not underperform plain caching for either reasoner."""
        for reasoner in (self.neural_reasoner, self.symbolic_reasoner):
            cache_semantic, _ = run_semantic_cache(self.path_kg, self.path_kge, self.cache_size, reasoner, self.eviction, 0, self.cache_type, True)
            cache_non_semantic, _ = run_non_semantic_cache(self.path_kg, self.path_kge, self.cache_size, reasoner, True)
            self.run_cache_tests(cache_semantic, cache_non_semantic)

    def test_cache_size(self):
        """A larger cache should yield a higher hit ratio and a lower miss ratio."""
        cache_large, _ = run_semantic_cache(self.path_kg, self.path_kge, self.cache_size, self.neural_reasoner, self.eviction, 0, self.cache_type, True)

        for k in (0.1, 0.2):
            cache_small, _ = run_semantic_cache(self.path_kg, self.path_kge, k * self.num_concepts, self.neural_reasoner, self.eviction, 0, self.cache_type, True)
            assert cache_small["hit_ratio"] <= cache_large["hit_ratio"], f"Expected hit ratio to increase with cache size, but got {cache_small['hit_ratio']} vs {cache_large['hit_ratio']}"
            assert cache_small["miss_ratio"] >= cache_large["miss_ratio"], f"Expected miss ratio to decrease with cache size, but got {cache_small['miss_ratio']} vs {cache_large['miss_ratio']}"

    def test_eviction_strategy(self):
        """Among the supported eviction strategies, LRU should hit most often."""
        results = {}
        for strategy in ["LRU", "FIFO", "LIFO", "MRU", "RP"]:
            stats, _ = run_semantic_cache(self.path_kg, self.path_kge, self.cache_size, self.neural_reasoner, strategy, 10, self.cache_type, True)
            results[strategy] = float(stats["hit_ratio"])

        for strategy, hit_ratio in results.items():
            assert isinstance(hit_ratio, float), f"Hit ratio for {strategy} should be a float, but got {type(hit_ratio)}"

        best_strategy = max(results, key=results.get)
        assert best_strategy == "LRU", f"Expected LRU to be the best, but got {best_strategy}"

        assert results, "No results were generated, possibly due to a failure in the cache evaluation process."
        for strategy, hit_ratio in results.items():
            assert 0.0 <= hit_ratio <= 1.0, f"Hit ratio for {strategy} is out of bounds: {hit_ratio}"
0 commit comments