diff --git a/docs/examples/algo/complete-example.py b/docs/examples/algo/complete-algo-example.py
similarity index 97%
rename from docs/examples/algo/complete-example.py
rename to docs/examples/algo/complete-algo-example.py
index 52f8f68ced..6a181f1007 100644
--- a/docs/examples/algo/complete-example.py
+++ b/docs/examples/algo/complete-algo-example.py
@@ -153,11 +153,13 @@ def runHotgym():
     )
 
     # Print the best prediction for 1 step out.
-    probability, value = sorted(
+    oneStepConfidence, oneStep = sorted(
       zip(classifierResult[1], classifierResult["actualValues"]),
       reverse=True
     )[0]
-    print("1-step: {:16} ({:4.4}%)".format(value, probability * 100))
+    print("1-step: {:16} ({:4.4}%)".format(oneStep, oneStepConfidence * 100))
+
+    yield oneStep, oneStepConfidence * 100, None, None
diff --git a/docs/examples/network/complete-example.py b/docs/examples/network/complete-network-example.py
similarity index 94%
rename from docs/examples/network/complete-example.py
rename to docs/examples/network/complete-network-example.py
index 509837f917..251eb20753 100644
--- a/docs/examples/network/complete-example.py
+++ b/docs/examples/network/complete-network-example.py
@@ -100,7 +100,7 @@ def getPredictionResults(network, clRegionName):
   N = classifierRegion.getSelf().maxCategoryCount
   results = {step: {} for step in steps}
   for i in range(len(steps)):
-    # stepProbabilities: probabilities for this prediction step only.
+    # stepProbabilities are probabilities for this prediction step only.
     stepProbabilities = probabilities[i * N:(i + 1) * N - 1]
     mostLikelyCategoryIdx = stepProbabilities.argmax()
     predictedValue = actualValues[mostLikelyCategoryIdx]
@@ -143,11 +143,10 @@ def runHotgym():
     fiveStep = results[5]["predictedValue"]
     fiveStepConfidence = results[5]["predictionConfidence"]
 
-    print("1-step: {:16} ({:4.4}%)\t"
-          "5-step: {:16} ({:4.4}%)".format(oneStep,
-                                           oneStepConfidence * 100,
-                                           fiveStep,
-                                           fiveStepConfidence * 100))
+    result = (oneStep, oneStepConfidence * 100,
+              fiveStep, fiveStepConfidence * 100)
+    print "1-step: {:16} ({:4.4}%)\t 5-step: {:16} ({:4.4}%)".format(*result)
+    yield result
diff --git a/docs/examples/opf/complete-example.py b/docs/examples/opf/complete-opf-example.py
similarity index 82%
rename from docs/examples/opf/complete-example.py
rename to docs/examples/opf/complete-opf-example.py
index d476d66763..8eab69f934 100644
--- a/docs/examples/opf/complete-example.py
+++ b/docs/examples/opf/complete-opf-example.py
@@ -42,11 +42,10 @@ def runHotgym():
     fiveStep = bestPredictions[5]
     fiveStepConfidence = allPredictions[5][fiveStep]
 
-    print("1-step: {:16} ({:4.4}%)\t"
-          "5-step: {:16} ({:4.4}%)".format(oneStep,
-                                           oneStepConfidence * 100,
-                                           fiveStep,
-                                           fiveStepConfidence * 100))
+    result = (oneStep, oneStepConfidence * 100,
+              fiveStep, fiveStepConfidence * 100)
+    print "1-step: {:16} ({:4.4}%)\t 5-step: {:16} ({:4.4}%)".format(*result)
+    yield result
diff --git a/docs/examples/opf/results-example.py b/docs/examples/opf/results-example.py
index 535adf06b0..dc7287ceb2 100644
--- a/docs/examples/opf/results-example.py
+++ b/docs/examples/opf/results-example.py
@@ -7,9 +7,6 @@
 oneStepConfidence = allPredictions[1][oneStep]
 fiveStepConfidence = allPredictions[5][fiveStep]
 
-print("1-step: {:16} ({:4.4}%)\t5-step: {:16} ({:4.4}%)".format(
-  oneStep,
-  oneStepConfidence*100,
-  fiveStep,
-  fiveStepConfidence*100
-))
+result = (oneStep, oneStepConfidence * 100,
+          fiveStep, fiveStepConfidence * 100)
+print "1-step: {:16} ({:4.4}%)\t 5-step: {:16} ({:4.4}%)".format(*result)
diff --git a/docs/source/quick-start/algorithms.rst b/docs/source/quick-start/algorithms.rst
index 595e83dc04..e30eb8fa02 100644
--- a/docs/source/quick-start/algorithms.rst
+++ b/docs/source/quick-start/algorithms.rst
@@ -9,7 +9,7 @@ Here is the complete program we are going to use as an example. In sections
 below, we'll break it down into parts and explain what is happening (without
 some of the plumbing details).
 
-.. literalinclude:: ../../examples/algo/complete-example.py
+.. literalinclude:: ../../examples/algo/complete-algo-example.py
 
 
 Encoding Data
diff --git a/docs/source/quick-start/network.rst b/docs/source/quick-start/network.rst
index 827a49d940..b444cc2a12 100644
--- a/docs/source/quick-start/network.rst
+++ b/docs/source/quick-start/network.rst
@@ -9,7 +9,7 @@ Here is the complete program we are going to use as an example. In sections
 below, we'll break it down into parts and explain what is happening (without
 some of the plumbing details).
 
-.. literalinclude:: ../../examples/network/complete-example.py
+.. literalinclude:: ../../examples/network/complete-network-example.py
 
 Network Parameters
 ^^^^^^^^^^^^^^^^^^
diff --git a/docs/source/quick-start/opf.rst b/docs/source/quick-start/opf.rst
index ec9d945f13..8887d22ddd 100644
--- a/docs/source/quick-start/opf.rst
+++ b/docs/source/quick-start/opf.rst
@@ -9,7 +9,7 @@ Here is the complete program we are going to use as an example. In sections
 below, we'll break it down into parts and explain what is happening (without
 some of the plumbing details).
 
-.. literalinclude:: ../../examples/opf/complete-example.py
+.. literalinclude:: ../../examples/opf/complete-opf-example.py
 
 Model Parameters
 ^^^^^^^^^^^^^^^^
diff --git a/tests/unit/nupic/docs/examples_test.py b/tests/unit/nupic/docs/examples_test.py
index 7c54242962..8dcb6765f6 100644
--- a/tests/unit/nupic/docs/examples_test.py
+++ b/tests/unit/nupic/docs/examples_test.py
@@ -24,50 +24,154 @@
 import os
 import sys
 
 import unittest2 as unittest
+import numpy as np
+import random
+
+SEED = 42
+random.seed(SEED)
+np.random.seed(SEED)
 
-def _runExample():
-  """Import and run main function runHotgym() in complete-example.py"""
-  mod = __import__("complete-example", fromlist=["runHotgym"])
-  runHotgym = getattr(mod, 'runHotgym')
-  runHotgym()
+def _getPredictionsGenerator(examplesDir, exampleName):
+  """
+  Get predictions generator for one of the quick-start examples.
+
+  .. note::
-class ExamplesTest(unittest.TestCase):
-  """Unit tests for all quick-start examples."""
+    The examples are not part of the nupic package so we need to manually
+    append the example module path to syspath.
+
+  :param examplesDir:
+    (str) path to the example parent directory.
+  :param exampleName:
+    (str) name of the example. E.g.: "opf", "network", "algo".
+  :return predictionsGenerator:
+    (function) predictions generator function.
+ """ - def setUp(self): - docsTestsPath = os.path.dirname(os.path.abspath(__file__)) - self.examplesDir = os.path.join(docsTestsPath, os.path.pardir, - os.path.pardir, os.path.pardir, - os.path.pardir, "docs", "examples") + sys.path.insert(0, os.path.join(examplesDir, exampleName)) + modName = "complete-%s-example" % exampleName + mod = __import__(modName, fromlist=["runHotgym"]) + return getattr(mod, "runHotgym") - def testExamplesDirExists(self): - """Make sure the ``examples`` directory is in the correct location""" - self.assertTrue(os.path.exists(self.examplesDir), - "Path to examples does not exist: %s" % self.examplesDir) +class ExamplesTest(unittest.TestCase): + """Unit tests for all quick-start examples.""" + + examples = ["opf", "network", "algo"] + oneStepPredictions = {example: [] for example in examples} + oneStepConfidences = {example: [] for example in examples} + fiveStepPredictions = {example: [] for example in examples} + fiveStepConfidences = {example: [] for example in examples} - def testOPFExample(self): - """Make sure the OPF example does not throw any exception""" - sys.path.insert(0, os.path.join(self.examplesDir, "opf")) # Add to path - _runExample() + docsTestsPath = os.path.dirname(os.path.abspath(__file__)) + examplesDir = os.path.join(docsTestsPath, os.path.pardir, + os.path.pardir, os.path.pardir, + os.path.pardir, "docs", "examples") - def testNetworkAPIExample(self): - """Make sure the network API example does not throw any exception""" - sys.path.insert(0, os.path.join(self.examplesDir, "network")) # Add to path - _runExample() + @classmethod + def setUpClass(cls): + """Get the predictions and prediction confidences for all examples.""" + for example in cls.examples: + predictionsGenerator = _getPredictionsGenerator(cls.examplesDir, example) + for (oneStepPrediction, oneStepConfidence, + fiveStepPrediction, fiveStepConfidence) in predictionsGenerator(): + cls.oneStepPredictions[example].append(oneStepPrediction) + cls.oneStepConfidences[example].append(oneStepConfidence) + cls.fiveStepPredictions[example].append(fiveStepPrediction) + cls.fiveStepConfidences[example].append(fiveStepConfidence) - def testAlgoExample(self): - """Make sure the algorithm API example does not throw any exception""" - sys.path.insert(0, os.path.join(self.examplesDir, "algo")) # Add to path - _runExample() + def testExamplesDirExists(self): + """Make sure the examples directory is in the correct location""" + failMsg = "Path to examples does not exist: %s" % ExamplesTest.examplesDir + self.assertTrue(os.path.exists(ExamplesTest.examplesDir), failMsg) + + + @unittest.skip("Skip test until we figure out why we get different " + "results with OPF, Network and Algorithm APIs.") + def testNumberOfOneStepPredictions(self): + """Make sure all examples output the same number of oneStepPredictions.""" + + self.assertEquals(len(ExamplesTest.oneStepPredictions["opf"]), + len(ExamplesTest.oneStepPredictions["algo"])) + self.assertEquals(len(ExamplesTest.oneStepPredictions["opf"]), + len(ExamplesTest.oneStepPredictions["network"])) + + + @unittest.skip("Skip test until we figure out why we get different " + "results with OPF, Network and Algorithm APIs.") + def testOneStepPredictionsOpfVsAlgo(self): + """Make sure one-step predictions are the same for OPF and Algo API.""" + for i in range(len(ExamplesTest.oneStepPredictions["opf"])): + self.assertEquals(ExamplesTest.oneStepPredictions["opf"][i], + ExamplesTest.oneStepPredictions["algo"][i]) + + + @unittest.skip("Skip test until we figure out 
why we get different " + "results with OPF, Network and Algorithm APIs.") + def testOneStepPredictionsOpfVsNetwork(self): + """Make sure one-step predictions are the same for OPF and Network API.""" + for i in range(len(ExamplesTest.oneStepPredictions["opf"])): + self.assertEquals(ExamplesTest.oneStepPredictions["opf"][i], + ExamplesTest.oneStepPredictions["network"][i]) + + + @unittest.skip("Skip test until we figure out why we get different " + "results with OPF, Network and Algorithm APIs.") + def testOneStepPredictionsAlgoVsNetwork(self): + """Make sure one-step predictions are the same for Algo and Network API.""" + for i in range(len(ExamplesTest.oneStepPredictions["algo"])): + self.assertEquals(ExamplesTest.oneStepPredictions["algo"][i], + ExamplesTest.oneStepPredictions["network"][i]) + + + @unittest.skip("Skip test until we figure out why we get different " + "results with OPF, Network and Algorithm APIs.") + def testFiveStepPredictionsOpfVsNetwork(self): + """Make sure five-step predictions are the same for OPF and Network API.""" + for i in range(len(ExamplesTest.fiveStepPredictions["opf"])): + self.assertEquals(ExamplesTest.fiveStepPredictions["opf"][i], + ExamplesTest.fiveStepPredictions["network"][i]) + + + @unittest.skip("Skip test until we figure out why we get different " + "results with OPF, Network and Algorithm APIs.") + def testOneStepConfidencesOpfVsAlgo(self): + """Make sure one-step confidences are the same for OPF and Algo API.""" + for i in range(len(ExamplesTest.oneStepConfidences["opf"])): + self.assertEquals(ExamplesTest.oneStepConfidences["opf"][i], + ExamplesTest.oneStepConfidences["algo"][i]) + + + @unittest.skip("Skip test until we figure out why we get different " + "results with OPF, Network and Algorithm APIs.") + def testOneStepConfidencesOpfVsNetwork(self): + """Make sure one-step confidences are the same for OPF and Network API.""" + for i in range(len(ExamplesTest.oneStepConfidences["opf"])): + self.assertEquals(ExamplesTest.oneStepConfidences["opf"][i], + ExamplesTest.oneStepConfidences["network"][i]) + + + @unittest.skip("Skip test until we figure out why we get different " + "results with OPF, Network and Algorithm APIs.") + def testOneStepConfidencesAlgoVsNetwork(self): + """Make sure one-step confidences are the same for Algo and Network API.""" + for i in range(len(ExamplesTest.oneStepConfidences["algo"])): + self.assertEquals(ExamplesTest.oneStepConfidences["algo"][i], + ExamplesTest.oneStepConfidences["network"][i]) + + + @unittest.skip("Skip test until we figure out why we get different " + "results with OPF, Network and Algorithm APIs.") + def testFiveStepConfidencesOpfVsNetwork(self): + """Make sure five-step confidences are the same for OPF and Network API.""" + for i in range(len(ExamplesTest.fiveStepConfidences["opf"])): + self.assertEquals(ExamplesTest.fiveStepConfidences["opf"][i], + ExamplesTest.fiveStepConfidences["network"][i])
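Not part of the change above: a minimal sketch of how the generator interface introduced by this PR could be consumed outside the unit test. The hyphenated module name and directory layout come from the renamed files in this diff; running from the repository root and picking the OPF example are assumptions made only for illustration.

import os
import sys

# Make the renamed OPF example importable (assumes the repository root as CWD).
sys.path.insert(0, os.path.join("docs", "examples", "opf"))

# The hyphens in "complete-opf-example" rule out a plain import statement,
# so use __import__ the same way examples_test.py does.
runHotgym = getattr(__import__("complete-opf-example", fromlist=["runHotgym"]),
                    "runHotgym")

# runHotgym() now yields (oneStep, oneStepConfidence, fiveStep,
# fiveStepConfidence) tuples; confidences are already scaled to percent.
for oneStep, oneStepConfidence, fiveStep, fiveStepConfidence in runHotgym():
  print("1-step: {:16} ({:4.4}%)\t"
        "5-step: {:16} ({:4.4}%)".format(oneStep, oneStepConfidence,
                                         fiveStep, fiveStepConfidence))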