diff --git a/doc/utr/Makefile b/doc/utr/Makefile
index 51528ea0a..14b171906 100644
--- a/doc/utr/Makefile
+++ b/doc/utr/Makefile
@@ -11,7 +11,7 @@
 # and/or utr18.pdf.
 #
 
-FILES=utr3.pdf utr15.pdf utr16.pdf utr18.pdf utr19.pdf utr22.pdf utr20.pdf
+FILES=utr3.pdf utr15.pdf utr16.pdf utr18.pdf utr19.pdf utr22.pdf utr20.pdf utr23.pdf
 
 all: $(FILES)
 
diff --git a/doc/utr/utr23.tex b/doc/utr/utr23.tex
new file mode 100644
index 000000000..98f3173c4
--- /dev/null
+++ b/doc/utr/utr23.tex
@@ -0,0 +1,666 @@
+\documentclass[letterpaper,12pt]{article}
+\usepackage[margin=1in]{geometry}
+%\usepackage{url}
+\usepackage{hyperref}
+\hypersetup{pdftex, colorlinks=true, linkcolor=blue, citecolor=blue, filecolor=blue, urlcolor=blue, pdftitle=, pdfauthor=, pdfsubject=, pdfkeywords=}
+\usepackage{utr23/utr23}
+
+\title{Unicon Unit Testing Framework\\ User's Guide}
+\author{Ian Trudel}
+\trnumber{23}
+\date{May 8, 2024}
+
+% Outline numbering
+\setcounter{secnumdepth}{3}
+\makeatletter
+
+\begin{document}
+\abstract{
+This technical report introduces a unit testing framework for Unicon, inspired by Kent Beck's SUnit [1]. The framework aims to provide a robust testing environment for Unicon applications, simplifying the process of writing and executing tests.
+}
+\maketitle
+
+\section{Introduction}
+Unit testing is a critical part of the development process, ensuring that individual units of source code function correctly. This new framework brings structured and repeatable testing to Unicon programs and libraries whilst leveraging Unicon's Object Oriented Programming features.
+
+\section{Components of the Framework}
+\subsection{TestSuite}
+A test suite allows multiple test cases to be aggregated in order to facilitate their collective execution.
+
+\bigskip
+\hrule\vspace{0.1cm}
+\noindent
+{\tt\bf add(testCase)} \hfill {\tt\bf adding a test case}
+
+\vspace{0.1cm}
+\noindent
+\texttt{add(testCase)} adds a TestCase instance to the suite. 
+
+\bigskip
+\hrule\vspace{0.1cm}
+\noindent
+{\tt\bf run()} \hfill {\tt\bf run a test suite}
+
+\vspace{0.1cm}
+\noindent
+\texttt{run()} executes all tests in a suite and returns an exit status.
+
+
+\subsection{TestReporter}
+
+\texttt{TestReporter} collects and displays test results in a readable format. A TestReporter can be subclassed and passed to a TestSuite for a custom reporting format.
+
+\bigskip
+\hrule\vspace{0.1cm}
+\noindent
+{\tt\bf result(cname, mname, passed)} \hfill {\tt\bf adding a result}
+
+\vspace{0.1cm}
+\noindent
+\texttt{result(cname, mname, passed)} adds the result of a test including the classname, method name and whether it passed or not.
+
+\bigskip
+\hrule\vspace{0.1cm}
+\noindent
+{\tt\bf results()} \hfill {\tt\bf returns the test results}
+
+\vspace{0.1cm}
+\noindent
+\texttt{results()} returns the test results including the number of tests that have passed, failed and the total runtime, as a record \texttt{total(passed, failed, runtime)}.
+
+\bigskip
+\hrule\vspace{0.1cm}
+\noindent
+{\tt\bf runtime(time)} \hfill {\tt\bf sets the total runtime}
+
+\vspace{0.1cm}
+\noindent
+\texttt{runtime(time)} sets the total runtime for the entire TestSuite.
+
+\bigskip
+\hrule\vspace{0.1cm}
+\noindent
+{\tt\bf summary()} \hfill {\tt\bf display the test summary}
+
+\vspace{0.1cm}
+\noindent
+\texttt{summary()} reports the test results including the number of tests that have passed, failed and the total runtime in a human-readable format. It also returns a record \texttt{total(passed, failed, runtime)}.
+
+\subsection{TestCase}
+
+\texttt{TestCase} serves as the base class for all tests. It is necessary to subclass \texttt{TestCase} in order to write individual tests. It provides methods for setting up and tearing down the test environment both on a per-class and per-test basis. Every method starting with \texttt{test} in the subclass will be automatically recognized and run by the test suite. 
+
+\bigskip
+\hrule\vspace{0.1cm}
+\noindent
+{\tt\bf setupClass()} \hfill {\tt\bf setting up a class}
+
+\vspace{0.1cm}
+\noindent
+\texttt{setupClass()} initializes resources before any tests in the class run. It is used to allocate or initialize resources shared across all tests in a test class, ensuring they are available before any tests are executed.
+
+\bigskip
+\hrule\vspace{0.1cm}
+\noindent
+{\tt\bf teardownClass()} \hfill {\tt\bf tearing down a class}
+
+\vspace{0.1cm}
+\noindent
+\texttt{teardownClass()} cleans up resources after all tests in the class have run. It ensures that all allocated resources in setupClass() are properly released or cleaned up after all tests have completed.
+
+\bigskip
+\hrule\vspace{0.1cm}
+\noindent
+{\tt\bf setup()} \hfill {\tt\bf setting up a test}
+
+\vspace{0.1cm}
+\noindent
+\texttt{setup()} initializes resources before each individual test runs, ensuring a consistent starting point for all tests.
+
+\bigskip
+\hrule\vspace{0.1cm}
+\noindent
+{\tt\bf teardown()} \hfill {\tt\bf tearing down a test}
+
+\vspace{0.1cm}
+\noindent
+\texttt{teardown()} cleans up resources after each individual test runs, maintaining isolation between tests.
+
+\newpage\subsection{Assertions}
+
+\bigskip
+\hrule\vspace{0.1cm}
+\noindent
+{\tt\bf assertEqual(expected, actual)} \hfill {\tt\bf assert equal values}
+
+\vspace{0.1cm}
+\noindent
+\texttt{assertEqual(expected, actual)} verifies that the actual value generated is equal to the expected value.
+
+\bigskip
+\hrule\vspace{0.1cm}
+\noindent
+{\tt\bf assertNotEqual(unexpected, actual)} \hfill {\tt\bf assert unequal values}
+
+\vspace{0.1cm}
+\noindent
+\texttt{assertNotEqual(unexpected, actual)} verifies that the actual value generated is not equal to the unexpected value. 
+
+\bigskip
+\hrule\vspace{0.1cm}
+\noindent
+{\tt\bf assert \{ expression \}} \hfill {\tt\bf assert expression}
+
+\vspace{0.1cm}
+\noindent
+\texttt{assert \{ expression \}} evaluates an expression, and if true, the test will pass, otherwise it will fail.
+
+\bigskip
+\hrule\vspace{0.1cm}
+\noindent
+{\tt\bf assertFail \{ expression \}} \hfill {\tt\bf assert fail expression}
+
+\vspace{0.1cm}
+\noindent
+\texttt{assertFail \{ expression \}} evaluates an expression, and if false, the test will pass, otherwise it will fail.
+
+\subsection{Mock}
+
+\bigskip
+\hrule\vspace{0.1cm}
+\noindent
+{\tt\bf expect(mname, retval)} \hfill {\tt\bf set an expectation for a method call}
+
+\vspace{0.1cm}
+\noindent
+\texttt{expect(mname, retval)} sets an expectation for the method call \texttt{mname} and returns the value \texttt{retval}.
+
+\bigskip
+\hrule\vspace{0.1cm}
+\noindent
+{\tt\bf verify(mname, args)} \hfill {\tt\bf verify a given method exists}
+
+\vspace{0.1cm}
+\noindent
+\texttt{verify(mname, args)} verifies whether a method exists or not, and will return the name of the method or fail.
+
+\bigskip
+\hrule\vspace{0.1cm}
+\noindent
+{\tt\bf invoke(mname, args)} \hfill {\tt\bf invoke a method}
+
+\vspace{0.1cm}
+\noindent
+\texttt{invoke(mname, args)} simulates the invocation of a method \texttt{mname} with the arguments \texttt{args}. It checks whether the call matches the expectations or not and returns the expected value, otherwise it will fail.
+
+\newpage\section{Example Usage}
+
+\subsection{A simple Calculator}
+
+This example demonstrates the basic functionality of the unit testing framework using a simple Calculator class. The tests cover the basic addition and subtraction methods, ensuring that the calculator performs these operations correctly. This example serves as an introduction to creating test cases, setting up the test environment, and validating the outcomes using assertions. 
+ +\bigskip\noindent {\bf calculator.icn} +\begin{verbatim} +class Calculator() + method add(n, m) + return n + m + end + + method sub(n, m) + return n - m + end +end +\end{verbatim} + +\bigskip \noindent {\bf test\_calculator.icn} +\begin{verbatim} +link calculator + +import unittest + +class TestCalculator : TestCase(calc) + method setup() + calc := Calculator() + end + + method testAdd() + assertEqual(10, calc.add(6, 4)) + end + + method testSub() + assertEqual(3, calc.sub(8, 5)) + end +end + +procedure main() + ts := TestSuite() + ts.add(TestCalculator()) + exit(ts.run()) +end +\end{verbatim} + +\bigskip\noindent {\bf Output} +\begin{verbatim} +TestCalculator_testAdd + +TestCalculator_testSub + +=== TEST SUMMARY === +passed: 2, failed: 0 +ran 2 tests in 0.000062s +\end{verbatim} + +\noindent {\bf Highlights} +\begin{itemize} + \item \texttt{setup()} initializes a new instance of Calculator() before each test. + \item \texttt{testAdd()} asserts that the add method works correctly. + \item \texttt{testSub()} asserts that the sub method works correctly. + \item The test summary reports the two (2) tests have passed. +\end{itemize} + +\newpage\subsection{Logger} + +The logger example demonstrates the framework's ability to handle more complex objects and interactions, such as file operations. The Logger class includes methods for opening a log file, writing messages, rotating logs and closing the log file. This example illustrates the use of \texttt{setup()} and \texttt{teardown()} methods to manage file system side effects, ensuring each test runs in a clean environment. The tests validate the logger's ability to handle maximum file size constraints, correctly write log messages, and perform log rotation. 
+ +\bigskip\noindent {\bf logger.icn} +\begin{verbatim} +class Logger(__filename, __filehandle, __max_size) + method open(filename) + self.__filename := filename + self.__filehandle := ::open(self.__filename, "a") | + fail("Unable to open log file") + end + + method maxsize(size) + return self.__max_size := \size | self.__max_size + end + + method write(message) + stats := stat(self.__filename) + if stats.size > self.__max_size then + self.rotate() + ::write(self.__filehandle, message || "\n") + end + + method close() + ::close(self.__filehandle) + end + + method rotate() + ::close(self.__filehandle) + rename(self.__filename, self.__filename || ".old") + open(self.__filename) + end + +initially + self.__max_size := 1024 +end +\end{verbatim} + +\bigskip\noindent {\bf test\_logger.icn} +\begin{verbatim} +link logger, io + +import unittest + +class TestLogger : TestCase(logger, filename) + method setup() + filename := "logfile.log" + + logger := Logger() + logger.open(filename) + end + + method teardown() + remove(filename) + exists(filename || ".old") & remove(filename || ".old") + end + + method testMaxSize() + assertEqual(1024, logger.maxsize()) + assertEqual(888, logger.maxsize(888)) + assert { -1 ~= logger.maxsize(-1) } + end + + method testWriteLog() + s := "Welcome to Unicon" + + logger.write(s) + + file := open(filename, "r") + contents := reads(file) + assert { find(contents, s) > 0 } + close(file) + end + + method testLogRotation() + assertEqual(10, logger.maxsize(10)) + + every 1 to 20 do logger.write("rotate") + + assert { exists(filename || ".old") } + end +end + +procedure main() + ts := TestSuite() + ts.add(TestLogger()) + exit(ts.run()) +end +\end{verbatim} + +\bigskip\noindent {\bf Output} +\begin{verbatim} +TestLogger_testMaxSize +Assertion failed. 
+
+TestLogger_testWriteLog
+
+TestLogger_testLogRotation
+
+=== TEST SUMMARY ===
+passed: 2, failed: 1
+ran 3 tests in 0.002208s
+\end{verbatim}
+
+\bigskip\noindent An assertion fails in \texttt{testMaxSize()} because the method \texttt{maxsize()} does not handle negative size properly. Here's a revised implementation:
+
+\bigskip\noindent {\bf logger.icn}
+\begin{verbatim}
+   method maxsize(size)
+      return self.__max_size := ((\size > 0) & \size) | self.__max_size
+   end
+\end{verbatim}
+
+\bigskip\noindent {\bf Output}
+\begin{verbatim}
+TestLogger_testMaxSize
+
+TestLogger_testWriteLog
+
+TestLogger_testLogRotation
+
+=== TEST SUMMARY ===
+passed: 3, failed: 0
+ran 3 tests in 0.002033s
+\end{verbatim}
+
+\noindent {\bf Highlights}
+\begin{itemize}
+   \item \texttt{setup()} initializes a Logger instance and opens a log file before each test, ensuring that each test starts with a new log file.
+   \item \texttt{teardown()} cleans up by removing the log file and any old log file after each test, maintaining a clean test environment.
+   \item \texttt{testMaxSize()} tests the maxsize method to ensure it correctly sets and returns the maximum log file size.
+   \item \texttt{testWriteLog()} validates that the write method correctly writes messages to the log file.
+   \item \texttt{testLogRotation()} ensures that log rotation is triggered when the log file exceeds the specified maximum size, and that the old log file is correctly renamed.
+\end{itemize}
+
+\newpage\subsection{Fixtures}
+
+Fixtures are pre-defined data that are used as a foundation for running tests. They help ensure consistency and reliability across tests by providing a known environment. This is particularly useful when testing code that interacts with external resources such as databases, files or network connections. Fixtures are set up before the tests are run and are torn down afterwards to clean up any changes made during testing. 
+ +\bigskip\noindent {\bf fixtures\_bank\_account.sql} +\begin{verbatim} +CREATE TABLE IF NOT EXISTS accounts ( + account TEXT PRIMARY KEY, + holder TEXT NOT NULL, + balance REAL DEFAULT 0.0 +); + +INSERT INTO accounts (account, holder, balance) VALUES + ('123456789', 'Ada Lovelace', 10000.00), + ('987654321', 'Alan C. Kay', 99801.28), + ('123454321', 'Ralph Griswold', 48000.00); + +CREATE TABLE IF NOT EXISTS transactions ( + transaction_id INTEGER PRIMARY KEY AUTOINCREMENT, + account TEXT, + type TEXT CHECK(type IN ('deposit', 'withdrawal')), + amount REAL, + FOREIGN KEY (account) REFERENCES accounts(account) +); + +INSERT INTO transactions (account, type, amount) VALUES + ('123456789', 'deposit', 200.00), + ('987654321', 'withdrawal', 1500.00), + ('123454321', 'deposit', 100.00), + ('987654321', 'deposit', 75.00), + ('123456789', 'withdrawal', 3456.75); +\end{verbatim} + +\noindent Use SQLite to create \texttt{bank.db}: +\begin{verbatim} +sqlite3 bank.db < fixtures_bank_account.sql +\end{verbatim} + +\newpage\bigskip\noindent {\bf bank\_account.icn} +\begin{verbatim} +class BankAccount(account, holder, balance) + method deposit(amount) + if amount > 0 then balance +:= amount + end + + method withdraw(amount) + if 0 < amount <= balance then balance -:= amount else fail("overdraft") + end + +initially + /balance := 0.0 +end +\end{verbatim} + +\bigskip\noindent {\bf test\_bank\_account.icn} +\begin{verbatim} +link bank_account + +import unittest +import sqLite + +record transaction(id, account, type, amount) + +class TestBankAccount : TestCase(db, accounts) + method setupClass() + db := SQLite("bank.db") + end + + method teardownClass() + db.Close() + end + + method setup() + accounts := [] + db_accounts := db.SQL_As_List("SELECT * FROM accounts") + every db_account := !db_accounts do { + put(accounts, BankAccount ! 
db_account) + } + end + + method findAccount(account) + every __account := !accounts do { + if __account.account == account then suspend __account + } + end + + method testValidAccount() + assert { 0 < *accounts } + every account := !accounts do { + assert { 0 < *account.account } + assert { 0 < *account.holder } + assert { 0.0 <= account.balance } + } + end + + method testTransactions() + transactions := [] + db_transactions := db.SQL_As_List("SELECT * FROM transactions") + every db_transaction := !db_transactions do { + put(transactions, transaction ! db_transaction) + } + + every transaction := !transactions do { + account := findAccount(transaction.account) + case transaction.type of { + "deposit": { + balance := account.balance + account.deposit(transaction.amount) + assertEqual(balance + transaction.amount, account.balance) + } + "withdrawal": { + balance := account.balance + account.withdraw(transaction.amount) + assertEqual(balance - transaction.amount, account.balance) + } + } + } + end + + method testOverdraft() + every account := !accounts do { + balance := account.balance + assertFail { account.withdraw(balance * 10) } + assertEqual(balance, account.balance) + } + end +end + +procedure main() + ts := TestSuite() + ts.add(TestBankAccount()) + exit(ts.run()) +end +\end{verbatim} + +\bigskip\noindent {\bf Output} +\begin{verbatim} +TestBankAccount_testValidAccount + +TestBankAccount_testTransactions + +TestBankAccount_testOverdraft + +=== TEST SUMMARY === +passed: 3, failed: 0 +ran 3 tests in 0.004026s +\end{verbatim} + +\newpage\noindent {\bf Highlights} +\begin{itemize} + \item \texttt{setupClass()} initializes the SQLite database connection before any tests run, ensuring that the database is ready for operations. + \item \texttt{teardownClass()} closes the database connection after all tests have run, ensuring that resources are properly released. 
+ \item \texttt{setup()} loads bank accounts from the database into the accounts list before each test, providing a fresh set of data. + \item \texttt{findAccount(account)} is a utility method to locate an account by account number. + \item \texttt{testValidAccount()} validates that each account has an account number, account holder and a balance. + \item \texttt{testTransactions()} validates deposit and withdrawal transactions against the database. + \item \texttt{testOverdraft()} tests that overdraft attempts fail correctly, ensuring no account balance goes negative. + \item The test summary reports the three (3) tests have passed. +\end{itemize} + +\newpage\subsection{Mock objects} + +Test-driven development (TDD) encourages the use of mock objects as a test-first strategy. Mock objects aim to replicate the behaviour of actual objects without their actual implementation, thus allowing tests to be written nonetheless. The following example demonstrates how to implement tests for temperature sensors that are not yet implemented (or physically unavailable). 
+ +\bigskip\noindent {\bf temperature\_sensor.icn} +\begin{verbatim} +class TemperatureSensor() + method read() + stop("TemperatureSensor: not yet implemented.") + end +end + +class TemperatureMonitor(sensors, threshold, callback) + method check() + retval := 0 + + every sensor := !self.sensors do { + temperature := sensor.read() + if temperature > self.threshold then { + self.callback(sensor, temperature) + retval +:= 1 + } + } + + return (retval == 0 & 1) | (retval > 0 & fail) + end +end +\end{verbatim} + +\bigskip\noindent {\bf test\_temperature\_sensor.icn} +\begin{verbatim} +link temperature_sensor + +import unittest + +class MockTemperatureSensor : TemperatureSensor : Mock() + method read() + return self.invoke("read") + end +end + +procedure alert(sensor, temperature) + write("Sensor " || image(sensor) || " temperature " || + temperature || " exceeds threshold") +end + +class TestTemperatureMonitor : TestCase(monitor, sensors) + method setup() + self.sensors := [ + MockTemperatureSensor(), + MockTemperatureSensor(), + MockTemperatureSensor() + ] + + self.monitor := TemperatureMonitor(self.sensors, 115, alert) + end + + method testCheckTemperature() + every sensor := !self.sensors do { + sensor.expect("read", ?40) + } + + assert { self.monitor.check() } + end + + method testCheckTemperatureThreshold() + every sensor := !self.sensors do { + sensor.expect("read", 115 + ?100) + } + + assertFail { self.monitor.check() } + end +end + +procedure main() + ts := TestSuite() + ts.add(TestTemperatureMonitor()) + exit(ts.run()) +end +\end{verbatim} + +\bigskip\noindent {\bf Output} +\begin{verbatim} +TestTemperatureMonitor_testCheckTemperature + +TestTemperatureMonitor_testCheckTemperatureThreshold +Sensor object MockTemperatureSensor_4(1) temperature 136 exceeds threshold +Sensor object MockTemperatureSensor_5(1) temperature 199 exceeds threshold +Sensor object MockTemperatureSensor_6(1) temperature 215 exceeds threshold + +=== TEST SUMMARY === +passed: 2, failed: 0 
+ran 2 tests in 0.000807s +\end{verbatim} + +\noindent {\bf Highlights} +\begin{itemize} + \item \texttt{MockTemperatureSensor} class inherits from both TemperatureSensor and Mock, thus allowing to mock the read() method. + \item \texttt{setup()} defines a series of sensors to be monitored along with a threshold temperature and a callback method. + \item \texttt{testCheckTemperature()} checks that the temperature reported by the sensors are within an acceptable threshold. + \item \texttt{testCheckTemperatureThreshold()} checks that the temperature reported by the sensors are not within an acceptable threshold. + \item The test summary reports the two (2) tests have passed. +\end{itemize} + +\section*{References} + +\noindent +[1] Kent Beck, Simple Smalltalk Testing: With Patterns. Available at \url{https://web.archive.org/web/20150315073817/http://www.xprogramming.com/testfram.htm} [Verified 13 mai 2024] + +\end{document} diff --git a/doc/utr/utr23/utr23.sty b/doc/utr/utr23/utr23.sty new file mode 100644 index 000000000..38dddd92c --- /dev/null +++ b/doc/utr/utr23/utr23.sty @@ -0,0 +1,35 @@ +% tr.sty +\gdef\@abstract{}\gdef\@trnumber{} +\def\maketitle{\begin{titlepage} +\let\footnotesize\small \let\footnoterule\relax \setcounter{page}{0} +\null +\vfil +\vskip 40pt \begin{center} +{\LARGE\bf \@title \par} \vskip 3em {\large \lineskip .75em +{\bf \@author} +\par} +\vskip 1.0em {\bf Unicon Technical Report: \@trnumber}\par +\vskip 1.0em {\large\bf \@date} +\vskip 2.5em \par + +{\bf Abstract} +\begin{quote} +\@abstract +\end{quote} +\vskip 0.4in +{\large\bf +Unicon Project\\ +http://unicon.org\\ +} +\end{center} + +\@thanks +\vfil +\null +\end{titlepage} +\setcounter{footnote}{0} \let\thanks\relax +\gdef\@thanks{}\gdef\@author{}\gdef\@title{}\let\maketitle\relax} +\def\trnumber#1{\gdef\@trnumber{#1}} +\def\abstract#1{\gdef\@abstract{#1}} + + diff --git a/tests/lib/unittest.icn b/tests/lib/unittest.icn new file mode 100644 index 000000000..6cac9d4e2 --- /dev/null +++ 
b/tests/lib/unittest.icn @@ -0,0 +1,319 @@ +import unittest + +link ximage, capture, io + +class TestTestSuite : TestCase(__testSuite) + $define EXIT_SUCCESS 0 + $define EXIT_FAILURE 1 + + method setup() + self.__testSuite := TestSuite() + end + + method testInitialState() + assertEqual(0, *self.__testSuite.__tests) + assertEqual("unittest__TestReporter", classname(self.__testSuite.__testReporter)) + end + + method testAddTest() + st := SimpleTest() + self.__testSuite.add(st) + assertEqual(1, *self.__testSuite.__tests) + end + + method testAddNonTest() + assertEqual(0, *self.__testSuite.__tests) + self.__testSuite.add(Empty()) + self.__testSuite.add(&null) + self.__testSuite.add("StringClass") + assertEqual(0, *self.__testSuite.__tests) + end + + method testRunTests() + assertEqual(0, *self.__testSuite.__tests) + assertEqual(EXIT_SUCCESS, self.__testSuite.run()) + + st := SimpleTest() + self.__testSuite.add(st) + assertEqual(EXIT_SUCCESS, self.__testSuite.run()) + end + + method testRuntime() + local runtime + + st := SlowTest() + self.__testSuite.add(st) + + self.__testSuite.run() + runtime := self.__testSuite.__testReporter.__summary.runtime + + assert { 2.0 <= runtime & 3.0 > runtime } + end + + method testRunFailureTests() + local results + + assertEqual(0, *self.__testSuite.__tests) + + ft := FailureTest() + self.__testSuite.add(ft) + assertEqual(EXIT_FAILURE, self.__testSuite.run()) + + results := self.__testSuite.__testReporter.results() + assertEqual(0, results[1]) + assertNotEqual(0, results[2]) + end +end + +class SimpleTest : TestCase() + method testAssertEqual() + assertEqual(1, 1) + end + + method testAssert() + assert { 1 == 1 } + end +end + +class SuccessTest : TestCase() + method testSuccessAssertEqual() + assertEqual(1, 1) + end + + method testSuccessAssert() + assert { 1 == 1 } + end +end + +class SlowTest : TestCase() + method testSlow() + delay(2000) + end +end + +# NOTE: move all failing tests here in order to avoid exit(1) +# these tests are 
isolated in a different test runner. +class FailureTest : TestCase() + method testFailAssertEqual() + assertEqual(0, 9) + assertEqual("Unicon", "Icon") + end + + method testFailAssertNotEqual() + assertNotEqual(0, 0) + assertNotEqual("Unicon", "Unicon") + end + + method testFailAssert() + assert { 0 == 9 } + assert { 0 ~= 0 } + assert { "Unicon" == "Icon" } + assert { "Unicon" ~== "Unicon" } + end +end + +class Empty() + method testNotReal() + write("This should not be displayed.") + end +end + +class TestTestCase : TestCase(__calls) + $define STATUS_SUCCESS 0 + $define STATUS_FAIL 1 + + method setupClass() + self.__calls["setupClass"] +:= 1 + assertEqual(1, self.__calls["setupClass"]) + end + + method teardownClass() + self.__calls["teardownClass"] +:= 1 + assertEqual(1, self.__calls["teardownClass"]) + assertEqual(1, self.__calls["setupClass"]) + end + + method setup() + self.__calls["setup"] +:= 1 + assertEqual(self.__calls["tests"] + 1, self.__calls["setup"]) + end + + method teardown() + self.__calls["teardown"] +:= 1 + assertEqual(self.__calls["tests"], self.__calls["teardown"]) + assertEqual(self.__calls["tests"], self.__calls["setup"]) + end + + method testInitialState() + assertEqual(STATUS_SUCCESS, self.__status) + self.__calls["tests"] +:= 1 + end + + method testResetStatus() + assertEqual(STATUS_SUCCESS, self.__status) + self.__status := 255 + self.__reset() + assertEqual(STATUS_SUCCESS, self.__status) + self.__calls["tests"] +:= 1 + end + + method testAssertEqual() + assertEqual(1, 1) + assertEqual("Unicon", "Unicon") + + self.__calls["tests"] +:= 1 + end + + method testAssertNotEqual() + assertNotEqual(1, 2) + assertNotEqual("Unicon", "Icon") + + self.__calls["tests"] +:= 1 + end + + method testAssert() + assert { 1 == 1 } + assert { "Unicon" == "Unicon" } + + self.__calls["tests"] +:= 1 + end + + method testAssertFail() + assertFail { 1 == 2 } + assertFail { 1 == "Unicon" } + assertFail { "Unicon" == 1 } + + self.__calls["tests"] +:= 1 + end + 
+initially + __calls := table() + every call := !["setupClass", "teardownClass", "setup", "teardown", "tests"] do + __calls[call] := 0 +end + +class TestTestReporter : TestCase(__reporter) + method setup() + self.__reporter := TestReporter() + end + + method testInitialState() + local results + + assertEqual(0, *self.__reporter.__results) + + results := self.__reporter.results() + assertEqual(0, results.passed) + assertEqual(0, results.failed) + assertEqual(0.0, results.runtime) + + assertEqual(0.0, self.__reporter.__summary.runtime) + end + + method testResult() + assertEqual(0, *self.__reporter.__results) + + self.__reporter.result(classname(self), "testResult", 0) + assertEqual(1, *self.__reporter.__results) + assertEqual(classname(self), self.__reporter.__results[1][1]) + assertEqual("testResult", self.__reporter.__results[1][2]) + assertEqual(0, self.__reporter.__results[1][3]) + end + + method testRuntime() + self.__reporter.runtime(5.5) + assertEqual(5.5, self.__reporter.__summary.runtime) + end + + # TODO: not much luck with capture()'ing the output. 
+ method testSummary() + local results + + assertEqual(0, *self.__reporter.__results) + results := self.__reporter.summary() + assertEqual(0, results.passed) + assertEqual(0, results.failed) + assertEqual(0.0, results.runtime) + + self.__reporter.result(classname(self), "testResult", 0) + assertEqual(1, *self.__reporter.__results) + results := self.__reporter.summary() + assertEqual(1, results[1]) + assertEqual(0, results[2]) + + self.__reporter.result(classname(self), "testResultFail", 1) + assertEqual(2, *self.__reporter.__results) + results := self.__reporter.summary() + assertEqual(1, results[1]) + assertEqual(1, results[2]) + + results := self.__reporter.results() + assertEqual(1, results[1]) + assertEqual(1, results[2]) + end +end + +class TestMock : TestCase(__mock) + method setup() + self.__mock := Mock() + end + + method testInitialState() + assertEqual(0, *self.__mock.__expectations) + end + + method testExpect() + self.__mock.expect("success", 1) + assertEqual(1, *self.__mock.__expectations) + assertEqual(1, self.__mock.__expectations["success"]) + + self.__mock.expect("str", "Unicon") + assertEqual("Unicon", self.__mock.__expectations["str"]) + assertEqual(2, *self.__mock.__expectations) + end + + method testVerify() + local expectation + + expectation := self.__mock.verify("unknown", []) + assertEqual(1, (((expectation === &null) & 1) | 0)) + + self.__mock.expect("verify", "expected") + expectation := self.__mock.verify("verify", []) + assertEqual("verify", expectation) + assertEqual(1, *self.__mock.__expectations) + end + + method testInvoke() + local expectation, value + + expectation := self.__mock.invoke("unknown", []) + assertEqual(1, (((expectation === &null) & 1) | 0)) + + self.__mock.expect("invoke", "expected") + value := self.__mock.invoke("invoke", []) + assertEqual("expected", value) + assertEqual(1, *self.__mock.__expectations) + end + +initially + __mock := &null +end + +procedure main() + ts := TestSuite() + + tss := TestTestSuite() + 
ts.add(tss) + + tc := TestTestCase() + ts.add(tc) + + tm := TestMock() + ts.add(tm) + + tr := TestTestReporter() + ts.add(tr) + + exit(ts.run()) +end diff --git a/uni/lib/unittest.icn b/uni/lib/unittest.icn new file mode 100644 index 000000000..a776192ee --- /dev/null +++ b/uni/lib/unittest.icn @@ -0,0 +1,214 @@ +############################################################################ +# +# File: unittest.icn +# +# Subject: An object-oriented unit test framework based on Kent Beck's SUnit. +# +# https://web.archive.org/web/20150315073817/http://www.xprogramming.com/testfram.htm +# +# Author: Ian Trudel +# +# Date: April 19, 2024 +# +############################################################################ + +# TODO: assertions +# - asserting objects +# - asserting nested values (list, table, objects, etc.) +# TODO: Test Reporter +# - add message to results +# - add backtrace if possible +# TODO: there should be a way to discover and load tests dynamically +# TODO: implement concurrency, see tests/unicon/tester.icn +# TODO: how to catch stdin/stdout/stderr within tests, see capture(). +# TODO: enable/disable summary. +# TODO: enable/disable verbosity. 
+ +package unittest + +link ximage, printf + +import lang + +invocable all + +class TestCase : Object(__status) + $define STATUS_SUCCESS 0 + $define STATUS_FAIL 1 + + method setupClass() + # write(classname(self), "::setupClass()") + end + + method teardownClass() + # write(classname(self), "::teardownClass()") + end + + method setup() + # write(classname(self), "::setup()") + end + + method teardown() + # write(classname(self), "::teardown()") + end + + method __reset() + self.__status := STATUS_SUCCESS + end + + method assert(expression) + if not @expression[1] then { + self.__status := STATUS_FAIL + write("Assertion failed.") + fail + } + + return + end + + method assertFail(expression) + if not @expression[1] then return + + write("Assertion failed.") + self.__status := STATUS_FAIL + fail + end + + method assertEqual(expected, actual) + if type(expected) ~== type(actual) then { + write("Assertion failed: expected value of type ", type(expected), ", but got ", type(actual)) + self.__status := STATUS_FAIL + fail + } else if expected ~== actual then { + write("Assertion failed: expected ", expected, ", but got ", actual) + self.__status := STATUS_FAIL + fail + } + + return + end + + method assertNotEqual(unexpected, actual) + if type(unexpected) === type(actual) & unexpected === actual then { + write("Assertion failed: unexpected ", unexpected, ", but got ", actual) + self.__status := STATUS_FAIL + fail + } + + return + end + +initially + /__status := STATUS_SUCCESS +end + +class TestSuite(__tests, __testReporter) + $define EXIT_SUCCESS 0 + $define EXIT_FAILURE 1 + + method add(testCase) + if (classname(testCase) ~=== "unittest__TestCase") then { + if \testCase.__m["Type"] then { + every ctype := testCase.Type() do { + if ctype == "unittest::TestCase" then { + put(__tests, testCase) + } + } + } + } + end + + method run() + local mname, summary, t0, t1 + + t0 := gettimeofday() + + every test := !__tests do { + cname := classname(test) + + test.setupClass() + every 
image(!test.__m) ? { + mname := (="procedure ", tab(0)) | next + if mname ? (tab(*cname+1), ="_", match("test")) then { + write(mname) + test.__reset() + test.setup() + mname(test) + test.teardown() + + self.__testReporter.result(cname, mname, test.__status) + write() + } + } + test.teardownClass() + } + + t1 := gettimeofday() + self.__testReporter.runtime(((t1.sec - t0.sec) * 1000000 + (t1.usec - t0.usec)) / 1000000.0) + summary := self.__testReporter.summary() + + return if summary.failed > 0 then EXIT_FAILURE else EXIT_SUCCESS + end + + method output() + write(ximage(__tests)) + end + +initially + __tests := [] + /__testReporter := TestReporter() +end + +class TestReporter(__results, __summary) + $define STATUS_SUCCESS 0 + $define STATUS_FAIL 1 + + record total(passed, failed, runtime) + + method result(cname, mname, passed) + put(self.__results, [cname, mname, passed]) + if passed == STATUS_SUCCESS then self.__summary.passed +:= 1 else self.__summary.failed +:= 1 + end + + method results() + return self.__summary + end + + method runtime(time) + self.__summary.runtime := real(time) + end + + method summary() + local runtime + + runtime := sprintf("%.6r", self.__summary.runtime) + + write("=== TEST SUMMARY ===") + write("passed: " || self.__summary.passed || ", failed: " || self.__summary.failed) + write("ran " || *self.__results || " tests in " || runtime || "s") + + return self.__summary + end + +initially + /__results := [] + /__summary := total(0, 0, 0.0) +end + +class Mock(__expectations) + method expect(mname, retval) + self.__expectations[mname] := retval + end + + method verify(mname, args) + return member(self.__expectations, mname) + end + + method invoke(mname, args) + self.verify(mname, args) | fail + return self.__expectations[mname] + end + +initially + /__expectations := table() +end