diff --git a/.env.dev b/.env.dev.docker similarity index 72% rename from .env.dev rename to .env.dev.docker index e29b72ff..ce7d5641 100644 --- a/.env.dev +++ b/.env.dev.docker @@ -1,18 +1,11 @@ # General Settings NETWORK_INTERFACE=eth0 -MQTT_HOST=mqtt -MQTT_PORT=9001 - REDIS_PORT=6379 REDIS_HOST=redis -# LOG Settings -LOG_LEVEL="DEBUG" # SECC Settings -15118_MQTT_SUBSCRIBE_TOPIC=iso15118/cs -15118_MQTT_PUBLISH_TOPIC=iso15118/josev FREE_CHARGING_SERVICE=False FREE_CERT_INSTALL_SERVICE=True ALLOW_CERT_INSTALL_SERVICE=True @@ -22,3 +15,5 @@ SECC_ENFORCE_TLS=False USE_TLS=False EVCC_ENFORCE_TLS=False +# LOG Settings +LOG_LEVEL="DEBUG" diff --git a/.env.local-run b/.env.dev.local similarity index 72% rename from .env.local-run rename to .env.dev.local index 5e296804..a4a078ba 100644 --- a/.env.local-run +++ b/.env.dev.local @@ -1,18 +1,11 @@ # General Settings NETWORK_INTERFACE=eth0 -MQTT_HOST=localhost -MQTT_PORT=9001 - REDIS_PORT=10001 REDIS_HOST=localhost -# LOG Settings -LOG_LEVEL="DEBUG" # SECC Settings -15118_MQTT_SUBSCRIBE_TOPIC=iso15118/cs -15118_MQTT_PUBLISH_TOPIC=iso15118/josev FREE_CHARGING_SERVICE=False FREE_CERT_INSTALL_SERVICE=True ALLOW_CERT_INSTALL_SERVICE=True @@ -22,3 +15,6 @@ SECC_ENFORCE_TLS=False USE_TLS=False EVCC_ENFORCE_TLS=False +# LOG Settings +LOG_LEVEL="DEBUG" + diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS index aa06ec01..d6416512 100644 --- a/.github/CODEOWNERS +++ b/.github/CODEOWNERS @@ -4,7 +4,7 @@ # For more info about the structure and rules, check: # https://docs.gitlab.com/ee/user/project/code_owners.html -@tropxy @mueltin @shalinnijel2 +@tropxy @MarcMueltin @shalinnijel2 diff --git a/Makefile b/Makefile index 9782d4d2..50f607d2 100644 --- a/Makefile +++ b/Makefile @@ -1,3 +1,6 @@ +# The shell to run the makefile with must be defined to work properly in Linux systems +SHELL := /bin/bash + # all the recipes are phony (no files to check). .PHONY: .install-poetry docs tests build dev run poetry-update poetry-install install-local run-evcc run-secc run-ocpp mypy reformat black flake8 code-quality @@ -13,8 +16,8 @@ help: @echo "" @echo " help outputs this helper" @echo " build builds the project using Docker" - @echo " dev runs secc, mqtt and redis in dev version, using Docker" - @echo " run runs secc, mqtt and redis in prod version, using Docker" + @echo " dev runs secc and redis in dev version, using Docker" + @echo " run runs secc and redis in prod version, using Docker" @echo " install-local uses poetry to update and install iso15118 locally, including dependencies" @echo " run-secc runs the secc project locally" @echo " run-evcc runs the evcc project locally" @@ -36,15 +39,25 @@ tests: .install-poetry #poetry run flake8 pytest -vv tests poetry run pytest -vv tests -build: +.generate_v2_certs: + cd iso15118/shared/pki; ./create_certs.sh -v iso-2 + +.generate_v20_certs: + cd iso15118/shared/pki; ./create_certs.sh -v iso-20 + +build: .generate_v2_certs + # `xargs` will copy the Dockerfile template, so that it can be individually + # used by the secc and evcc services xargs -n 1 cp -v template.Dockerfile<<<"iso15118/evcc/Dockerfile iso15118/secc/Dockerfile" - # @ is used as a separator and allow us to escape '/', so we can substitute the '/' itself - # This command will convert: 's/secc/secc/g' -> 's/secc/evcc/g' - sed -i '.bkp' 's@/secc/g@/evcc/g@g' iso15118/evcc/Dockerfile + # The following command will convert: 's/secc/secc/g' -> 's/secc/evcc/g', + # in the evcc Dockerfile. 
+ # This conversion is required, otherwise we wouldn't be able to spawn the evcc start script. + # @ is used as a separator and allows us to escape '/', so we can substitute the '/' itself + sed -i.bkp 's@/secc/g@/evcc/g@g' iso15118/evcc/Dockerfile docker-compose build dev: - # the dev file apply changes to the original compose file + # the dev file applies changes to the original compose file docker-compose -f docker-compose.yml -f docker-compose.dev.yml up run: diff --git a/README.md b/README.md index 92bc9076..74fff7d2 100644 --- a/README.md +++ b/README.md @@ -11,14 +11,6 @@ The primary dependencies to install the project are the following: > - Poetry [^3] > - Python >= 3.7 -Also, since the project depends on external custom packages, it is necessary -to set the credentials for the Switch PYPI server as ENVs: - ```shell - $ export PYPI_USER=**** - $ export PYPI_PASS=**** - ``` - -Contact André if you require the credentials. There are two recommended ways of running the project: @@ -28,73 +20,123 @@ There are two recommended ways of running the project: $ make build $ make dev ``` - Currently, only SECC will be spawned as the goal of JOSEV is to run iso15118 - as an SECC - 2. Local Installation - + Install JRE engine with the following command: + ```bash - apt update && apt install -y default-jre + sudo apt update && sudo apt install -y default-jre ``` - The JRE engine is only a temporary requirement until we replace the Java-based EXI codec (EXIficient)[^4] with our own RUST-based EXI codec. - + + The JRE engine is only a temporary requirement until we replace the Java-based + EXI codec (EXIficient)[^4] with our own Rust-based EXI codec. + Install the module using `poetry` and run the main script related to the EVCC or SECC instance you want to run. Switch to the iso15118 directory and run: + ```bash $ poetry update $ poetry install $ python iso15118/secc/start_secc.py # or python iso15118/evcc/start_evcc.py ``` + For convenience, the Makefile, present in the project, helps you to run these steps. Thus, in the terminal run: + ```bash $ make install-local $ make run-secc ``` + This will call the poetry commands above and run the start script of the secc. Option number `1` has the advantage of running within Docker, where everything -is fired up automatically, including tests and linting. Currently, the -docker-compose does not set the `network-mode` as 'host', but this may be -required in order to bridge correctly IPv6 frames. +is fired up automatically, including certificate generation, tests and linting. + +Also, both SECC and EVCC are spawned automatically. + + +For option number `2`, the certificates need to be provided. The project includes +a script to help with the generation of ISO 15118-2 and ISO 15118-20 certificates. This script +is located under the `iso15118/shared/pki/` directory and is called `create_certs.sh`. +The following command prints the script's usage help: + +```bash +$ ./create_certs.sh -h +``` + +--- +**IPv6 WARNING** + +For the system to work locally, the network interface to be used needs to have +an IPv6 link-local address assigned. + + +For Docker, the `docker-compose.yml` was configured to create an `IPv6` network +called `ipv6_net`, which enables the containers to acquire a link-local address, +which is required to establish ISO 15118 communication. This configuration is +fine if the user wants to test the EVCC and SECC in isolation and allow ISO 15118 +communication between them. This configuration works for both Linux and BSD systems.
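+
+For a local run, a quick way to check that the interface configured in
+`NETWORK_INTERFACE` (for example, `eth0`) actually carries such a link-local
+(`fe80::/10`) address is, for instance on Linux:
+
+```bash
+$ ip -6 addr show dev eth0 scope link
+```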
+ +However, the usage of an internal `ipv6_net` network, in Docker, does not allow the +host to reach link-local addresses. This would pose a problem, as it would require +the application to use the global-scope address, which is not supported by ISO 15118. + +The solution is to use the `network_mode: host` feature of Docker, which replicates +the host network topology within the Docker world, i.e. the containers and the +host share the same network. This way, Docker can directly access the virtual +network interface created by the HomePlug Green PHY module, making it possible +to use the link-local address. + +Currently, `network_mode: host` only works within Linux environments [^5] [^6]. +Since the Switch team relies mostly on macOS and this project is in a development stage, +`network_mode` is not used by default, but it is possible to use it if the contents of the +file `docker-compose-host-mode.yml` are copied to the main compose file, `docker-compose.yml`. +In that case, it is advised to back up the compose file. -The project also requires an MQTT broker connection, so be sure to set up -a broker correctly and to add the necessary credentials and URL. -For more information about the MQTT API used by Switch, please contact us. +--- -Finally, the project includes a few configuration variables whose default + +## Environment Settings + +Finally, the project includes a few configuration variables, whose default values can be modified by setting them as environmental variables. The following table provides a few of the available variables: -| ENV | Default Value | Description | -| -------------------------- | --------------------- | ---------------------------------------------------------------------------------------- | -| NETWORK_INTERFACE | `eth0` | HomePlug Green PHY Network Interface from which the high-level communication (HLC) will be established | -| MQTT_HOST | `localhost` | MQTT Broker URL | -| MQTT_PORT | `9001` | MQTT Broker PORT | -| MQTT_USER | `None` | Username for Client Authorization | -| MQTT_PASS | `None` | Password for Client Authorization -| 15118_MQTT_SUBSCRIBE_TOPIC | `iso15118/cs` | Mqtt Subscription Topic -| 15118_MQTT_PUBLISH_TOPIC | `iso15118/josev` | Mqtt Publish Topic -| REDIS_HOST | `localhost` | Redis Host URL -| REDIS_PORT | `10001` | Redis Port -| - -The project includes an environment file for dev purposes on the root directoy -`.env.development`, which contains all settings that can be set. - -In order to run the project in production, an `.env` file must be created with -the desired settings. This means, if development settings are desired, one can -simply copy the content of `.env.development` to `.env`. +| ENV | Default Value | Description | +| -------------------------- | ---------------- | ------------------------------------------------------------------------------------------------------ | +| NETWORK_INTERFACE | `eth0` | HomePlug Green PHY Network Interface from which the high-level communication (HLC) will be established | +| REDIS_HOST | `localhost` | Redis Host URL | +| REDIS_PORT | `10001` | Redis Port | + + +The project includes a few environment files, in the root directory, for +different purposes: + +* `.env.dev.docker` - ENV file with development settings, tailored to be used with Docker +* `.env.dev.local` - ENV file with development settings, tailored to be used on +the local host + + +If the user runs the project locally, e.g.
using `$ make build && make run-secc`, +it is required to create a `.env` file, containing the required settings. + +This means, if development settings are desired, one can simply copy the contents +of `.env.dev.local` to `.env`. If Docker is used, the command `make run` will try to get the `.env` file; -The command `make dev` will fetch the contents of `.env.development`. +The command `make dev` will fetch the contents of `.env.dev.docker` - thus, +in this case, the user does not need to create a `.env` file, as Docker will +automatically fetch the `.env.dev.docker` one. +The key-value pairs defined in the `.env` file directly affect the settings +present in `secc_settings.py` and `evcc_settings.py`. In these scripts, the +user will find all the settings that can be configured. ## Integration Test with an EV Simulator @@ -116,3 +158,5 @@ This integration test was tested under: [^2]: https://www.switch-ev.com/news-and-events/new-features-and-timeline-for-iso15118-20 [^3]: https://python-poetry.org/docs/#installation [^4]: https://exificient.github.io/ +[^5]: https://docs.docker.com/network/host/ +[^6]: https://docs.docker.com/desktop/mac/networking/ diff --git a/docker-compose-host-mode.yml b/docker-compose-host-mode.yml new file mode 100644 index 00000000..a12cffc4 --- /dev/null +++ b/docker-compose-host-mode.yml @@ -0,0 +1,25 @@ +version: '3.9' +services: + secc: + hostname: secc + build: + context: . + dockerfile: iso15118/secc/Dockerfile + depends_on: + - redis + network_mode: host + + evcc: + hostname: evcc + build: + context: . + dockerfile: iso15118/evcc/Dockerfile + depends_on: + - redis + - secc + network_mode: host + + redis: + image: redis:6.2.6-alpine + ports: + - "6379:6379" diff --git a/docker-compose.dev.yml b/docker-compose.dev.yml index ddf887cf..7398f9e4 100644 --- a/docker-compose.dev.yml +++ b/docker-compose.dev.yml @@ -1,6 +1,6 @@ version: '3.9' services: secc: - env_file: .env.dev + env_file: .env.dev.docker evcc: - env_file: .env.dev + env_file: .env.dev.docker diff --git a/docker-compose.yml b/docker-compose.yml index bdb267e3..26b8d158 100644 --- a/docker-compose.yml +++ b/docker-compose.yml @@ -4,26 +4,40 @@ services: hostname: secc build: context: . - args: - - PYPI_USER=${PYPI_USER} - - PYPI_PASS=${PYPI_PASS} dockerfile: iso15118/secc/Dockerfile depends_on: - redis + networks: + - ipv6_net evcc: hostname: evcc build: context: . - args: - - PYPI_USER=${PYPI_USER} - - PYPI_PASS=${PYPI_PASS} dockerfile: iso15118/evcc/Dockerfile depends_on: - redis - secc + networks: + - ipv6_net redis: image: redis:6.2.6-alpine ports: - "6379:6379" + + +networks: + ipv6_net: + enable_ipv6: true + driver: bridge + # driver_opts is here just a precaution as some docker compose versions do not process the above "enable_ipv6" + driver_opts: + com.docker.network.enable_ipv6: "true" + ipam: + driver: default + config: + #- subnet: 172.16.238.0/24 + # gateway: 172.16.238.1 + - subnet: 2001:db8:a::/64 + gateway: 2001:db8:a::1 diff --git a/iso15118/evcc/comm_session_handler.py b/iso15118/evcc/comm_session_handler.py index 2f199261..dd9f5bb1 100644 --- a/iso15118/evcc/comm_session_handler.py +++ b/iso15118/evcc/comm_session_handler.py @@ -232,19 +232,16 @@ async def __init__. 
Therefore, we need to create a separate async ) return + self.udp_client = UDPClient(self._rcv_queue) self.list_of_tasks = [ + self.udp_client.start(), self.get_from_rcv_queue(self._rcv_queue), self.restart_sdp(True), ] - self.udp_client = await UDPClient.create(self._rcv_queue) + logger.debug("Communication session handler started") - try: - await wait_till_finished(self.list_of_tasks) - except Exception: - logger.exception("Communication session handler has crashed") - # TODO: Reraise so the process ends with a non-zero exit code. - raise + await wait_till_finished(self.list_of_tasks) async def send_sdp(self): """ @@ -253,6 +250,10 @@ async def send_sdp(self): can establish a TCP connection to the SECC's TCP server, given the IP address and port contained in the SDP Response """ + # the following loop is to allow the synchronization of the udp client + # and the task to handle the SDP restart + while not self.udp_client.started: + await asyncio.sleep(0.1) security = Security.NO_TLS if USE_TLS: security = Security.TLS diff --git a/iso15118/evcc/controller/simulator.py b/iso15118/evcc/controller/simulator.py index 5fb987a5..dbee31bc 100644 --- a/iso15118/evcc/controller/simulator.py +++ b/iso15118/evcc/controller/simulator.py @@ -3,7 +3,6 @@ retrieve data from the EV. The DummyEVController overrides all abstract methods from EVControllerInterface. """ -import asyncio import logging.config from typing import List, Optional, Tuple @@ -169,7 +168,9 @@ def continue_charging(self) -> bool: return False else: self.charging_loop_cycles += 1 - asyncio.sleep(0.5) + # The line below can just be called once process_message in all states + # are converted to async calls + # await asyncio.sleep(0.5) return True def store_contract_cert_and_priv_key(self, contract_cert: bytes, priv_key: bytes): diff --git a/iso15118/evcc/start_evcc.py b/iso15118/evcc/start_evcc.py index ee456bc1..46eecab4 100644 --- a/iso15118/evcc/start_evcc.py +++ b/iso15118/evcc/start_evcc.py @@ -15,7 +15,13 @@ async def main(): # TODO: we need to read the ISO 15118 version and the Security value # from some settings file session_handler = CommunicationSessionHandler() - await session_handler.start_session_handler() + try: + await session_handler.start_session_handler() + except Exception as exc: + logger.error(f"EVCC terminated: {exc}") + # Re-raise so the process ends with a non-zero exit code and the + # watchdog can restart the service + raise def run(): diff --git a/iso15118/evcc/states/iso15118_2_states.py b/iso15118/evcc/states/iso15118_2_states.py index 02e433fa..ff481801 100644 --- a/iso15118/evcc/states/iso15118_2_states.py +++ b/iso15118/evcc/states/iso15118_2_states.py @@ -920,19 +920,7 @@ def process_message( ) logger.debug(f"ChargeProgress is set to {ChargeProgress.RENEGOTIATE}") elif ac_evse_status.evse_notification == EVSENotification.STOP_CHARGING: - power_delivery_req = PowerDeliveryReq( - charge_progress=ChargeProgress.STOP, - sa_schedule_tuple_id=self.comm_session.selected_schedule, - ) - self.create_next_message( - PowerDelivery, - power_delivery_req, - Timeouts.POWER_DELIVERY_REQ, - Namespace.ISO_V2_MSG_DEF, - ) - self.comm_session.charging_session_stop = ChargingSession.TERMINATE - # TODO Implement also a mechanism for pausing - logger.debug(f"ChargeProgress is set to {ChargeProgress.STOP}") + self.stop_charging() elif self.comm_session.ev_controller.continue_charging(): self.create_next_message( @@ -941,6 +929,23 @@ def process_message( Timeouts.CHARGING_STATUS_REQ, Namespace.ISO_V2_MSG_DEF, ) + else: + 
self.stop_charging() + + def stop_charging(self): + power_delivery_req = PowerDeliveryReq( + charge_progress=ChargeProgress.STOP, + sa_schedule_tuple_id=self.comm_session.selected_schedule, + ) + self.create_next_message( + PowerDelivery, + power_delivery_req, + Timeouts.POWER_DELIVERY_REQ, + Namespace.ISO_V2_MSG_DEF, + ) + self.comm_session.charging_session_stop = ChargingSession.TERMINATE + # TODO Implement also a mechanism for pausing + logger.debug(f"ChargeProgress is set to {ChargeProgress.STOP}") class CurrentDemand(StateEVCC): diff --git a/iso15118/evcc/transport/tcp_client.py b/iso15118/evcc/transport/tcp_client.py index f613bdca..f77a92b0 100644 --- a/iso15118/evcc/transport/tcp_client.py +++ b/iso15118/evcc/transport/tcp_client.py @@ -3,9 +3,8 @@ import socket from ipaddress import IPv6Address -from iso15118.evcc.evcc_settings import NETWORK_INTERFACE, USE_TLS +from iso15118.evcc.evcc_settings import NETWORK_INTERFACE from iso15118.shared import settings -from iso15118.shared.network import get_link_local_addr from iso15118.shared.security import get_ssl_context logging.config.fileConfig( @@ -32,7 +31,10 @@ def __init__(self, session_handler_queue, port, is_tls): @staticmethod async def create( - host: IPv6Address, port: int, session_handler_queue: asyncio.Queue, is_tls: bool + host: IPv6Address, + port: int, + session_handler_queue: asyncio.Queue, + is_tls: bool ) -> "TCPClient": """ TCPClient setup @@ -42,9 +44,9 @@ async def create( # When using IPv6 addresses, the interface must be specified in the # host IP string or we need to connect using the full socket address, # which includes the scope id. This is why, in the next line, - # we try to acquire the main interface (nic). - _, nic = await get_link_local_addr(port, NETWORK_INTERFACE) - full_host_address = host.compressed + f"%{nic}" + # we concatenate the host IP with the NIC defined with the + # NETWORK_INTERFACE env + full_host_address = host.compressed + f"%{NETWORK_INTERFACE}" try: self.reader, self.writer = await asyncio.open_connection( diff --git a/iso15118/evcc/transport/udp_client.py b/iso15118/evcc/transport/udp_client.py index 848a6b12..cbb23d53 100644 --- a/iso15118/evcc/transport/udp_client.py +++ b/iso15118/evcc/transport/udp_client.py @@ -3,14 +3,13 @@ import socket import struct from asyncio import DatagramProtocol, DatagramTransport -from typing import Tuple +from typing import Tuple, Optional from iso15118.evcc.evcc_settings import NETWORK_INTERFACE from iso15118.shared import settings -from iso15118.shared.exceptions import NoLinkLocalAddressError from iso15118.shared.messages.timeouts import Timeouts from iso15118.shared.messages.v2gtp import V2GTPMessage -from iso15118.shared.network import SDP_MULTICAST_GROUP, SDP_SERVER_PORT, get_nic +from iso15118.shared.network import SDP_MULTICAST_GROUP, SDP_SERVER_PORT, validate_nic from iso15118.shared.notifications import ( ReceiveTimeoutNotification, UDPPacketNotification, @@ -38,31 +37,23 @@ class UDPClient(DatagramProtocol): https://docs.python.org/3/library/asyncio-protocol.html """ - # The UDP transport protocol - _transport: DatagramTransport - _last_message_sent: V2GTPMessage - def __init__(self, session_handler_queue: asyncio.Queue): self._session_handler_queue: asyncio.Queue = session_handler_queue # Indication whether or not the UDP client connection is open or closed - _closed: bool = False + self.started: bool = False self._rcv_queue: asyncio.Queue = asyncio.Queue() + self._transport: Optional[DatagramTransport] = None @staticmethod - async def 
create(session_handler_queue: asyncio.Queue): + def _create_socket() -> 'socket': """ - This method is necessary because Python does not allow - async def __init__. - Therefore, we need to create a separate async method to be - our constructor. + This method creates an IPv6 socket configured to send multicast datagrams """ - # Get a reference to the event loop as we plan to use a low-level API - # (see loop.create_datagram_endpoint()) - loop = asyncio.get_running_loop() - self = UDPClient(session_handler_queue) + # raises an exception if the interface chosen is invalid + validate_nic(NETWORK_INTERFACE) - # Initialise socket for IPv6 datagrams + # Initialise the socket for IPv6 datagrams # Address family (determines network layer protocol, here IPv6) # Socket type (datagram, determines transport layer protocol UDP) sock = socket.socket(family=socket.AF_INET6, type=socket.SOCK_DGRAM) @@ -75,34 +66,36 @@ async def __init__. ttl = struct.pack("@i", 1) sock.setsockopt(socket.IPPROTO_IPV6, socket.IPV6_MULTICAST_HOPS, ttl) - # The IP_MULTICAST_IF or IPV6_MULTICAST_IF settings tell your socket - # which interface to send its multicast packets on. It's a separate, - # independent setting from the interface that you bound your socket to - # with bind(), since bind() controls which interface(s) the socket - # receives multicast packets from. - - nic: str = "" + # Restrict multicast operation to the given interface + # The IP_MULTICAST_IF or IPV6_MULTICAST_IF settings tell the socket + # which interface it shall send its multicast packets. It can be seen + # as the dual of bind(), in the server side, since bind() controls which + # interface(s) the socket receives multicast packets from. + interface_index = socket.if_nametoindex(NETWORK_INTERFACE) + sock.setsockopt(socket.IPPROTO_IPV6, socket.IPV6_MULTICAST_IF, interface_index) - try: - nic = get_nic(NETWORK_INTERFACE) - except NoLinkLocalAddressError as exc: - logger.exception( - "Can't assign interface for UDP server, unable " - f"to find network interface card. 
{exc}" - ) + return sock - interface_index = socket.if_nametoindex(nic) - sock.setsockopt(socket.IPPROTO_IPV6, socket.IPV6_MULTICAST_IF, interface_index) + async def start(self): + """ + Starts the UDP client service - transport, _ = await loop.create_datagram_endpoint( + """ + # Get a reference to the event loop as we plan to use a low-level API + # (see loop.create_datagram_endpoint()) + loop = asyncio.get_running_loop() + self._transport, _ = await loop.create_datagram_endpoint( protocol_factory=lambda: self, - sock=sock, + sock=self._create_socket(), ) - self._transport = transport - logger.debug("UDP client started") - - return self + def connection_made(self, transport): + """ + Callback of the lower level API, which is called when the connection to + the socket succeeds + """ + logger.debug("UDP client socket ready") + self.started = True def datagram_received(self, data: bytes, addr: Tuple[str, int]): """ @@ -125,9 +118,11 @@ def datagram_received(self, data: bytes, addr: Tuple[str, int]): def error_received(self, exc): logger.exception(f"Error received: {exc}") + self.started = False def connection_lost(self, exc): logger.exception(f"Client closed: {exc}") + self.started = False def send(self, message: V2GTPMessage): """ @@ -178,7 +173,6 @@ def send(self, message: V2GTPMessage): self._transport.sendto( message.to_bytes(), (SDP_MULTICAST_GROUP, SDP_SERVER_PORT) ) - self._last_message_sent = message logger.debug(f"Message sent: {message}") diff --git a/iso15118/secc/comm_session_handler.py b/iso15118/secc/comm_session_handler.py index 153013d0..a45d772a 100644 --- a/iso15118/secc/comm_session_handler.py +++ b/iso15118/secc/comm_session_handler.py @@ -156,7 +156,7 @@ async def __init__. constructor. """ - self.udp_server = await UDPServer.create(self._rcv_queue) + self.udp_server = UDPServer(self._rcv_queue) self.tcp_server = TCPServer(self._rcv_queue) self.list_of_tasks = [ @@ -170,12 +170,7 @@ async def __init__. logger.debug("Communication session handler started") - try: - await wait_till_finished(self.list_of_tasks) - except Exception: - logger.exception("Communication session handler has crashed") - # TODO: Reraise so the process ends with a non-zero exit code. 
- raise + await wait_till_finished(self.list_of_tasks) async def get_from_rcv_queue(self, queue: asyncio.Queue): """ diff --git a/iso15118/secc/start_secc.py b/iso15118/secc/start_secc.py index 0fb9c4db..80686e78 100644 --- a/iso15118/secc/start_secc.py +++ b/iso15118/secc/start_secc.py @@ -13,7 +13,13 @@ async def main(): the SECC (Supply Equipment Communication Controller) """ session_handler = CommunicationSessionHandler() - await session_handler.start_session_handler() + try: + await session_handler.start_session_handler() + except Exception as exc: + logger.error(f"SECC terminated: {exc}") + # Re-raise so the process ends with a non-zero exit code and the + # watchdog can restart the service + raise def run(): diff --git a/iso15118/secc/transport/tcp_server.py b/iso15118/secc/transport/tcp_server.py index f36efe86..fac08b29 100644 --- a/iso15118/secc/transport/tcp_server.py +++ b/iso15118/secc/transport/tcp_server.py @@ -5,7 +5,7 @@ from iso15118.secc.secc_settings import NETWORK_INTERFACE from iso15118.shared import settings -from iso15118.shared.network import get_link_local_addr, get_tcp_port +from iso15118.shared.network import get_link_local_full_addr, get_tcp_port from iso15118.shared.notifications import TCPClientNotification from iso15118.shared.security import get_ssl_context @@ -93,7 +93,7 @@ async def server_factory(self, tls: bool) -> None: # Allows address to be reused sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) - self.full_ipv6_address, nic = await get_link_local_addr(port, NETWORK_INTERFACE) + self.full_ipv6_address = await get_link_local_full_addr(port, NETWORK_INTERFACE) self.ipv6_address_host = self.full_ipv6_address[0] # Bind the socket to the IP address and port for receiving @@ -112,7 +112,7 @@ async def server_factory(self, tls: bool) -> None: logger.debug( f"{server_type} server started at " - f"address {self.ipv6_address_host}%{nic} and " + f"address {self.ipv6_address_host}%{NETWORK_INTERFACE} and " f"port {port}" ) diff --git a/iso15118/secc/transport/udp_server.py b/iso15118/secc/transport/udp_server.py index 51e2fc86..1233e420 100644 --- a/iso15118/secc/transport/udp_server.py +++ b/iso15118/secc/transport/udp_server.py @@ -3,13 +3,12 @@ import socket import struct from asyncio import DatagramTransport -from typing import Tuple +from typing import Tuple, Optional -from iso15118.secc import secc_settings +from iso15118.secc.secc_settings import NETWORK_INTERFACE from iso15118.shared import settings -from iso15118.shared.exceptions import NoLinkLocalAddressError from iso15118.shared.messages.v2gtp import V2GTPMessage -from iso15118.shared.network import SDP_MULTICAST_GROUP, SDP_SERVER_PORT, get_nic +from iso15118.shared.network import SDP_MULTICAST_GROUP, SDP_SERVER_PORT, validate_nic from iso15118.shared.notifications import ( ReceiveTimeoutNotification, UDPPacketNotification, @@ -42,28 +41,20 @@ class UDPServer(asyncio.DatagramProtocol): https://docs.python.org/3/library/asyncio-protocol.html """ - _transport: DatagramTransport - _last_message_sent: V2GTPMessage - def __init__(self, session_handler_queue: asyncio.Queue): - self._closed = False + self.started: bool = False self._session_handler_queue: asyncio.Queue = session_handler_queue self._rcv_queue: asyncio.Queue = asyncio.Queue() + self._transport: Optional[DatagramTransport] = None @staticmethod - async def create(session_handler_queue: asyncio.Queue) -> "UDPServer": + def _create_socket() -> 'socket': """ This method is necessary because Python does not allow async def __init__. 
Therefore, we need to create a separate async method to be our constructor. """ - # Get a reference to the event loop as we plan to use a low-level API - # (see loop.create_datagram_endpoint()) - loop = asyncio.get_running_loop() - - self = UDPServer(session_handler_queue) - # Initialise socket for IPv6 datagrams # Address family (determines network layer protocol, here IPv6) # Socket type (datagram, determines transport layer protocol UDP) @@ -88,18 +79,9 @@ async def __init__. # aton stands for "Ascii TO Numeric" multicast_group_bin = socket.inet_pton(socket.AF_INET6, SDP_MULTICAST_GROUP) - nic: str = "" + validate_nic(NETWORK_INTERFACE) - try: - nic = get_nic(secc_settings.NETWORK_INTERFACE) - except NoLinkLocalAddressError as exc: - logger.exception( - "Could not assign an interface for the UDP " - "server, unable to find network interface card. " - f"{exc}" - ) - - interface_idx = socket.if_nametoindex(nic) + interface_idx = socket.if_nametoindex(NETWORK_INTERFACE) join_multicast_group_req = ( multicast_group_bin + struct.pack("@I", interface_idx) # address + interface @@ -108,30 +90,35 @@ async def __init__. socket.IPPROTO_IPV6, socket.IPV6_JOIN_GROUP, join_multicast_group_req ) + return sock + + async def start(self): + """UDP server tasks to start""" + # Get a reference to the event loop as we plan to use a low-level API + # (see loop.create_datagram_endpoint()) + loop = asyncio.get_running_loop() # One protocol instance will be created to serve all client requests - transport, _ = await loop.create_datagram_endpoint( + self._transport, _ = await loop.create_datagram_endpoint( lambda: self, - sock=sock, + sock=self._create_socket(), reuse_address=True, ) - self._transport = transport - logger.debug( "UDP server started at address " - f"{SDP_MULTICAST_GROUP}%{nic} " + f"{SDP_MULTICAST_GROUP}%{NETWORK_INTERFACE} " f"and port {SDP_SERVER_PORT}" ) + tasks = [self.rcv_task()] + await wait_till_finished(tasks) - return self - - # def connection_made(self, transport): - # """ - # Callback of the lower level API when the connection to - # the socket succeeded - # """ - # logger.debug("UDP server socket ready") - # self._transport = transport + def connection_made(self, transport): + """ + Callback of the lower level API, which is called when the connection to + the socket succeeds + """ + logger.debug("UDP server socket ready") + self.started = True def datagram_received(self, data: bytes, addr: Tuple[str, int]): """ @@ -173,12 +160,7 @@ def connection_lost(self, exc): """ reason = f". Reason: {exc}" if exc else "" logger.exception(f"UDP server closed. {reason}") - self._closed = True - - async def start(self): - """UDP server tasks to start""" - tasks = [self.rcv_task()] - await wait_till_finished(tasks) + self.started = False def send(self, message: V2GTPMessage, addr: Tuple[str, int]): """ @@ -186,7 +168,6 @@ def send(self, message: V2GTPMessage, addr: Tuple[str, int]): name of the last message sent for debugging purposes. 
""" self._transport.sendto(message.to_bytes(), addr) - self._last_message_sent = message async def rcv_task(self, timeout: int = None): """ diff --git a/iso15118/shared/exceptions.py b/iso15118/shared/exceptions.py index 1309fb3f..05f867fe 100644 --- a/iso15118/shared/exceptions.py +++ b/iso15118/shared/exceptions.py @@ -1,6 +1,14 @@ from typing import Any +class InvalidInterfaceError(Exception): + """ + This error is raised when the specified interface is not found under the + available list of interfaces or a link-local address is not associated with + it + """ + + class NoLinkLocalAddressError(Exception): """ Is thrown if no IPv6 link-local address can be found. Used by TCP/TLS diff --git a/iso15118/shared/network.py b/iso15118/shared/network.py index 46965331..4de213c8 100644 --- a/iso15118/shared/network.py +++ b/iso15118/shared/network.py @@ -8,7 +8,9 @@ import psutil from iso15118.shared import settings -from iso15118.shared.exceptions import MACAddressNotFound, NoLinkLocalAddressError +from iso15118.shared.exceptions import (MACAddressNotFound, + NoLinkLocalAddressError, + InvalidInterfaceError) logging.config.fileConfig( fname=settings.LOGGER_CONF_PATH, disable_existing_loggers=False @@ -19,10 +21,10 @@ SDP_SERVER_PORT = 15118 -def _search_link_local_addr(nic: str, nic_addr_list: list) -> Union[IPv6Address, None]: +def _get_link_local_addr(nic: str) -> Union[IPv6Address, None]: """ Provides the IPv6 link-local address for the network interface card - (NIC) provided. + (NIC) address list provided. Args: nic_addr_list A list of tuples per network interface card (NIC), @@ -30,12 +32,14 @@ def _search_link_local_addr(nic: str, nic_addr_list: list) -> Union[IPv6Address, More info: https://psutil.readthedocs.io/en/latest/#psutil.net_if_addrs Returns: - The first IPv6 link-local address from the given list of NIC - addresses + The IPv6 link-local address from the given list of NIC + addresses, if exists Raises: NoLinkLocalAddressError if no IPv6 link-local address can be found """ + nics_with_addresses = psutil.net_if_addrs() + nic_addr_list = nics_with_addresses[nic] for nic_addr in nic_addr_list: addr_family = nic_addr[0] # Remove any interface after the IP address with .split('%')[0] to @@ -45,13 +49,11 @@ def _search_link_local_addr(nic: str, nic_addr_list: list) -> Union[IPv6Address, if addr_family == socket.AF_INET6 and IPv6Address(address).is_link_local: return IPv6Address(address) - logger.debug( - "Could not find IPv6 link-local address for network " f"interface card {nic}" - ) - return None + raise NoLinkLocalAddressError( + f"No link-local address was found for interface {nic}") -async def get_full_ipv6_address(host: str, port: int) -> Tuple[str, int, int, int]: +async def _get_full_ipv6_address(host: str, port: int) -> Tuple[str, int, int, int]: """ loop.getaddrinfo returns a list of tuples containing [(address_family, socktype, proto, canonname, socket_address)]. @@ -102,67 +104,66 @@ async def get_full_ipv6_address(host: str, port: int) -> Tuple[str, int, int, in return socket_address -async def get_link_local_addr( - port: int, evcc_settings_nic: str -) -> Tuple[Tuple[str, int, int, int], str]: +def validate_nic(nic: str) -> None: + """ - Provides the IPv6 link-local address for the network interface card - (NIC) configured in the secc_settings.py file. If no NIC is configured, the - available NICs are scanned for the first available IPv6 link-local - address. 
+ Checks if the Network Interface Card (NIC) provided exists on the system + and contains a link-local address + + Args: + nic (str): The network interface card identifier + + Raises: + InterfaceNotFoundError if the specified interface could not be found + or if no IPv6 link-local address could be found + """ + try: + _get_link_local_addr(nic) + except KeyError as exc: + raise InvalidInterfaceError( + f"No interface {nic} with this name was found" + ) from exc + except NoLinkLocalAddressError as exc: + raise InvalidInterfaceError(f"Interface {nic} has no link-local address " + f"associated with it") from exc + + +async def get_link_local_full_addr( + port: int, + nic: str +) -> Tuple[str, int, int, int]: + """ + Provides the full IPv6 link-local address for the network interface card + (NIC) specified. The full address contains the entire socket address, for example, + + ('fe80::4fd:9dc8:b138:3bcc', 65334, 0, 5) + where: - psutil.net_if_addrs() returns the addresses associated to each NIC - (network interface card) installed on the system as a dictionary whose - keys are the NIC names and value is a list of named tuples for each - address assigned to the NIC. - More info: https://psutil.readthedocs.io/en/latest/#psutil.net_if_addrs + 'fe80::4fd:9dc8:b138:3bcc' - is the IPv6 base address (host) + 65334 - port + 0 - flowinfo + 5 - scope_id + + Note: + psutil.net_if_addrs() returns a dict, whose keys are the NIC names installed + on the system and the values are a list of named tuples for each address + assigned to the NIC. + More info: https://psutil.readthedocs.io/en/latest/#psutil.net_if_addrs Args: port: The port used for the IPv6 link-local address - evcc_settings_nic: The Network Interface Card, if configured in the corresponding - settings file (either evcc_settings.py or secc_settings.py) + nic: The Network Interface Card Returns: - A tuple containing an IPv6 link-local address tuple (in the form of + An IPv6 link-local address tuple (in the form of (IPv6 base address, port, flowinfo, scope_ip), where the tuple entries - are of type Tuple[str, int, int, int]) and the network interface card - - Raises: - NoLinkLocalAddressError if no IPv6 link-local address can be found + are of type Tuple[str, int, int, int]) """ - nics_with_addresses = psutil.net_if_addrs() + ip_address = _get_link_local_addr(nic) - if evcc_settings_nic: - try: - nic_addr_list = nics_with_addresses[evcc_settings_nic] - ip_address = _search_link_local_addr(evcc_settings_nic, nic_addr_list) - - if ip_address: - nic_address = str(ip_address) + f"%{evcc_settings_nic}" - socket_address = await get_full_ipv6_address(nic_address, port) - return socket_address, evcc_settings_nic - - raise NoLinkLocalAddressError( - f"Network interface card (NIC) '{evcc_settings_nic}' configured in " - "settings does not yield a local-link IPv6 address." - ) - except KeyError as exc: - raise NoLinkLocalAddressError( - f"Network interface card (NIC) " - f"'{evcc_settings_nic}' configured in settings but " - "not found." 
- ) from exc - else: - # In case no NIC was provided in an EVCC or SECC settings file - for nic in nics_with_addresses: - ip_address = _search_link_local_addr(nic, nics_with_addresses[nic]) - # TODO: Once we move to a linux container, remove the MacOS lo0 - if ip_address and nic not in ["lo0", "lo"]: - nic_address = str(ip_address) + f"%{nic}" - socket_address = await get_full_ipv6_address(nic_address, port) - return socket_address, nic - - raise NoLinkLocalAddressError("Could not find IPv6 link-local address") + nic_address = str(ip_address) + f"%{nic}" + socket_address = await _get_full_ipv6_address(nic_address, port) + return socket_address def get_tcp_port() -> int: @@ -173,79 +174,20 @@ def get_tcp_port() -> int: return randint(49152, 65535) -def get_nic(settings_nic: str = None, exclude_loopback_nic: bool = False) -> str: +def get_nic_mac_address(nic: str) -> str: """ - Provides the network interface card (NIC) to use for UDP and TCP client - and server. First, the value for settings.NETWORK_INTERFACE is - looked up and returned, if not an empty string. If no NIC is provided - in secc_settings.py, then the first NIC, which has an IPv6 link-local - address, is returned. - - An example for a NIC is 'en0' or 'lo0'. - See ifconfig on Unix-based systems and ipconfig on Windows. - + This method returns the MAC Addess of a specific NIC or the first one + associated with an IPv6 link-local address. Args: - settings_nic (str): The Network interface identifier - exclude_loopback_nic (bool): Flag to exclude the loopback from the - result - Returns: - A network interface card (NIC) (for example 'en0') - - Raises: - NoLinkLocalAddressError, in the unlikely case no suitable NIC - can be found. - """ - if settings_nic: - return settings_nic - - # In case no NIC was provided in an EVCC or SECC settings file - nics_with_addresses = psutil.net_if_addrs() - for nic in nics_with_addresses: - ip_address = _search_link_local_addr(nic, nics_with_addresses[nic]) - if ip_address: - if nic in ["lo0", "lo"] and exclude_loopback_nic: - continue - return nic - - raise NoLinkLocalAddressError( - "Could not find a suitable network " - "interface card with an IPv6 " - "link-local address" - ) + nic (str): The Network Interface Card + Returns: + A MAC address in the format '8c:85:90:a3:96:e3' (str) -def get_nic_mac_address(nic_id: str = "") -> str: - """ - This method returns the MAC Addess of a specific NIC or the first one - associated with a IPv6 link-local address. - - psutil.net_if_addrs() returns a dict where the keys are the NIC names - and the values are a list with the different family addresses, e.g. 
for en0 - - {'en0': [snicaddr(family=, - address='192.168.21.132', netmask='255.255.255.0', - broadcast='192.168.21.255', ptp=None), - snicaddr(family=, - address='8c:85:90:a3:96:e3', - netmask=None, broadcast=None, ptp=None), - snicaddr(family=, - address='fe80::100d:a038:a617:6568%en0', - netmask='ffff:ffff:ffff:ffff::', - broadcast=None, ptp=None) - ], - } """ nics_with_addresses = psutil.net_if_addrs() - if not nic_id: - try: - nic_id = get_nic(settings_nic=nic_id, exclude_loopback_nic=True) - except NoLinkLocalAddressError: - raise MACAddressNotFound( - "Incapable of finding a suitable NIC" - ) from NoLinkLocalAddressError - if nic_id in nics_with_addresses: - nic = nics_with_addresses[nic_id] - for addr in nic: - if addr.family == psutil.AF_LINK: - return addr.address - raise MACAddressNotFound(f"MAC not found for NIC {nic_id}") + nic_addr_list = nics_with_addresses[nic] + for addr in nic_addr_list: + if addr.family == psutil.AF_LINK: + return addr.address + raise MACAddressNotFound(f"MAC not found for NIC {nic}") diff --git a/template.Dockerfile b/template.Dockerfile index d3adf2c2..08c65935 100644 --- a/template.Dockerfile +++ b/template.Dockerfile @@ -1,9 +1,6 @@ # Build image FROM python:3.10.0-buster as build -ARG PYPI_USER -ARG PYPI_PASS - WORKDIR /usr/src/app ENV PYTHONFAULTHANDLER=1 \ @@ -31,7 +28,6 @@ RUN sed -i 's/secc/secc/g' pyproject.toml # However, if we run poetry config virtualenvs.create false, then we dont. # Do not create a virtual poetry env as we already are in an isolated container RUN poetry config virtualenvs.create false -RUN poetry config http-basic.pypi-switch $PYPI_USER $PYPI_PASS # Install dependencies and the project in the venv RUN poetry update && poetry install --no-interaction --no-ansi @@ -53,8 +49,6 @@ RUN poetry build # Runtime image (which is smaller than the build one) FROM python:3.10.0-buster -ARG PYPI_USER -ARG PYPI_PASS WORKDIR /usr/src/app # Installs Java RUN apt update && apt install -y default-jre @@ -63,8 +57,16 @@ RUN python -m venv /venv # copy dependencies and wheel from the build stage COPY --from=build /usr/src/app/dist/ dist/ # This will install the wheel in the venv -# We need to specify the Switch Pypis server as extra-index to look for, in -# order to install switch custom libs -RUN /venv/bin/pip install dist/*.whl --extra-index-url https://$PYPI_USER:$PYPI_PASS@pypi.switch-ev.com/simple +RUN /venv/bin/pip install dist/*.whl + + +# Generating the certs inside the container didn't work (error: Certificate verification failed), but the command is kept +# here so we can investigate this issue later on +# RUN cd /venv/lib/python3.10/site-packages/iso15118/shared/pki && ./create_certs.sh -v iso-2 + +# This is not the ideal way to provide the certificate chain to the container, but for now it works +COPY --from=build /usr/src/app/iso15118/shared/pki/ /venv/lib/python3.10/site-packages/iso15118/shared/pki/ + + # This will run the entrypoint script defined in the pyproject.toml CMD /venv/bin/iso15118
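+
+# Note (host-side step, not executed in this image): the pki/ directory copied
+# above is populated before `docker-compose build` by the Makefile's
+# .generate_v2_certs target, which roughly runs:
+#   cd iso15118/shared/pki && ./create_certs.sh -v iso-2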