Merge branch 'source-repo'

glimchb committed Oct 21, 2022
2 parents a74b59d + 1e3e2f4 commit 40983ef
Showing 29 changed files with 2,752 additions and 2 deletions.
Binary file added OPI-Storage-Layers-Detailed.png
Binary file added OPI-Storage-Sequence.png
Binary file added OPI-storage-SPDK-bridge.png
210 changes: 208 additions & 2 deletions README.md
@@ -1,2 +1,208 @@
# opi-spdk-bridge
OPI Storage gRPC to SPDK JSON-RPC bridge
# Storage

This is a simple SPDK-based storage API PoC.

* SPDK - container with the SPDK app running on the xPU
* Server - container with the OPI gRPC storage API to SPDK JSON-RPC bridge
* Client - container with an OPI gRPC client for testing the server/bridge above

## Docs

* [JSON RPC Proxy](https://spdk.io/doc/jsonrpc_proxy.html)
* [SPDK SMA](https://github.com/spdk/spdk/tree/master/python/spdk/sma)
* [SPDK CSI](https://github.com/spdk/spdk-csi/blob/master/deploy/spdk/Dockerfile)
* [CSI Spec](https://github.com/container-storage-interface/spec/blob/master/spec.md)

## OPI-SPDK Bridge Block Diagram

The following is the example architecture we envision for the OPI Storage
SPDK bridge APIs. It utilizes SPDK to handle storage services, and the
configuration is handled by standard JSON-RPC based APIs; see
<https://spdk.io/doc/jsonrpc.html>.

We recognize that not all companies use SPDK, so for them only the PROTOBUF
definitions are the OPI-consumable product. For those that wish to use SPDK,
this is a reference implementation not intended for production use.

![OPI Storage SPDK bridge/server](OPI-storage-SPDK-bridge.png)
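
Conceptually, each bridge RPC unpacks the OPI gRPC request, issues the matching SPDK JSON-RPC method, and maps the reply back. Below is a minimal sketch of that translation pattern, not the actual bridge code: the `spdkRPC` helper is hypothetical, and the request/response type names are inferred from the service listing further below.

```go
package main

import (
	"context"

	pb "github.com/opiproject/opi-api/storage/v1/gen/go"
)

// spdkRPC stands in for whatever JSON-RPC client the bridge uses; Call
// sends {"jsonrpc":"2.0","method":...,"params":...} to the SPDK socket.
type spdkRPC interface {
	Call(ctx context.Context, method string, params, result interface{}) error
}

type server struct {
	rpc spdkRPC
}

// NVMeSubsystemCreate shows the pattern: unpack the OPI request, call
// the corresponding SPDK method (here nvmf_create_subsystem), and map
// the result back into the OPI response.
func (s *server) NVMeSubsystemCreate(ctx context.Context, in *pb.NVMeSubsystemCreateRequest) (*pb.NVMeSubsystemCreateResponse, error) {
	params := map[string]interface{}{"nqn": in.Subsystem.Nqn}
	var result bool
	if err := s.rpc.Call(ctx, "nvmf_create_subsystem", params, &result); err != nil {
		return nil, err
	}
	return &pb.NVMeSubsystemCreateResponse{}, nil
}
```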

## OPI-SPDK Bridge Sequence Diagram

The following is an example sequence diagram for the OPI-SPDK bridge APIs.
It is just an example; SPDK is shown for illustration and is not mandated by OPI.

![OPI Storage SPDK bridge/server](OPI-Storage-Sequence.png)

## Getting started

* [Set up everything once using Ansible](../setup)
* Run `docker-compose up -d`

## QEMU example

[OPI Storage QEMU SPDK Setup](qemu_spdk_setup.md)

## Real DPU/IPU example

On the DPU/IPU (e.g. with IP=10.10.10.1) run

```bash
$ docker run --rm -it -v /var/tmp/:/var/tmp/ -p 50051:50051 ghcr.io/opiproject/opi-storage-server:main
2022/09/21 21:39:49 server listening at [::]:50051
```

On the x86 management VM run

```bash
docker run --network=host --rm -it namely/grpc-cli call --json_input --json_output 10.10.10.1:50051 NVMeSubsystemCreate "{'subsystem' : {'nqn' : 'Opi1'} }"
docker run --network=host --rm -it namely/grpc-cli call --json_input --json_output 10.10.10.1:50051 NVMeControllerCreate "{'controller' : {'name' : 'Opi2' , 'subsystem_id' : '1'} }"
docker run --network=host --rm -it namely/grpc-cli call --json_input --json_output 10.10.10.1:50051 NVMeNamespaceCreate "{'namespace' : {'name' : 'Opi3' , 'controller_id' : '2' , 'subsystem_id' : '1'} }"
```

and the network-facing APIs:

```bash
docker run --network=host --rm -it namely/grpc-cli call --json_input --json_output 10.10.10.1:50051 NVMfRemoteControllerConnect "{'ctrl' : {'id': '12', 'traddr':'11.11.11.2', 'subnqn':'nqn.2016-06.com.opi.spdk.target0', 'trsvcid':'4444'}}"
connecting to 10.10.10.1:50051
{}
Rpc succeeded with OK status

docker run --network=host --rm -it namely/grpc-cli call --json_input --json_output 10.10.10.1:50051 NVMfRemoteControllerGet "{'id': '12'}"
connecting to 10.10.10.1:50051
{
  "ctrl": {
    "subnqn": "OpiNvme12"
  }
}
Rpc succeeded with OK status

docker run --network=host --rm -it namely/grpc-cli call --json_input --json_output 10.10.10.1:50051 NVMfRemoteControllerDisconnect "{'id': '12'}"
connecting to 10.10.10.1:50051
{}
Rpc succeeded with OK status
```
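
The same calls can be made from Go using the generated clients, following the pattern of the client code added in this commit (a sketch; the connection boilerplate assumes an insecure channel, as in the example deployment):

```go
package main

import (
	"context"
	"log"

	pb "github.com/opiproject/opi-api/storage/v1/gen/go"
	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"
)

func main() {
	// Dial the bridge running on the DPU/IPU (example IP from above).
	conn, err := grpc.Dial("10.10.10.1:50051", grpc.WithTransportCredentials(insecure.NewCredentials()))
	if err != nil {
		log.Fatalf("could not connect: %v", err)
	}
	defer conn.Close()

	// Mirrors the NVMfRemoteControllerConnect grpc-cli call above.
	c := pb.NewNVMfRemoteControllerServiceClient(conn)
	r, err := c.NVMfRemoteControllerConnect(context.Background(), &pb.NVMfRemoteControllerConnectRequest{
		Ctrl: &pb.NVMfRemoteController{
			Id:      12,
			Traddr:  "11.11.11.2",
			Subnqn:  "nqn.2016-06.com.opi.spdk.target0",
			Trsvcid: 4444,
		},
	})
	if err != nil {
		log.Fatalf("could not connect to remote NVMf controller: %v", err)
	}
	log.Printf("Connected: %v", r)
}
```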

## Test SPDK is up

```bash
curl -k --user spdkuser:spdkpass -X POST -H "Content-Type: application/json" -d '{"id": 1, "method": "bdev_get_bdevs", "params": {"name": "Malloc0"}}' http://127.0.0.1:9009/
```
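
The same liveness check can be scripted; here is a minimal Go equivalent of the curl command above, assuming the same proxy port and credentials:

```go
package main

import (
	"bytes"
	"fmt"
	"io"
	"log"
	"net/http"
)

func main() {
	// The same JSON-RPC request body as the curl example above.
	payload := []byte(`{"id": 1, "method": "bdev_get_bdevs", "params": {"name": "Malloc0"}}`)
	req, err := http.NewRequest(http.MethodPost, "http://127.0.0.1:9009/", bytes.NewReader(payload))
	if err != nil {
		log.Fatal(err)
	}
	req.SetBasicAuth("spdkuser", "spdkpass")
	req.Header.Set("Content-Type", "application/json")

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		log.Fatal(err)
	}
	defer resp.Body.Close()

	body, _ := io.ReadAll(resp.Body)
	fmt.Println(string(body))
}
```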

## gRPC CLI examples

From <https://github.com/grpc/grpc-go/blob/master/Documentation/server-reflection-tutorial.md>

Alias

```bash
alias grpc_cli='docker run --network=storage_opi --rm -it namely/grpc-cli'
```

See services

```bash
$ grpc_cli ls opi-spdk-server:50051
grpc.reflection.v1alpha.ServerReflection
opi_api.storage.v1.AioControllerService
opi_api.storage.v1.NVMeControllerService
opi_api.storage.v1.NVMeNamespaceService
opi_api.storage.v1.NVMeSubsystemService
opi_api.storage.v1.NVMfRemoteControllerService
opi_api.storage.v1.NullDebugService
opi_api.storage.v1.VirtioBlkService
opi_api.storage.v1.VirtioScsiControllerService
opi_api.storage.v1.VirtioScsiLunService
```

See commands

```bash
$ grpc_cli ls opi-spdk-server:50051 opi_api.storage.v1.NVMeControllerService -l
filename: frontend.proto
package: opi_api.storage.v1;
service NVMeControllerService {
  rpc NVMeControllerCreate(opi_api.storage.v1.NVMeControllerCreateRequest) returns (opi_api.storage.v1.NVMeControllerCreateResponse) {}
  rpc NVMeControllerDelete(opi_api.storage.v1.NVMeControllerDeleteRequest) returns (opi_api.storage.v1.NVMeControllerDeleteResponse) {}
  rpc NVMeControllerUpdate(opi_api.storage.v1.NVMeControllerUpdateRequest) returns (opi_api.storage.v1.NVMeControllerUpdateResponse) {}
  rpc NVMeControllerList(opi_api.storage.v1.NVMeControllerListRequest) returns (opi_api.storage.v1.NVMeControllerListResponse) {}
  rpc NVMeControllerGet(opi_api.storage.v1.NVMeControllerGetRequest) returns (opi_api.storage.v1.NVMeControllerGetResponse) {}
  rpc NVMeControllerStats(opi_api.storage.v1.NVMeControllerStatsRequest) returns (opi_api.storage.v1.NVMeControllerStatsResponse) {}
}
```

See methods

```bash
grpc_cli ls opi-spdk-server:50051 opi_api.storage.v1.NVMeControllerService.NVMeControllerCreate -l
rpc NVMeControllerCreate(opi_api.storage.v1.NVMeControllerCreateRequest) returns (opi_api.storage.v1.NVMeControllerCreateResponse) {}
```

See messages

```bash
$ grpc_cli type opi-spdk-server:50051 opi_api.storage.v1.NVMeController
message NVMeController {
  int64 id = 1 [json_name = "id"];
  string name = 2 [json_name = "name"];
  string subsystem_id = 3 [json_name = "subsystemId"];
  .opi_api.storage.v1.NvmeControllerPciId pcie_id = 4 [json_name = "pcieId"];
  int64 max_io_qps = 5 [json_name = "maxIoQps"];
  int64 max_ns = 6 [json_name = "maxNs"];
}

$ grpc_cli type opi-spdk-server:50051 opi_api.storage.v1.NvmeControllerPciId
message NvmeControllerPciId {
  uint32 bus = 1 [json_name = "bus"];
  uint32 device = 2 [json_name = "device"];
  uint32 function = 3 [json_name = "function"];
  uint32 virtual_function = 4 [json_name = "virtualFunction"];
}
```
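
For reference, the same message can be constructed from Go with the generated bindings used by the client in this commit; the field names below follow the standard protobuf-to-Go mapping of the type dump above, and the values are placeholders:

```go
package main

import (
	"fmt"

	pb "github.com/opiproject/opi-api/storage/v1/gen/go"
)

func main() {
	// Build an NVMeController message matching the type dump above.
	ctrl := &pb.NVMeController{
		Id:          1,
		Name:        "Opi2",
		SubsystemId: "1",
		PcieId:      &pb.NvmeControllerPciId{Bus: 0, Device: 4, Function: 0, VirtualFunction: 0},
		MaxIoQps:    4,
		MaxNs:       8,
	}
	fmt.Println(ctrl)
}
```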

Call remote method

```bash
$ grpc_cli call --json_input --json_output opi-spdk-server:50051 NVMeControllerDelete "{subsystem_id: 8}"
connecting to opi-spdk-server:50051
{}
Rpc succeeded with OK status
```

Server log

```bash
opi-spdk-server_1 | 2022/08/05 14:31:14 server listening at [::]:50051
opi-spdk-server_1 | 2022/08/05 14:39:40 NVMeSubsystemDelete: Received from client: id:8
opi-spdk-server_1 | 2022/08/05 14:39:40 Sending to SPDK: {"jsonrpc":"2.0","id":1,"method":"bdev_malloc_delete","params":{"name":"OpiMalloc8"}}
opi-spdk-server_1 | 2022/08/05 14:39:40 Received from SPDK: {1 {-19 No such device} 0xc000029f4e}
opi-spdk-server_1 | 2022/08/05 14:39:40 error: bdev_malloc_delete: json response error: No such device
opi-spdk-server_1 | 2022/08/05 14:39:40 Received from SPDK: false
opi-spdk-server_1 | 2022/08/05 14:39:40 Could not delete: id:8
```
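
The `{-19 No such device}` in the log is the standard JSON-RPC 2.0 error object returned by SPDK. A minimal sketch of decoding such a response in Go (the struct names here are illustrative, not the bridge's actual types):

```go
package main

import (
	"encoding/json"
	"fmt"
	"log"
)

// Shapes follow the JSON-RPC 2.0 response format that SPDK uses.
type rpcError struct {
	Code    int    `json:"code"`
	Message string `json:"message"`
}

type rpcResponse struct {
	JSONRPC string          `json:"jsonrpc"`
	ID      int             `json:"id"`
	Error   *rpcError       `json:"error,omitempty"`
	Result  json.RawMessage `json:"result,omitempty"`
}

func main() {
	raw := []byte(`{"jsonrpc":"2.0","id":1,"error":{"code":-19,"message":"No such device"}}`)
	var resp rpcResponse
	if err := json.Unmarshal(raw, &resp); err != nil {
		log.Fatal(err)
	}
	if resp.Error != nil {
		fmt.Printf("bdev_malloc_delete failed: %s (code %d)\n", resp.Error.Message, resp.Error.Code)
	}
}
```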

Another remote call example

```bash
$ grpc_cli call --json_input --json_output opi-spdk-server:50051 NVMeSubsystemList {}
connecting to opi-spdk-server:50051
{
  "subsystem": [
    {
      "nqn": "nqn.2014-08.org.nvmexpress.discovery"
    },
    {
      "nqn": "nqn.2016-06.io.spdk:cnode1"
    }
  ]
}
Rpc succeeded with OK status
```

Another server log

```bash
2022/09/21 19:38:26 NVMeSubsystemList: Received from client:
2022/09/21 19:38:26 Sending to SPDK: {"jsonrpc":"2.0","id":1,"method":"bdev_get_bdevs"}
2022/09/21 19:38:26 Received from SPDK: {1 {0 } 0x40003de660}
2022/09/21 19:38:26 Received from SPDK: [{Malloc0 512 131072 08cd0d67-eb57-41c2-957b-585faed7d81a} {Malloc1 512 131072 78c4b40f-dd16-42c1-b057-f95c11db7aaf}]
```
19 changes: 19 additions & 0 deletions client/Dockerfile
@@ -0,0 +1,19 @@
# syntax=docker/dockerfile:1

# Alpine is chosen for its small footprint
# compared to Ubuntu
FROM docker.io/library/golang:1.19.2-alpine

WORKDIR /app

# Download necessary Go modules
COPY go.mod ./
COPY go.sum ./
RUN go mod download

# Build the app
COPY *.go ./
RUN go build -o /opi-storage-client

EXPOSE 50051
CMD [ "/opi-storage-client" ]
3 changes: 3 additions & 0 deletions client/README.md
@@ -0,0 +1,3 @@
# OPI Storage API Client Prototype

This directory contains an example gRPC client for the OPI Storage APIs.
119 changes: 119 additions & 0 deletions client/backend.go
@@ -0,0 +1,119 @@
// The main package of the storage client
package main

import (
	"context"
	"log"
	"net"

	pc "github.com/opiproject/opi-api/common/v1/gen/go"
	pb "github.com/opiproject/opi-api/storage/v1/gen/go"
	"google.golang.org/grpc"
)

// doBackend exercises the backend storage services exposed by the bridge:
// NVMf remote controller, NullDebug, and Aio.
func doBackend(ctx context.Context, conn grpc.ClientConnInterface) {
	// NVMfRemoteController
	c4 := pb.NewNVMfRemoteControllerServiceClient(conn)
	addr, err := net.LookupIP("spdk")
	if err != nil {
		log.Fatalf("could not find SPDK IP address: %v", err)
	}
	rr0, err := c4.NVMfRemoteControllerConnect(ctx, &pb.NVMfRemoteControllerConnectRequest{Ctrl: &pb.NVMfRemoteController{Id: 8, Traddr: addr[0].String(), Trsvcid: 4444, Subnqn: "nqn.2016-06.io.spdk:cnode1"}})
	if err != nil {
		log.Fatalf("could not connect to Remote NVMf controller: %v", err)
	}
	log.Printf("Connected: %v", rr0)
	rr2, err := c4.NVMfRemoteControllerReset(ctx, &pb.NVMfRemoteControllerResetRequest{Id: 8})
	if err != nil {
		log.Fatalf("could not reset Remote NVMf controller: %v", err)
	}
	log.Printf("Reset: %v", rr2)
	rr3, err := c4.NVMfRemoteControllerList(ctx, &pb.NVMfRemoteControllerListRequest{Id: 8})
	if err != nil {
		log.Fatalf("could not list Remote NVMf controllers: %v", err)
	}
	log.Printf("List: %v", rr3)
	rr4, err := c4.NVMfRemoteControllerGet(ctx, &pb.NVMfRemoteControllerGetRequest{Id: 8})
	if err != nil {
		log.Fatalf("could not get Remote NVMf controller: %v", err)
	}
	log.Printf("Got: %v", rr4)
	rr5, err := c4.NVMfRemoteControllerStats(ctx, &pb.NVMfRemoteControllerStatsRequest{Id: 8})
	if err != nil {
		log.Fatalf("could not get stats from Remote NVMf controller: %v", err)
	}
	log.Printf("Stats: %v", rr5)
	rr1, err := c4.NVMfRemoteControllerDisconnect(ctx, &pb.NVMfRemoteControllerDisconnectRequest{Id: 8})
	if err != nil {
		log.Fatalf("could not disconnect from Remote NVMf controller: %v", err)
	}
	log.Printf("Disconnected: %v", rr1)

	// NullDebug
	c1 := pb.NewNullDebugServiceClient(conn)
	log.Printf("Testing NewNullDebugServiceClient")
	rs1, err := c1.NullDebugCreate(ctx, &pb.NullDebugCreateRequest{Device: &pb.NullDebug{Name: "OpiNull9"}})
	if err != nil {
		log.Fatalf("could not create NULL device: %v", err)
	}
	log.Printf("Added: %v", rs1)
	rs3, err := c1.NullDebugUpdate(ctx, &pb.NullDebugUpdateRequest{Device: &pb.NullDebug{Name: "OpiNull9"}})
	if err != nil {
		log.Fatalf("could not update NULL device: %v", err)
	}
	log.Printf("Updated: %v", rs3)
	rs4, err := c1.NullDebugList(ctx, &pb.NullDebugListRequest{})
	if err != nil {
		log.Fatalf("could not list NULL devices: %v", err)
	}
	log.Printf("Listed: %v", rs4)
	rs5, err := c1.NullDebugGet(ctx, &pb.NullDebugGetRequest{Id: 9})
	if err != nil {
		log.Fatalf("could not get NULL device: %v", err)
	}
	log.Printf("Got: %s", rs5.Device.Name)
	rs6, err := c1.NullDebugStats(ctx, &pb.NullDebugStatsRequest{Id: 9})
	if err != nil {
		log.Fatalf("could not get stats for NULL device: %v", err)
	}
	log.Printf("Stats: %s", rs6.Stats)
	rs2, err := c1.NullDebugDelete(ctx, &pb.NullDebugDeleteRequest{Id: 9})
	if err != nil {
		log.Fatalf("could not delete NULL device: %v", err)
	}
	log.Printf("Deleted: %v", rs2)

	// Aio
	c2 := pb.NewAioControllerServiceClient(conn)
	log.Printf("Testing NewAioControllerServiceClient")
	ra1, err := c2.AioControllerCreate(ctx, &pb.AioControllerCreateRequest{Device: &pb.AioController{Name: "OpiAio4", Filename: "/tmp/aio_bdev_file"}})
	if err != nil {
		log.Fatalf("could not create Aio device: %v", err)
	}
	log.Printf("Added: %v", ra1)
	ra3, err := c2.AioControllerUpdate(ctx, &pb.AioControllerUpdateRequest{Device: &pb.AioController{Name: "OpiAio4", Filename: "/tmp/aio_bdev_file"}})
	if err != nil {
		log.Fatalf("could not update Aio device: %v", err)
	}
	log.Printf("Updated: %v", ra3)
	ra4, err := c2.AioControllerGetList(ctx, &pb.AioControllerGetListRequest{})
	if err != nil {
		log.Fatalf("could not list Aio devices: %v", err)
	}
	log.Printf("Listed: %v", ra4)
	ra5, err := c2.AioControllerGet(ctx, &pb.AioControllerGetRequest{Handle: &pc.ObjectKey{Value: "4"}})
	if err != nil {
		log.Fatalf("could not get Aio device: %v", err)
	}
	log.Printf("Got: %s", ra5.Name)
	ra6, err := c2.AioControllerGetStats(ctx, &pb.AioControllerGetStatsRequest{Handle: &pc.ObjectKey{Value: "4"}})
	if err != nil {
		log.Fatalf("could not get stats for Aio device: %v", err)
	}
	log.Printf("Stats: %s", ra6.Stats)
	ra2, err := c2.AioControllerDelete(ctx, &pb.AioControllerDeleteRequest{Handle: &pc.ObjectKey{Value: "4"}})
	if err != nil {
		log.Fatalf("could not delete Aio device: %v", err)
	}
	log.Printf("Deleted: %v", ra2)
}
