Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

fix: don't use WakuMessageSize in req/resp protocols #2601

Merged
merged 3 commits into from
Apr 20, 2024
Merged
Show file tree
Hide file tree
Changes from 1 commit
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
15 changes: 9 additions & 6 deletions tests/waku_filter_v2/test_waku_client.nim
Original file line number Diff line number Diff line change
Expand Up @@ -126,8 +126,9 @@ suite "Waku Filter - End to End":

asyncTest "Subscribing to an empty content topic":
# When subscribing to an empty content topic
let subscribeResponse =
await wakuFilterClient.subscribe(serverRemotePeerInfo, pubsubTopic, newSeq[ContentTopic]())
gabrielmer marked this conversation as resolved.
Show resolved Hide resolved
let subscribeResponse = await wakuFilterClient.subscribe(
serverRemotePeerInfo, pubsubTopic, newSeq[ContentTopic]()
)

# Then the subscription is not successful
check:
Expand Down Expand Up @@ -1838,8 +1839,9 @@ suite "Waku Filter - End to End":
wakuFilter.subscriptions.isSubscribed(clientPeerId)

# When unsubscribing from an empty content topic
let unsubscribeResponse =
await wakuFilterClient.unsubscribe(serverRemotePeerInfo, pubsubTopic, newSeq[ContentTopic]())
let unsubscribeResponse = await wakuFilterClient.unsubscribe(
serverRemotePeerInfo, pubsubTopic, newSeq[ContentTopic]()
)

# Then the unsubscription is not successful
check:
Expand Down Expand Up @@ -2076,10 +2078,11 @@ suite "Waku Filter - End to End":
contentTopic = contentTopic, payload = getByteSequence(100 * 1024)
) # 100KiB
msg4 = fakeWakuMessage(
contentTopic = contentTopic, payload = getByteSequence(MaxPushSize - 1024)
contentTopic = contentTopic,
payload = getByteSequence(DefaultMaxPushSize - 1024),
) # Max Size (Inclusive Limit)
msg5 = fakeWakuMessage(
contentTopic = contentTopic, payload = getByteSequence(MaxPushSize)
contentTopic = contentTopic, payload = getByteSequence(DefaultMaxPushSize)
) # Max Size (Exclusive Limit)

# When sending the 1KiB message
Expand Down
4 changes: 2 additions & 2 deletions tests/waku_lightpush/test_client.nim
Original file line number Diff line number Diff line change
Expand Up @@ -209,11 +209,11 @@ suite "Waku Lightpush Client":
) # 100KiB
message4 = fakeWakuMessage(
contentTopic = contentTopic,
payload = getByteSequence(MaxRpcSize - overheadBytes - 1),
payload = getByteSequence(DefaultMaxWakuMessageSize - overheadBytes - 1),
chaitanyaprem marked this conversation as resolved.
Show resolved Hide resolved
) # Inclusive Limit
message5 = fakeWakuMessage(
contentTopic = contentTopic,
payload = getByteSequence(MaxRpcSize - overheadBytes),
payload = getByteSequence(DefaultMaxWakuMessageSize - overheadBytes),
) # Exclusive Limit

# When publishing the 1KiB payload
Expand Down
2 changes: 1 addition & 1 deletion tests/waku_peer_exchange/test_protocol.nim
Original file line number Diff line number Diff line change
Expand Up @@ -354,7 +354,7 @@ suite "Waku Peer Exchange":

var buffer: seq[byte]
await conn.writeLP(rpc.encode().buffer)
buffer = await conn.readLp(MaxRpcSize.int)
buffer = await conn.readLp(DefaultMaxRpcSize.int)

# Decode the response
let decodedBuff = PeerExchangeRpc.decode(buffer)
Expand Down
13 changes: 7 additions & 6 deletions tests/waku_relay/test_protocol.nim
Original file line number Diff line number Diff line change
Expand Up @@ -1042,14 +1042,15 @@ suite "Waku Relay":
) # 100KiB
msg4 = fakeWakuMessage(
contentTopic = contentTopic,
payload = getByteSequence(MaxWakuMessageSize - sizeEmptyMsg - 38),
payload = getByteSequence(DefaultMaxWakuMessageSize - sizeEmptyMsg - 38),
) # Max Size (Inclusive Limit)
msg5 = fakeWakuMessage(
contentTopic = contentTopic,
payload = getByteSequence(MaxWakuMessageSize - sizeEmptyMsg - 37),
payload = getByteSequence(DefaultMaxWakuMessageSize - sizeEmptyMsg - 37),
) # Max Size (Exclusive Limit)
msg6 = fakeWakuMessage(
contentTopic = contentTopic, payload = getByteSequence(MaxWakuMessageSize)
contentTopic = contentTopic,
payload = getByteSequence(DefaultMaxWakuMessageSize),
) # DefaultMaxWakuMessageSize -> Out of Max Size

# Notice that the message is wrapped with more data in https://github.com/status-im/nim-libp2p/blob/3011ba4326fa55220a758838835797ff322619fc/libp2p/protocols/pubsub/gossipsub.nim#L627-L632
Expand Down Expand Up @@ -1092,7 +1093,7 @@ suite "Waku Relay":
(pubsubTopic, msg3) == handlerFuture.read()
(pubsubTopic, msg3) == otherHandlerFuture.read()

# When sending the 'MaxWakuMessageSize - sizeEmptyMsg - 38' message
# When sending the 'DefaultMaxWakuMessageSize - sizeEmptyMsg - 38' message
handlerFuture = newPushHandlerFuture()
otherHandlerFuture = newPushHandlerFuture()
discard await node.publish(pubsubTopic, msg4)
Expand All @@ -1104,7 +1105,7 @@ suite "Waku Relay":
(pubsubTopic, msg4) == handlerFuture.read()
(pubsubTopic, msg4) == otherHandlerFuture.read()

# When sending the 'MaxWakuMessageSize - sizeEmptyMsg - 37' message
# When sending the 'DefaultMaxWakuMessageSize - sizeEmptyMsg - 37' message
handlerFuture = newPushHandlerFuture()
otherHandlerFuture = newPushHandlerFuture()
discard await node.publish(pubsubTopic, msg5)
Expand All @@ -1115,7 +1116,7 @@ suite "Waku Relay":
not await otherHandlerFuture.withTimeout(FUTURE_TIMEOUT)
(pubsubTopic, msg5) == handlerFuture.read()

# When sending the 'MaxWakuMessageSize' message
# When sending the 'DefaultMaxWakuMessageSize' message
handlerFuture = newPushHandlerFuture()
otherHandlerFuture = newPushHandlerFuture()
discard await node.publish(pubsubTopic, msg6)
Expand Down
8 changes: 4 additions & 4 deletions tests/wakunode_rest/test_rest_relay.nim
Original file line number Diff line number Diff line change
Expand Up @@ -596,7 +596,7 @@ suite "Waku v2 Rest API - Relay":
let response = await client.relayPostMessagesV1(
DefaultPubsubTopic,
RelayWakuMessage(
payload: base64.encode(getByteSequence(MaxWakuMessageSize)),
payload: base64.encode(getByteSequence(DefaultMaxWakuMessageSize)),
# Message will be bigger than the max size
contentTopic: some(DefaultContentTopic),
timestamp: some(int64(2022)),
Expand All @@ -608,7 +608,7 @@ suite "Waku v2 Rest API - Relay":
response.status == 400
$response.contentType == $MIMETYPE_TEXT
response.data ==
fmt"Failed to publish: Message size exceeded maximum of {MaxWakuMessageSize} bytes"
fmt"Failed to publish: Message size exceeded maximum of {DefaultMaxWakuMessageSize} bytes"

await restServer.stop()
await restServer.closeWait()
Expand Down Expand Up @@ -657,7 +657,7 @@ suite "Waku v2 Rest API - Relay":
# When
let response = await client.relayPostAutoMessagesV1(
RelayWakuMessage(
payload: base64.encode(getByteSequence(MaxWakuMessageSize)),
payload: base64.encode(getByteSequence(DefaultMaxWakuMessageSize)),
# Message will be bigger than the max size
contentTopic: some(DefaultContentTopic),
timestamp: some(int64(2022)),
Expand All @@ -669,7 +669,7 @@ suite "Waku v2 Rest API - Relay":
response.status == 400
$response.contentType == $MIMETYPE_TEXT
response.data ==
fmt"Failed to publish: Message size exceeded maximum of {MaxWakuMessageSize} bytes"
fmt"Failed to publish: Message size exceeded maximum of {DefaultMaxWakuMessageSize} bytes"

await restServer.stop()
await restServer.closeWait()
Expand Down
9 changes: 5 additions & 4 deletions waku/factory/node_factory.nim
Original file line number Diff line number Diff line change
Expand Up @@ -140,14 +140,15 @@ proc setupProtocols(

peerExchangeHandler = some(handlePeerExchange)

#if none of the protos are enabled, still parsing this..but its ok i guess.
let parsedMaxMsgSize = parseMsgSize(conf.maxMessageSize).valueOr:
return err("failed to parse 'max-num-bytes-msg-size' param: " & $error)

if conf.relay:
let shards =
conf.contentTopics.mapIt(node.wakuSharding.getShard(it).expect("Valid Shard"))
let pubsubTopics = conf.pubsubTopics & shards

let parsedMaxMsgSize = parseMsgSize(conf.maxMessageSize).valueOr:
return err("failed to parse 'max-num-bytes-msg-size' param: " & $error)

debug "Setting max message size", num_bytes = parsedMaxMsgSize

try:
Expand Down Expand Up @@ -261,7 +262,7 @@ proc setupProtocols(
try:
let rateLimitSetting: RateLimitSetting =
(conf.requestRateLimit, chronos.seconds(conf.requestRatePeriod))
await mountLightPush(node, rateLimitSetting)
await mountLightPush(node, rateLimitSetting, int(parsedMaxMsgSize))
except CatchableError:
return err("failed to mount waku lightpush protocol: " & getCurrentExceptionMsg())

Expand Down
11 changes: 7 additions & 4 deletions waku/node/waku_node.nim
Original file line number Diff line number Diff line change
Expand Up @@ -388,7 +388,7 @@ proc mountRelay*(
node: WakuNode,
pubsubTopics: seq[string] = @[],
peerExchangeHandler = none(RoutingRecordsHandler),
maxMessageSize = int(MaxWakuMessageSize),
maxMessageSize = int(DefaultMaxWakuMessageSize),
) {.async, gcsafe.} =
if not node.wakuRelay.isNil():
error "wakuRelay already mounted, skipping"
Expand Down Expand Up @@ -793,7 +793,9 @@ when defined(waku_exp_store_resume):
## Waku lightpush

proc mountLightPush*(
node: WakuNode, rateLimit: RateLimitSetting = DefaultGlobalNonRelayRateLimit
node: WakuNode,
rateLimit: RateLimitSetting = DefaultGlobalNonRelayRateLimit,
maxMessageSize = int(DefaultMaxWakuMessageSize),
) {.async.} =
info "mounting light push"

Expand All @@ -818,8 +820,9 @@ proc mountLightPush*(
return ok()

debug "mounting lightpush with relay"
node.wakuLightPush =
WakuLightPush.new(node.peerManager, node.rng, pushHandler, some(rateLimit))
node.wakuLightPush = WakuLightPush.new(
node.peerManager, node.rng, pushHandler, some(rateLimit), maxMessageSize
)

if node.started:
# Node has started already. Let's start lightpush too.
Expand Down
2 changes: 1 addition & 1 deletion waku/waku_core/message/default_values.nim
Original file line number Diff line number Diff line change
Expand Up @@ -3,6 +3,6 @@ import ../../common/utils/parse_size_units
const
## https://rfc.vac.dev/spec/64/#message-size
DefaultMaxWakuMessageSizeStr* = "150KiB" # Remember that 1 MiB is the PubSub default
MaxWakuMessageSize* = parseCorrectMsgSize(DefaultMaxWakuMessageSizeStr)
DefaultMaxWakuMessageSize* = parseCorrectMsgSize(DefaultMaxWakuMessageSizeStr)

DefaultSafetyBufferProtocolOverhead* = 64 * 1024 # overhead measured in bytes
12 changes: 6 additions & 6 deletions waku/waku_filter_v2/client.nim
Original file line number Diff line number Diff line change
Expand Up @@ -41,7 +41,7 @@ proc sendSubscribeRequest(
# TODO: this can raise an exception
await connection.writeLP(filterSubscribeRequest.encode().buffer)

let respBuf = await connection.readLp(MaxSubscribeResponseSize)
let respBuf = await connection.readLp(DefaultMaxSubscribeResponseSize)
let respDecodeRes = FilterSubscribeResponse.decode(respBuf)
if respDecodeRes.isErr():
trace "Failed to decode filter subscribe response", servicePeer
Expand Down Expand Up @@ -79,7 +79,7 @@ proc subscribe*(
wfc: WakuFilterClient,
servicePeer: RemotePeerInfo,
pubsubTopic: PubsubTopic,
contentTopics: ContentTopic|seq[ContentTopic],
contentTopics: ContentTopic | seq[ContentTopic],
): Future[FilterSubscribeResult] {.async.} =
var contentTopicSeq: seq[ContentTopic]
when contentTopics is seq[ContentTopic]:
Expand All @@ -98,14 +98,14 @@ proc unsubscribe*(
wfc: WakuFilterClient,
servicePeer: RemotePeerInfo,
pubsubTopic: PubsubTopic,
contentTopics: ContentTopic|seq[ContentTopic],
contentTopics: ContentTopic | seq[ContentTopic],
): Future[FilterSubscribeResult] {.async.} =
var contentTopicSeq: seq[ContentTopic]
when contentTopics is seq[ContentTopic]:
contentTopicSeq = contentTopics
else:
contentTopicSeq = @[contentTopics]

let requestId = generateRequestId(wfc.rng)
let filterSubscribeRequest = FilterSubscribeRequest.unsubscribe(
requestId = requestId, pubsubTopic = pubsubTopic, contentTopics = contentTopicSeq
Expand All @@ -127,7 +127,7 @@ proc registerPushHandler*(wfc: WakuFilterClient, handler: FilterPushHandler) =

proc initProtocolHandler(wfc: WakuFilterClient) =
proc handler(conn: Connection, proto: string) {.async.} =
let buf = await conn.readLp(int(MaxPushSize))
let buf = await conn.readLp(int(DefaultMaxPushSize))

let decodeRes = MessagePush.decode(buf)
if decodeRes.isErr():
Expand All @@ -152,4 +152,4 @@ proc new*(
): T =
let wfc = WakuFilterClient(rng: rng, peerManager: peerManager, pushHandlers: @[])
wfc.initProtocolHandler()
wfc
wfc
2 changes: 1 addition & 1 deletion waku/waku_filter_v2/protocol.nim
Original file line number Diff line number Diff line change
Expand Up @@ -249,7 +249,7 @@ proc initProtocolHandler(wf: WakuFilter) =
proc handler(conn: Connection, proto: string) {.async.} =
trace "filter subscribe request handler triggered", peerId = conn.peerId

let buf = await conn.readLp(int(MaxSubscribeSize))
let buf = await conn.readLp(int(DefaultMaxSubscribeSize))

let decodeRes = FilterSubscribeRequest.decode(buf)
if decodeRes.isErr():
Expand Down
6 changes: 3 additions & 3 deletions waku/waku_filter_v2/rpc_codec.nim
Original file line number Diff line number Diff line change
Expand Up @@ -7,10 +7,10 @@ import std/options
import ../common/protobuf, ../waku_core, ./rpc

const
MaxSubscribeSize* = 10 * MaxWakuMessageSize + 64 * 1024
DefaultMaxSubscribeSize* = 10 * DefaultMaxWakuMessageSize + 64 * 1024
# We add a 64kB safety buffer for protocol overhead
MaxSubscribeResponseSize* = 64 * 1024 # Responses are small. 64kB safety buffer.
MaxPushSize* = 10 * MaxWakuMessageSize + 64 * 1024
DefaultMaxSubscribeResponseSize* = 64 * 1024 # Responses are small. 64kB safety buffer.
DefaultMaxPushSize* = 10 * DefaultMaxWakuMessageSize + 64 * 1024
# We add a 64kB safety buffer for protocol overhead

proc encode*(rpc: FilterSubscribeRequest): ProtoBuffer =
Expand Down
2 changes: 1 addition & 1 deletion waku/waku_lightpush/client.nim
Original file line number Diff line number Diff line change
Expand Up @@ -39,7 +39,7 @@ proc sendPushRequest(

var buffer: seq[byte]
try:
buffer = await connection.readLp(MaxRpcSize.int)
buffer = await connection.readLp(DefaultMaxRpcSize.int)
except LPStreamRemoteClosedError:
return err("Exception reading: " & getCurrentExceptionMsg())

Expand Down
5 changes: 4 additions & 1 deletion waku/waku_lightpush/protocol.nim
Original file line number Diff line number Diff line change
Expand Up @@ -25,6 +25,7 @@ type WakuLightPush* = ref object of LPProtocol
peerManager*: PeerManager
pushHandler*: PushMessageHandler
requestRateLimiter*: Option[TokenBucket]
maxRPCSize*: int

proc handleRequest*(
wl: WakuLightPush, peerId: PeerId, buffer: seq[byte]
Expand Down Expand Up @@ -79,7 +80,7 @@ proc handleRequest*(

proc initProtocolHandler(wl: WakuLightPush) =
proc handle(conn: Connection, proto: string) {.async.} =
let buffer = await conn.readLp(MaxRpcSize.int)
let buffer = await conn.readLp(wl.maxRpcSize)
chaitanyaprem marked this conversation as resolved.
Show resolved Hide resolved
let rpc = await handleRequest(wl, conn.peerId, buffer)
await conn.writeLp(rpc.encode().buffer)

Expand All @@ -92,12 +93,14 @@ proc new*(
rng: ref rand.HmacDrbgContext,
pushHandler: PushMessageHandler,
rateLimitSetting: Option[RateLimitSetting] = none[RateLimitSetting](),
maxMessageSize = int(DefaultMaxWakuMessageSize),
): T =
let wl = WakuLightPush(
rng: rng,
peerManager: peerManager,
pushHandler: pushHandler,
requestRateLimiter: newTokenBucket(rateLimitSetting),
maxRPCSize: calculateRPCSize(maxMessageSize),
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

I think adding state and dynamic calculation here is overkill. The idea of the max stream read size was simply to have some reasonable safety limit that will prevent unrealistically (read: adversarially) large requests (which presumably could be an attack vector). We could have just used a very, very big magic number, but opted to initialise our constants with some value based off the MaxWakuMsgSize with safety margins to show that there was some rhyme and reason behind our thinking. Because this max msg size value is now configurable, our generous high limit may indeed now not be generous enough for differently configured nodes - at least in the lightpush case. However, even here the node could likely not configure message size to anything larger than the underlying libp2p limit (of 1MB). If we want to change this to be future proof, I'd rather keep using the underlying libp2p constant (1 MB) for all nodes and keep the simplicity of pure constants or just ignore the max rpc size limit (set to -1 all around). The latter could presumably expose node to more attacks, though.

Copy link
Contributor Author

@chaitanyaprem chaitanyaprem Apr 18, 2024

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

yep, realised this ... will change it to -1.

Copy link
Contributor Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

The latter could presumably expose node to more attacks, though

I am wondering what kind of attacks, because the read would be done by the libp2p layer anyway, irrespective of the length passed. It is only the length check for the message that is based on what is passed to it.
We could keep it at 1MB, but then it is going to cause issues for protocols like Store, where the response can include 100*WakuMessageSize bytes, which would definitely be more than 1MB even with the default value.

If we really have to deal with attacks, then maybe there should be some sort of length negotiation as part of metadata protocol or some other protocol when nodes connect to each other. This i feel is overkill at this point and can be taken up if we notice any vulnerabilities.

Copy link
Contributor Author

@chaitanyaprem chaitanyaprem Apr 19, 2024

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

However, even here the node could likely not configure message size to anything larger than the underlying libp2p limit (of 1MB).

Also, if this is a hard limit set by libp2p...then maybe we shouldn't let users configure anything more than 1MB-WakuHeaders as maxMsgSize.

Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

I am wondering what kind of attacks

Mmm. Indeed, not sure if possible impacts. It seems to me if someone were to pack the length prefix (or just send a very large amount of bytes), we'd have some potential to DoS the stream. This is a sanity check for the buffer length and if not reasonable the stream reader exits: https://github.com/vacp2p/nim-libp2p/blob/b30b2656d52ee304bd56f8f8bbf59ab82b658a36/libp2p/stream/lpstream.nim#L238-L239

Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

We could keep it to 1MB, but then it is going to cause issues for protocols like Store where response can include 100*WakuMessageSize bytes which would definitely be more than 1MB even with default value.

Which is why for store the max rpc size is MaxPageSize*MaxWakuMsgSize + safety_margin. Agree though. This is complicated by configurable sizes.

Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

This i feel is overkill at this point and can be taken up if we notice any vulnerabilities.

Agree.

)
wl.initProtocolHandler()
return wl
5 changes: 4 additions & 1 deletion waku/waku_lightpush/rpc_codec.nim
Original file line number Diff line number Diff line change
Expand Up @@ -6,8 +6,11 @@ else:
import std/options
import ../common/protobuf, ../waku_core, ./rpc

const MaxRpcSize* = MaxWakuMessageSize + 64 * 1024
proc calculateRPCSize*(msgSize: int): int =
  ## Computes the maximum RPC size for a given max Waku message size,
  ## adding a 64 kB safety buffer to account for protocol overhead.
  const safetyBufferBytes = 64 * 1024
  msgSize + safetyBufferBytes

const DefaultMaxRpcSize* = -1

proc encode*(rpc: PushRequest): ProtoBuffer =
var pb = initProtoBuffer()
Expand Down
6 changes: 3 additions & 3 deletions waku/waku_peer_exchange/protocol.nim
Original file line number Diff line number Diff line change
Expand Up @@ -29,7 +29,7 @@ logScope:
const
# We add a 64kB safety buffer for protocol overhead.
# 10x-multiplier also for safety
MaxRpcSize* = 10 * MaxWakuMessageSize + 64 * 1024
DefaultMaxRpcSize* = 10 * DefaultMaxWakuMessageSize + 64 * 1024
# TODO what is the expected size of a PX message? As currently specified, it can contain an arbitrary number of ENRs...
MaxPeersCacheSize = 60
CacheRefreshInterval = 15.minutes
Expand Down Expand Up @@ -61,7 +61,7 @@ proc request*(
var error: string
try:
await conn.writeLP(rpc.encode().buffer)
buffer = await conn.readLp(MaxRpcSize.int)
buffer = await conn.readLp(DefaultMaxRpcSize.int)
except CatchableError as exc:
waku_px_errors.inc(labelValues = [exc.msg])
error = $exc.msg
Expand Down Expand Up @@ -153,7 +153,7 @@ proc initProtocolHandler(wpx: WakuPeerExchange) =
proc handler(conn: Connection, proto: string) {.async, gcsafe, closure.} =
var buffer: seq[byte]
try:
buffer = await conn.readLp(MaxRpcSize.int)
buffer = await conn.readLp(DefaultMaxRpcSize.int)
except CatchableError as exc:
waku_px_errors.inc(labelValues = [exc.msg])
return
Expand Down
2 changes: 1 addition & 1 deletion waku/waku_relay/protocol.nim
Original file line number Diff line number Diff line change
Expand Up @@ -151,7 +151,7 @@ proc initProtocolHandler(w: WakuRelay) =
w.codec = WakuRelayCodec

proc new*(
T: type WakuRelay, switch: Switch, maxMessageSize = int(MaxWakuMessageSize)
T: type WakuRelay, switch: Switch, maxMessageSize = int(DefaultMaxWakuMessageSize)
): WakuRelayResult[T] =
## maxMessageSize: max num bytes that are allowed for the WakuMessage

Expand Down
4 changes: 3 additions & 1 deletion waku/waku_store/client.nim
Original file line number Diff line number Diff line change
Expand Up @@ -49,7 +49,9 @@ proc sendHistoryQueryRPC(
let reqRpc = HistoryRPC(requestId: generateRequestId(w.rng), query: some(req.toRPC()))
await connection.writeLP(reqRpc.encode().buffer)

let buf = await connection.readLp(MaxRpcSize.int)
#TODO: I see a challenge here: if the store node uses a different MaxRPCSize, this read will fail.
# Need to find a workaround for this.
let buf = await connection.readLp(DefaultMaxRpcSize.int)
let respDecodeRes = HistoryRPC.decode(buf)
if respDecodeRes.isErr():
waku_store_errors.inc(labelValues = [decodeRpcFailure])
Expand Down
2 changes: 1 addition & 1 deletion waku/waku_store/protocol.nim
Original file line number Diff line number Diff line change
Expand Up @@ -46,7 +46,7 @@ type WakuStore* = ref object of LPProtocol

proc initProtocolHandler(ws: WakuStore) =
proc handler(conn: Connection, proto: string) {.async.} =
let buf = await conn.readLp(MaxRpcSize.int)
let buf = await conn.readLp(DefaultMaxRpcSize.int)

let decodeRes = HistoryRPC.decode(buf)
if decodeRes.isErr():
Expand Down
Loading
Loading