From ef72b2ba55e40df4b689d50cf6d17a23a94aaef2 Mon Sep 17 00:00:00 2001
From: Ivan Folgueira Bande
Date: Tue, 30 Apr 2024 21:35:52 +0200
Subject: [PATCH] change info to debug the log lines that were previously set to info

---
 waku/node/waku_node.nim          | 10 ++++++----
 waku/waku_archive/archive.nim    |  4 ++--
 waku/waku_filter_v2/protocol.nim | 10 +++++-----
 waku/waku_lightpush/protocol.nim |  8 ++++----
 waku/waku_relay/protocol.nim     | 10 +++++-----
 5 files changed, 22 insertions(+), 20 deletions(-)

diff --git a/waku/node/waku_node.nim b/waku/node/waku_node.nim
index 7762991247..3dd514f6c4 100644
--- a/waku/node/waku_node.nim
+++ b/waku/node/waku_node.nim
@@ -227,7 +227,7 @@ proc registerRelayDefaultHandler(node: WakuNode, topic: PubsubTopic) =
     return

   proc traceHandler(topic: PubsubTopic, msg: WakuMessage) {.async, gcsafe.} =
-    info "waku.relay received",
+    debug "waku.relay received",
       my_peer_id = node.peerId,
       pubsubTopic = topic,
       msg_hash = topic.computeMessageHash(msg).to0xHex(),
@@ -914,7 +914,9 @@ proc mountLightPush*(

     if publishedCount == 0:
       ## Agreed change expected to the lightpush protocol to better handle such case. https://github.com/waku-org/pm/issues/93
-      info "Lightpush request has not been published to any peers"
+      let msgHash = computeMessageHash(pubsubTopic, message).to0xHex()
+      debug "Lightpush request has not been published to any peers",
+        msg_hash = msgHash

     return ok()

@@ -955,7 +957,7 @@ proc lightpushPublish*(
 ): Future[WakuLightPushResult[void]] {.async, gcsafe.} =
   let msgHash = pubsubTopic.computeMessageHash(message).to0xHex()
   if not node.wakuLightpushClient.isNil():
-    info "publishing message with lightpush",
+    debug "publishing message with lightpush",
       pubsubTopic = pubsubTopic,
       contentTopic = message.contentTopic,
       target_peer_id = peer.peerId,
@@ -963,7 +965,7 @@
     return await node.wakuLightpushClient.publish(pubsubTopic, message, peer)

   if not node.wakuLightPush.isNil():
-    info "publishing message with self hosted lightpush",
+    debug "publishing message with self hosted lightpush",
       pubsubTopic = pubsubTopic,
       contentTopic = message.contentTopic,
       target_peer_id = peer.peerId,
diff --git a/waku/waku_archive/archive.nim b/waku/waku_archive/archive.nim
index c66544ecff..1829ff0552 100644
--- a/waku/waku_archive/archive.nim
+++ b/waku/waku_archive/archive.nim
@@ -115,9 +115,9 @@ proc handleMessage*(

   (await self.driver.put(pubsubTopic, msg, msgDigest, msgHash, msgTimestamp)).isOkOr:
     waku_archive_errors.inc(labelValues = [insertFailure])
-    debug "failed to insert message", err = error
+    error "failed to insert message", error = error

-  info "message archived",
+  debug "message archived",
     msg_hash = msgHashHex,
     pubsubTopic = pubsubTopic,
     contentTopic = msg.contentTopic,
diff --git a/waku/waku_filter_v2/protocol.nim b/waku/waku_filter_v2/protocol.nim
index ab0de301b0..a072a99871 100644
--- a/waku/waku_filter_v2/protocol.nim
+++ b/waku/waku_filter_v2/protocol.nim
@@ -176,7 +176,7 @@ proc pushToPeers(
   let msgHash =
     messagePush.pubsubTopic.computeMessageHash(messagePush.wakuMessage).to0xHex()

-  info "pushing message to subscribed peers",
+  debug "pushing message to subscribed peers",
     pubsubTopic = messagePush.pubsubTopic,
     contentTopic = messagePush.wakuMessage.contentTopic,
     target_peer_ids = targetPeerIds,
@@ -216,7 +216,7 @@ proc handleMessage*(
 ) {.async.} =
   let msgHash = computeMessageHash(pubsubTopic, message).to0xHex()

-  info "handling message",
+  debug "handling message",
     pubsubTopic = pubsubTopic, message = message, msg_hash = msgHash

   let handleMessageStartTime = Moment.now()
@@ -226,7 +226,7 @@ proc handleMessage*(
   let subscribedPeers =
     wf.subscriptions.findSubscribedPeers(pubsubTopic, message.contentTopic)
   if subscribedPeers.len == 0:
-    info "no subscribed peers found",
+    debug "no subscribed peers found",
       pubsubTopic = pubsubTopic, contentTopic = message.contentTopic
     return

@@ -243,7 +243,7 @@
       target_peer_ids = subscribedPeers.mapIt(shortLog(it))
     waku_filter_errors.inc(labelValues = [pushTimeoutFailure])
   else:
-    info "pushed message succesfully to all subscribers",
+    debug "pushed message succesfully to all subscribers",
       pubsubTopic = pubsubTopic,
       contentTopic = message.contentTopic,
       msg_hash = msgHash,
@@ -273,7 +273,7 @@ proc initProtocolHandler(wf: WakuFilter) =

     let response = wf.handleSubscribeRequest(conn.peerId, request)

-    info "sending filter subscribe response",
+    debug "sending filter subscribe response",
       peer_id = shortLog(conn.peerId), response = response

     await conn.writeLp(response.encode().buffer) #TODO: toRPC() separation here
diff --git a/waku/waku_lightpush/protocol.nim b/waku/waku_lightpush/protocol.nim
index a8a2e9a8e7..c2ab01265c 100644
--- a/waku/waku_lightpush/protocol.nim
+++ b/waku/waku_lightpush/protocol.nim
@@ -28,7 +28,7 @@ type WakuLightPush* = ref object of LPProtocol

 proc extractInfoFromReq(
     self: PushRPC
-): tuple[reqId: string, pubsubTopic: string, msgHash: string, message: WakuMessage] =
+): tuple[requestId: string, pubsubTopic: string, msgHash: string, message: WakuMessage] =
   ## Simply extract a tuple with the underlying data stored in `PushRPC`
   let
     requestId = self.requestId
@@ -70,7 +70,7 @@
     error "lightpush request rejected due rate limit exceeded",
       peer_id = peerId,
-      requestId = reqInfo.reqId,
+      requestId = reqInfo.requestId,
       pubsubTopic = reqInfo.pubsubTopic,
       msg_hash = reqInfo.msgHash
@@ -82,7 +82,7 @@
     let reqInfo = reqDecodeRes.get().extractInfoFromReq()
-    requestId = reqInfo.reqId
+    requestId = reqInfo.requestId
     pubsubTopic = reqInfo.pubsubTopic
     msgHash = reqInfo.msgHash
@@ -98,7 +98,7 @@
       pubsubTopic = pubsubTopic, msg_hash = msgHash, error = pushResponseInfo

   if isSuccess:
-    info "lightpush request processed correctly",
+    debug "lightpush request processed correctly",
       lightpush_client_peer_id = shortLog(peerId),
       requestId = requestId,
       pubsubTopic = pubsubTopic,
diff --git a/waku/waku_relay/protocol.nim b/waku/waku_relay/protocol.nim
index 2ee980a493..171b1deeb5 100644
--- a/waku/waku_relay/protocol.nim
+++ b/waku/waku_relay/protocol.nim
@@ -201,12 +201,11 @@ proc generateOrderedValidator(w: WakuRelay): auto {.gcsafe.} =
   ): Future[ValidationResult] {.async.} =
     # can be optimized by checking if the message is a WakuMessage without allocating memory
     # see nim-libp2p protobuf library
-    let msgRes = WakuMessage.decode(message.data)
-    if msgRes.isErr():
+    let msg = WakuMessage.decode(message.data).valueOr:
       error "protocol generateOrderedValidator reject decode error",
-        pubsubTopic = pubsubTopic, error = msgRes.error
+        pubsubTopic = pubsubTopic, error = $error
       return ValidationResult.Reject
-    let msg = msgRes.get()
+
     let msgHash = computeMessageHash(pubsubTopic, msg).to0xHex()

     # now sequentially validate the message
@@ -220,6 +219,7 @@
         return validatorRes

     return ValidationResult.Accept
+
   return wrappedValidator

 proc validateMessage*(
@@ -307,6 +307,6 @@ proc publish*(
   let data = message.encode().buffer
   let msgHash = computeMessageHash(pubsubTopic, message).to0xHex()

-  info "start publish Waku message", msg_hash = msgHash, pubsubTopic = pubsubTopic
+  debug "start publish Waku message", msg_hash = msgHash, pubsubTopic = pubsubTopic

   return await procCall GossipSub(w).publish(pubsubTopic, data)
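
Note on the logging idiom used throughout this patch: the touched call sites use nim-chronicles structured logging, where each statement picks a severity (trace, debug, info, warn, error) and attaches key/value fields, and debug lines are only emitted when the binary is built or run with a log level that includes DEBUG (for example, compiling with -d:chronicles_log_level=DEBUG). The sketch below is illustrative only and is not part of the patch: the proc name and values are invented, and it assumes a standalone Nim module with the chronicles package available.

  # Illustrative sketch only; not taken from the Waku codebase.
  import chronicles

  proc archiveExample(msgHash: string, pubsubTopic: string) =
    # Stays visible at the default INFO level.
    info "node event", detail = "operator-facing lines keep their level"
    # Per-message lines like the ones touched by this patch now use debug,
    # so they only appear when a DEBUG-or-lower log level is enabled.
    debug "message archived", msg_hash = msgHash, pubsubTopic = pubsubTopic

  when isMainModule:
    archiveExample("0x1234", "/waku/2/rs/0/0")

The apparent intent of moving per-message logs to debug while leaving other events at info is to keep default log output bounded as message traffic grows.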