From 6a105b19dcc84da4229615dc414d0c9a5e549be1 Mon Sep 17 00:00:00 2001 From: Mark Stapp Date: Thu, 26 Sep 2024 11:09:35 -0400 Subject: [PATCH 1/5] bgpd: Replace per-peer connection error with per-bgp Replace the per-peer connection error with a per-bgp event and a list. The io pthread enqueues peers per-bgp-instance, and the error-handing code can process multiple peers if there have been multiple failures. Signed-off-by: Mark Stapp --- bgpd/bgp_io.c | 4 +-- bgpd/bgp_packet.c | 69 +++++++++++++++++++++++++------------ bgpd/bgpd.c | 88 +++++++++++++++++++++++++++++++++++++++++++++++ bgpd/bgpd.h | 56 +++++++++++++++++++++++++++++- 4 files changed, 191 insertions(+), 26 deletions(-) diff --git a/bgpd/bgp_io.c b/bgpd/bgp_io.c index 9e9251c85459..a00fed0973e9 100644 --- a/bgpd/bgp_io.c +++ b/bgpd/bgp_io.c @@ -100,7 +100,6 @@ void bgp_reads_off(struct peer_connection *connection) event_cancel_async(fpt->master, &connection->t_read, NULL); EVENT_OFF(connection->t_process_packet); - EVENT_OFF(connection->t_process_packet_error); UNSET_FLAG(connection->thread_flags, PEER_THREAD_READS_ON); } @@ -252,8 +251,7 @@ static void bgp_process_reads(struct event *thread) /* Handle the error in the main pthread, include the * specific state change from 'bgp_read'. */ - event_add_event(bm->master, bgp_packet_process_error, connection, - code, &connection->t_process_packet_error); + bgp_enqueue_conn_err_peer(peer->bgp, connection->peer, code); goto done; } diff --git a/bgpd/bgp_packet.c b/bgpd/bgp_packet.c index a76a300c11bc..1bddd543c658 100644 --- a/bgpd/bgp_packet.c +++ b/bgpd/bgp_packet.c @@ -4198,35 +4198,60 @@ void bgp_send_delayed_eor(struct bgp *bgp) } /* - * Task callback to handle socket error encountered in the io pthread. We avoid - * having the io pthread try to enqueue fsm events or mess with the peer - * struct. + * Task callback in the main pthread to handle socket error + * encountered in the io pthread. We avoid having the io pthread try + * to enqueue fsm events or mess with the peer struct. 
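+ * The io pthread appends failing peers to a per-bgp list and schedules
+ * this event; each pass dequeues a bounded batch of peers and the event
+ * is rescheduled if any peers remain on the list.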
*/ + +/* Max number of peers to process without rescheduling */ +#define BGP_CONN_ERROR_DEQUEUE_MAX 10 + void bgp_packet_process_error(struct event *thread) { struct peer_connection *connection; struct peer *peer; - int code; + struct bgp *bgp; + int counter = 0; + bool more_p = false; - connection = EVENT_ARG(thread); - peer = connection->peer; - code = EVENT_VAL(thread); + bgp = EVENT_ARG(thread); - if (bgp_debug_neighbor_events(peer)) - zlog_debug("%s [Event] BGP error %d on fd %d", peer->host, code, - connection->fd); - - /* Closed connection or error on the socket */ - if (peer_established(connection)) { - if ((CHECK_FLAG(peer->flags, PEER_FLAG_GRACEFUL_RESTART) - || CHECK_FLAG(peer->flags, - PEER_FLAG_GRACEFUL_RESTART_HELPER)) - && CHECK_FLAG(peer->sflags, PEER_STATUS_NSF_MODE)) { - peer->last_reset = PEER_DOWN_NSF_CLOSE_SESSION; - SET_FLAG(peer->sflags, PEER_STATUS_NSF_WAIT); - } else - peer->last_reset = PEER_DOWN_CLOSE_SESSION; + /* Dequeue peers from the error list */ + while ((peer = bgp_dequeue_conn_err_peer(bgp, &more_p)) != NULL) { + connection = peer->connection; + + if (bgp_debug_neighbor_events(peer)) + zlog_debug("%s [Event] BGP error %d on fd %d", + peer->host, peer->connection_errcode, + connection->fd); + + /* Closed connection or error on the socket */ + if (peer_established(connection)) { + if ((CHECK_FLAG(peer->flags, PEER_FLAG_GRACEFUL_RESTART) + || CHECK_FLAG(peer->flags, + PEER_FLAG_GRACEFUL_RESTART_HELPER)) + && CHECK_FLAG(peer->sflags, PEER_STATUS_NSF_MODE)) { + peer->last_reset = PEER_DOWN_NSF_CLOSE_SESSION; + SET_FLAG(peer->sflags, PEER_STATUS_NSF_WAIT); + } else + peer->last_reset = PEER_DOWN_CLOSE_SESSION; + } + + /* No need for keepalives, if enabled */ + bgp_keepalives_off(connection); + + bgp_event_update(connection, peer->connection_errcode); + + counter++; + if (counter >= BGP_CONN_ERROR_DEQUEUE_MAX) + break; } - bgp_event_update(connection, code); + /* Reschedule event if necessary */ + if (more_p) + bgp_conn_err_reschedule(bgp); + + if (bgp_debug_neighbor_events(NULL)) + zlog_debug("%s: dequeued and processed %d peers", __func__, + counter); } diff --git a/bgpd/bgpd.c b/bgpd/bgpd.c index a8431bee978a..a1ec16ec4e5b 100644 --- a/bgpd/bgpd.c +++ b/bgpd/bgpd.c @@ -87,6 +87,9 @@ DEFINE_QOBJ_TYPE(peer); DEFINE_HOOK(bgp_inst_delete, (struct bgp *bgp), (bgp)); DEFINE_HOOK(bgp_instance_state, (struct bgp *bgp), (bgp)); +/* Peers with connection error/failure, per bgp instance */ +DECLARE_LIST(bgp_peer_conn_errlist, struct peer, conn_err_link); + /* BGP process wide configuration. 
*/ static struct bgp_master bgp_master; @@ -2710,6 +2713,9 @@ int peer_delete(struct peer *peer) assert(peer->connection->status != Deleted); + if (bgp_debug_neighbor_events(peer)) + zlog_debug("%s: peer %pBP", __func__, peer); + bgp = peer->bgp; accept_peer = CHECK_FLAG(peer->sflags, PEER_STATUS_ACCEPT_PEER); @@ -2727,6 +2733,13 @@ int peer_delete(struct peer *peer) PEER_THREAD_READS_ON)); assert(!CHECK_FLAG(peer->thread_flags, PEER_THREAD_KEEPALIVES_ON)); + /* Ensure the peer is removed from the connection error list */ + frr_with_mutex (&bgp->peer_errs_mtx) { + if (bgp_peer_conn_errlist_anywhere(peer)) + bgp_peer_conn_errlist_del(&bgp->peer_conn_errlist, + peer); + } + if (CHECK_FLAG(peer->sflags, PEER_STATUS_NSF_WAIT)) peer_nsf_stop(peer); @@ -3634,6 +3647,10 @@ static struct bgp *bgp_create(as_t *as, const char *name, memset(&bgp->ebgprequirespolicywarning, 0, sizeof(bgp->ebgprequirespolicywarning)); + /* Init peer connection error info */ + pthread_mutex_init(&bgp->peer_errs_mtx, NULL); + bgp_peer_conn_errlist_init(&bgp->peer_conn_errlist); + return bgp; } @@ -4128,6 +4145,18 @@ int bgp_delete(struct bgp *bgp) if (i != ZEBRA_ROUTE_BGP) bgp_redistribute_unset(bgp, afi, i, 0); + /* Clear list of peers with connection errors - each + * peer will need to check again, in case the io pthread is racing + * with us, but this batch cleanup should make the per-peer check + * cheaper. + */ + frr_with_mutex (&bgp->peer_errs_mtx) { + do { + peer = bgp_peer_conn_errlist_pop( + &bgp->peer_conn_errlist); + } while (peer != NULL); + } + /* Free peers and peer-groups. */ for (ALL_LIST_ELEMENTS(bgp->group, node, next, group)) peer_group_delete(group); @@ -4144,6 +4173,9 @@ int bgp_delete(struct bgp *bgp) update_bgp_group_free(bgp); + /* Cancel peer connection errors event */ + EVENT_OFF(bgp->t_conn_errors); + /* TODO - Other memory may need to be freed - e.g., NHT */ #ifdef ENABLE_BGP_VNC @@ -4312,6 +4344,9 @@ void bgp_free(struct bgp *bgp) bgp_srv6_cleanup(bgp); bgp_confederation_id_unset(bgp); + bgp_peer_conn_errlist_init(&bgp->peer_conn_errlist); + pthread_mutex_destroy(&bgp->peer_errs_mtx); + for (int i = 0; i < bgp->confed_peers_cnt; i++) XFREE(MTYPE_BGP_NAME, bgp->confed_peers[i].as_pretty); @@ -8945,6 +8980,59 @@ void bgp_gr_apply_running_config(void) } } +/* + * Enqueue a peer with a connection error to be handled in the main pthread + */ +int bgp_enqueue_conn_err_peer(struct bgp *bgp, struct peer *peer, int errcode) +{ + frr_with_mutex (&bgp->peer_errs_mtx) { + peer->connection_errcode = errcode; + + /* Careful not to double-enqueue */ + if (!bgp_peer_conn_errlist_anywhere(peer)) { + bgp_peer_conn_errlist_add_tail(&bgp->peer_conn_errlist, + peer); + } + } + /* Ensure an event is scheduled */ + event_add_event(bm->master, bgp_packet_process_error, bgp, 0, + &bgp->t_conn_errors); + return 0; +} + +/* + * Dequeue a peer that encountered a connection error; signal whether there + * are more queued peers. + */ +struct peer *bgp_dequeue_conn_err_peer(struct bgp *bgp, bool *more_p) +{ + struct peer *peer = NULL; + bool more = false; + + frr_with_mutex (&bgp->peer_errs_mtx) { + peer = bgp_peer_conn_errlist_pop(&bgp->peer_conn_errlist); + + if (bgp_peer_conn_errlist_const_first( + &bgp->peer_conn_errlist) != NULL) + more = true; + } + + if (more_p) + *more_p = more; + + return peer; +} + +/* + * Reschedule the connection error event - probably after processing + * some of the peers on the list. 
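+ * The handler caps each pass at BGP_CONN_ERROR_DEQUEUE_MAX peers, so it
+ * re-arms itself through this call until the error list drains.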
+ */ +void bgp_conn_err_reschedule(struct bgp *bgp) +{ + event_add_event(bm->master, bgp_packet_process_error, bgp, 0, + &bgp->t_conn_errors); +} + printfrr_ext_autoreg_p("BP", printfrr_bp); static ssize_t printfrr_bp(struct fbuf *buf, struct printfrr_eargs *ea, const void *ptr) diff --git a/bgpd/bgpd.h b/bgpd/bgpd.h index f123188ae8c4..c20145432783 100644 --- a/bgpd/bgpd.h +++ b/bgpd/bgpd.h @@ -375,6 +375,33 @@ struct as_confed { struct bgp_mplsvpn_nh_label_bind_cache; PREDECL_RBTREE_UNIQ(bgp_mplsvpn_nh_label_bind_cache); +/* List of peers that have connection errors in the io pthread */ +PREDECL_LIST(bgp_peer_conn_errlist); + +/* List of info about peers that are being cleared from BGP RIBs in a batch */ +PREDECL_LIST(bgp_clearing_info); + +/* Hash of peers in clearing info object */ +PREDECL_HASH(bgp_clearing_hash); + +/* Info about a batch of peers that need to be cleared from the RIB. + * If many peers need to be cleared, we process them in batches, taking + * one walk through the RIB for each batch. + */ +struct bgp_clearing_info { + /* Hash of peers */ + struct bgp_clearing_hash_head peers; + + /* Event to schedule/reschedule processing */ + struct thread *t_sched; + + /* RIB dest for rescheduling */ + struct bgp_dest *last_dest; + + /* Linkage for list of batches per-bgp */ + struct bgp_clearing_info_item link; +}; + /* BGP instance structure. */ struct bgp { /* AS number of this BGP instance. */ @@ -854,6 +881,21 @@ struct bgp { uint16_t tcp_keepalive_intvl; uint16_t tcp_keepalive_probes; + /* List of peers that have connection errors in the IO pthread */ + struct bgp_peer_conn_errlist_head peer_conn_errlist; + + /* Mutex that guards the connection-errors list */ + pthread_mutex_t peer_errs_mtx; + + /* Event indicating that there have been connection errors; this + * is typically signalled in the IO pthread; it's handled in the + * main pthread. + */ + struct event *t_conn_errors; + + /* List of batches of peers being cleared from BGP RIBs */ + struct bgp_clearing_info_head clearing_list; + struct timeval ebgprequirespolicywarning; #define FIFTEENMINUTE2USEC (int64_t)15 * 60 * 1000000 @@ -1229,7 +1271,6 @@ struct peer_connection { struct event *t_routeadv; struct event *t_process_packet; - struct event *t_process_packet_error; struct event *t_stop_with_notify; @@ -1916,6 +1957,15 @@ struct peer { /* Add-Path Paths-Limit */ struct addpath_paths_limit addpath_paths_limit[AFI_MAX][SAFI_MAX]; + /* Linkage for list of peers with connection errors from IO pthread */ + struct bgp_peer_conn_errlist_item conn_err_link; + + /* Connection error code */ + uint16_t connection_errcode; + + /* Linkage for hash of clearing peers being cleared in a batch */ + struct bgp_clearing_hash_item clear_hash_link; + QOBJ_FIELDS; }; DECLARE_QOBJ_TYPE(peer); @@ -2552,6 +2602,10 @@ void bgp_gr_apply_running_config(void); int bgp_global_gr_init(struct bgp *bgp); int bgp_peer_gr_init(struct peer *peer); +/* APIs for the per-bgp peer connection error list */ +int bgp_enqueue_conn_err_peer(struct bgp *bgp, struct peer *peer, int errcode); +struct peer *bgp_dequeue_conn_err_peer(struct bgp *bgp, bool *more_p); +void bgp_conn_err_reschedule(struct bgp *bgp); #define BGP_GR_ROUTER_DETECT_AND_SEND_CAPABILITY_TO_ZEBRA(_bgp, _peer_list) \ do { \ From affc54acb75fba5faceef16eeb85993898240ef3 Mon Sep 17 00:00:00 2001 From: Mark Stapp Date: Tue, 1 Oct 2024 09:23:26 -0400 Subject: [PATCH 2/5] bgpd: remove apis from bgp_route.h Remove a couple of apis that don't exist. 
Signed-off-by: Mark Stapp --- bgpd/bgp_route.h | 3 --- 1 file changed, 3 deletions(-) diff --git a/bgpd/bgp_route.h b/bgpd/bgp_route.h index 1df0ffd300e1..0ae53889153a 100644 --- a/bgpd/bgp_route.h +++ b/bgpd/bgp_route.h @@ -899,9 +899,6 @@ extern bool subgroup_announce_check(struct bgp_dest *dest, const struct prefix *p, struct attr *attr, struct attr *post_attr); -extern void bgp_peer_clear_node_queue_drain_immediate(struct peer *peer); -extern void bgp_process_queues_drain_immediate(void); - /* for encap/vpn */ extern struct bgp_dest *bgp_safi_node_lookup(struct bgp_table *table, safi_t safi, From 7e73ebf95b702488a4c03755c974a91821c4bac8 Mon Sep 17 00:00:00 2001 From: Mark Stapp Date: Tue, 1 Oct 2024 16:30:44 -0400 Subject: [PATCH 3/5] bgpd: batch peer connection error clearing When peer connections encounter errors, attempt to batch some of the clearing processing that occurs. Add a new batch object, add multiple peers to it, if possible. Do one rib walk for the batch, rather than one walk per peer. Use a handler callback per batch to check and remove peers' path-infos, rather than a work-queue and callback per peer. The original clearing code remains; it's used for single peers. Signed-off-by: Mark Stapp --- bgpd/bgp_fsm.c | 3 +- bgpd/bgp_memory.h | 2 + bgpd/bgp_packet.c | 59 --------- bgpd/bgp_packet.h | 2 - bgpd/bgp_route.c | 232 +++++++++++++++++++++++++++++++++ bgpd/bgp_route.h | 3 + bgpd/bgpd.c | 324 ++++++++++++++++++++++++++++++++++++++++++++-- bgpd/bgpd.h | 55 +++++++- 8 files changed, 603 insertions(+), 77 deletions(-) diff --git a/bgpd/bgp_fsm.c b/bgpd/bgp_fsm.c index 4ac8201f749c..ed735df87308 100644 --- a/bgpd/bgp_fsm.c +++ b/bgpd/bgp_fsm.c @@ -1271,7 +1271,8 @@ void bgp_fsm_change_status(struct peer_connection *connection, * Clearing * (or Deleted). */ - if (!work_queue_is_scheduled(peer->clear_node_queue) && + if (!CHECK_FLAG(peer->flags, PEER_FLAG_CLEARING_BATCH) && + !work_queue_is_scheduled(peer->clear_node_queue) && status != Deleted) BGP_EVENT_ADD(connection, Clearing_Completed); } diff --git a/bgpd/bgp_memory.h b/bgpd/bgp_memory.h index 1f76945da3e0..517e34feba84 100644 --- a/bgpd/bgp_memory.h +++ b/bgpd/bgp_memory.h @@ -136,4 +136,6 @@ DECLARE_MTYPE(BGP_SOFT_VERSION); DECLARE_MTYPE(BGP_EVPN_OVERLAY); +DECLARE_MTYPE(CLEARING_BATCH); + #endif /* _QUAGGA_BGP_MEMORY_H */ diff --git a/bgpd/bgp_packet.c b/bgpd/bgp_packet.c index 1bddd543c658..8ce0cdf89fd0 100644 --- a/bgpd/bgp_packet.c +++ b/bgpd/bgp_packet.c @@ -4196,62 +4196,3 @@ void bgp_send_delayed_eor(struct bgp *bgp) for (ALL_LIST_ELEMENTS(bgp->peer, node, nnode, peer)) bgp_write_proceed_actions(peer); } - -/* - * Task callback in the main pthread to handle socket error - * encountered in the io pthread. We avoid having the io pthread try - * to enqueue fsm events or mess with the peer struct. 
- */ - -/* Max number of peers to process without rescheduling */ -#define BGP_CONN_ERROR_DEQUEUE_MAX 10 - -void bgp_packet_process_error(struct event *thread) -{ - struct peer_connection *connection; - struct peer *peer; - struct bgp *bgp; - int counter = 0; - bool more_p = false; - - bgp = EVENT_ARG(thread); - - /* Dequeue peers from the error list */ - while ((peer = bgp_dequeue_conn_err_peer(bgp, &more_p)) != NULL) { - connection = peer->connection; - - if (bgp_debug_neighbor_events(peer)) - zlog_debug("%s [Event] BGP error %d on fd %d", - peer->host, peer->connection_errcode, - connection->fd); - - /* Closed connection or error on the socket */ - if (peer_established(connection)) { - if ((CHECK_FLAG(peer->flags, PEER_FLAG_GRACEFUL_RESTART) - || CHECK_FLAG(peer->flags, - PEER_FLAG_GRACEFUL_RESTART_HELPER)) - && CHECK_FLAG(peer->sflags, PEER_STATUS_NSF_MODE)) { - peer->last_reset = PEER_DOWN_NSF_CLOSE_SESSION; - SET_FLAG(peer->sflags, PEER_STATUS_NSF_WAIT); - } else - peer->last_reset = PEER_DOWN_CLOSE_SESSION; - } - - /* No need for keepalives, if enabled */ - bgp_keepalives_off(connection); - - bgp_event_update(connection, peer->connection_errcode); - - counter++; - if (counter >= BGP_CONN_ERROR_DEQUEUE_MAX) - break; - } - - /* Reschedule event if necessary */ - if (more_p) - bgp_conn_err_reschedule(bgp); - - if (bgp_debug_neighbor_events(NULL)) - zlog_debug("%s: dequeued and processed %d peers", __func__, - counter); -} diff --git a/bgpd/bgp_packet.h b/bgpd/bgp_packet.h index c266b17266ec..3f7106fd37ed 100644 --- a/bgpd/bgp_packet.h +++ b/bgpd/bgp_packet.h @@ -75,8 +75,6 @@ extern void bgp_process_packet(struct event *event); extern void bgp_send_delayed_eor(struct bgp *bgp); -/* Task callback to handle socket error encountered in the io pthread */ -void bgp_packet_process_error(struct event *thread); extern struct bgp_notify bgp_notify_decapsulate_hard_reset(struct bgp_notify *notify); extern bool bgp_has_graceful_restart_notification(struct peer *peer); diff --git a/bgpd/bgp_route.c b/bgpd/bgp_route.c index 72e798a7e2c9..a5f287a0688a 100644 --- a/bgpd/bgp_route.c +++ b/bgpd/bgp_route.c @@ -78,6 +78,9 @@ #include "bgpd/bgp_route_clippy.c" +/* Memory for batched clearing of peers from the RIB */ +DEFINE_MTYPE(BGPD, CLEARING_BATCH, "Clearing batch"); + DEFINE_HOOK(bgp_snmp_update_stats, (struct bgp_dest *rn, struct bgp_path_info *pi, bool added), (rn, pi, added)); @@ -6186,11 +6189,240 @@ void bgp_clear_route(struct peer *peer, afi_t afi, safi_t safi) peer_unlock(peer); } +/* + * Callback scheduled to process prefixes/dests for batch clearing; the + * dests were found via a rib walk. + * The one-peer version of this uses a per-peer workqueue to manage + * rescheduling, but we're just using a fixed limit here. + */ + +/* Limit the number of dests we'll process per callback */ +#define BGP_CLEARING_BATCH_MAX_DESTS 100 + +static void bgp_clear_batch_dests_task(struct event *event) +{ + struct bgp_clearing_info *cinfo = EVENT_ARG(event); + struct bgp_dest *dest; + struct bgp_path_info *pi; + struct bgp_table *table; + struct bgp *bgp; + afi_t afi; + safi_t safi; + int counter = 0; + + bgp = cinfo->bgp; + +next_dest: + + dest = bgp_clearing_batch_next_dest(cinfo); + if (dest == NULL) + goto done; + + table = bgp_dest_table(dest); + afi = table->afi; + safi = table->safi; + + /* Have to check every path: it is possible that we have multiple paths + * for a prefix from a peer if that peer is using AddPath. 
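+	 * Matching paths are either marked BGP_PATH_STALE (graceful-restart
+	 * cases) or withdrawn and removed from the RIB below.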
+ */ + for (pi = bgp_dest_get_bgp_path_info(dest); pi; pi = pi->next) { + if (!bgp_clearing_batch_check_peer(cinfo, pi->peer)) + continue; + + /* graceful restart STALE flag set. */ + if (((CHECK_FLAG(pi->peer->sflags, PEER_STATUS_NSF_WAIT) + && pi->peer->nsf[afi][safi]) + || CHECK_FLAG(pi->peer->af_sflags[afi][safi], + PEER_STATUS_ENHANCED_REFRESH)) + && !CHECK_FLAG(pi->flags, BGP_PATH_STALE) + && !CHECK_FLAG(pi->flags, BGP_PATH_UNUSEABLE)) + bgp_path_info_set_flag(dest, pi, BGP_PATH_STALE); + else { + /* If this is an EVPN route, process for + * un-import. */ + if (safi == SAFI_EVPN) + bgp_evpn_unimport_route( + bgp, afi, safi, + bgp_dest_get_prefix(dest), pi); + /* Handle withdraw for VRF route-leaking and L3VPN */ + if (SAFI_UNICAST == safi + && (bgp->inst_type == BGP_INSTANCE_TYPE_VRF || + bgp->inst_type == BGP_INSTANCE_TYPE_DEFAULT)) { + vpn_leak_from_vrf_withdraw(bgp_get_default(), + bgp, pi); + } + if (SAFI_MPLS_VPN == safi && + bgp->inst_type == BGP_INSTANCE_TYPE_DEFAULT) { + vpn_leak_to_vrf_withdraw(pi); + } + + bgp_rib_remove(dest, pi, pi->peer, afi, safi); + } + } + + /* Unref this dest and table */ + bgp_dest_unlock_node(dest); + bgp_table_unlock(bgp_dest_table(dest)); + + counter++; + if (counter < BGP_CLEARING_BATCH_MAX_DESTS) + goto next_dest; + +done: + + /* If there are still dests to process, reschedule. */ + if (bgp_clearing_batch_dests_present(cinfo)) { + if (bgp_debug_neighbor_events(NULL)) + zlog_debug("%s: Batch %p: Rescheduled after processing %d dests", + __func__, cinfo, counter); + + event_add_event(bm->master, bgp_clear_batch_dests_task, cinfo, + 0, &cinfo->t_sched); + } else { + if (bgp_debug_neighbor_events(NULL)) + zlog_debug("%s: Batch %p: Done after processing %d dests", + __func__, cinfo, counter); + bgp_clearing_batch_completed(cinfo); + } + + return; +} + +/* + * Walk a single table for batch peer clearing processing + */ +static void clear_batch_table_helper(struct bgp_clearing_info *cinfo, + struct bgp_table *table) +{ + struct bgp_dest *dest; + bool force = (cinfo->bgp->process_queue == NULL); + uint32_t examined = 0, queued = 0; + + for (dest = bgp_table_top(table); dest; dest = bgp_route_next(dest)) { + struct bgp_path_info *pi, *next; + struct bgp_adj_in *ain; + struct bgp_adj_in *ain_next; + + examined++; + + ain = dest->adj_in; + while (ain) { + ain_next = ain->next; + + if (bgp_clearing_batch_check_peer(cinfo, ain->peer)) + bgp_adj_in_remove(&dest, ain); + + ain = ain_next; + + assert(dest != NULL); + } + + for (pi = bgp_dest_get_bgp_path_info(dest); pi; pi = next) { + next = pi->next; + if (!bgp_clearing_batch_check_peer(cinfo, pi->peer)) + continue; + + queued++; + + if (force) { + bgp_path_info_reap(dest, pi); + } else { + /* Unlocked after processing */ + bgp_table_lock(bgp_dest_table(dest)); + bgp_dest_lock_node(dest); + + bgp_clearing_batch_add_dest(cinfo, dest); + break; + } + } + } + + if (examined > 0) { + if (bgp_debug_neighbor_events(NULL)) + zlog_debug("%s: %s/%s: examined %u, queued %u", + __func__, afi2str(table->afi), + safi2str(table->safi), examined, queued); + } +} + +/* + * RIB-walking helper for batch clearing work: walk all tables, identify + * dests that are affected by the peers in the batch, enqueue the dests for + * async processing. 
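+ * Each enqueued dest holds a node lock and a table lock; those refs are
+ * released as the async task processes the dest.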
+ */ +static void clear_batch_rib_helper(struct bgp_clearing_info *cinfo) +{ + afi_t afi; + safi_t safi; + struct bgp_dest *dest; + struct bgp_table *table; + + FOREACH_AFI_SAFI (afi, safi) { + /* Identify table to be examined */ + if (safi != SAFI_MPLS_VPN && safi != SAFI_ENCAP && + safi != SAFI_EVPN) { + table = cinfo->bgp->rib[afi][safi]; + if (!table) + continue; + + clear_batch_table_helper(cinfo, table); + } else { + for (dest = bgp_table_top(cinfo->bgp->rib[afi][safi]); + dest; dest = bgp_route_next(dest)) { + table = bgp_dest_get_bgp_table_info(dest); + if (!table) + continue; + + /* TODO -- record the tables we've seen + * and don't repeat any? + */ + + clear_batch_table_helper(cinfo, table); + } + } + } +} + +/* + * Identify prefixes that need to be cleared for a batch of peers in 'cinfo'. + * The actual clearing processing will be done async... + */ +void bgp_clear_route_batch(struct bgp_clearing_info *cinfo) +{ + if (bgp_debug_neighbor_events(NULL)) + zlog_debug("%s: BGP %s, batch %p", __func__, + cinfo->bgp->name_pretty, cinfo); + + /* Walk the rib, checking the peers in the batch */ + clear_batch_rib_helper(cinfo); + + /* If we found some prefixes, schedule a task to begin work. */ + if (bgp_clearing_batch_dests_present(cinfo)) + event_add_event(bm->master, bgp_clear_batch_dests_task, cinfo, + 0, &cinfo->t_sched); + + /* NB -- it's the caller's job to clean up, release refs, etc. if + * we didn't find any dests + */ +} + void bgp_clear_route_all(struct peer *peer) { afi_t afi; safi_t safi; + /* We may be able to batch multiple peers' clearing work: check + * and see. + */ + if (bgp_clearing_batch_add_peer(peer->bgp, peer)) { + if (bgp_debug_neighbor_events(peer)) + zlog_debug("%s: peer %pBP batched", __func__, peer); + return; + } + + if (bgp_debug_neighbor_events(peer)) + zlog_debug("%s: peer %pBP", __func__, peer); + FOREACH_AFI_SAFI (afi, safi) bgp_clear_route(peer, afi, safi); diff --git a/bgpd/bgp_route.h b/bgpd/bgp_route.h index 0ae53889153a..8c1b4a104365 100644 --- a/bgpd/bgp_route.h +++ b/bgpd/bgp_route.h @@ -746,6 +746,9 @@ extern void bgp_soft_reconfig_table_task_cancel(const struct bgp *bgp, extern bool bgp_soft_reconfig_in(struct peer *peer, afi_t afi, safi_t safi); extern void bgp_clear_route(struct peer *, afi_t, safi_t); extern void bgp_clear_route_all(struct peer *); +/* Clear routes for a batch of peers */ +void bgp_clear_route_batch(struct bgp_clearing_info *cinfo); + extern void bgp_clear_adj_in(struct peer *, afi_t, safi_t); extern void bgp_clear_stale_route(struct peer *, afi_t, safi_t); extern void bgp_set_stale_route(struct peer *peer, afi_t afi, safi_t safi); diff --git a/bgpd/bgpd.c b/bgpd/bgpd.c index a1ec16ec4e5b..dd900d9689f5 100644 --- a/bgpd/bgpd.c +++ b/bgpd/bgpd.c @@ -88,7 +88,20 @@ DEFINE_HOOK(bgp_inst_delete, (struct bgp *bgp), (bgp)); DEFINE_HOOK(bgp_instance_state, (struct bgp *bgp), (bgp)); /* Peers with connection error/failure, per bgp instance */ -DECLARE_LIST(bgp_peer_conn_errlist, struct peer, conn_err_link); +DECLARE_DLIST(bgp_peer_conn_errlist, struct peer, conn_err_link); + +/* List of info about peers that are being cleared from BGP RIBs in a batch */ +DECLARE_DLIST(bgp_clearing_info, struct bgp_clearing_info, link); + +/* List of dests that need to be processed in a clearing batch */ +DECLARE_LIST(bgp_clearing_destlist, struct bgp_clearing_dest, link); + +/* Hash of peers in clearing info object */ +static int peer_clearing_hash_cmp(const struct peer *p1, const struct peer *p2); +static uint32_t peer_clearing_hashfn(const struct 
peer *p1); + +DECLARE_HASH(bgp_clearing_hash, struct peer, clear_hash_link, + peer_clearing_hash_cmp, peer_clearing_hashfn); /* BGP process wide configuration. */ static struct bgp_master bgp_master; @@ -3650,6 +3663,7 @@ static struct bgp *bgp_create(as_t *as, const char *name, /* Init peer connection error info */ pthread_mutex_init(&bgp->peer_errs_mtx, NULL); bgp_peer_conn_errlist_init(&bgp->peer_conn_errlist); + bgp_clearing_info_init(&bgp->clearing_list); return bgp; } @@ -4023,6 +4037,7 @@ int bgp_delete(struct bgp *bgp) struct bgp_table *dest_table = NULL; struct graceful_restart_info *gr_info; uint32_t cnt_before, cnt_after; + struct bgp_clearing_info *cinfo; assert(bgp); @@ -4047,6 +4062,10 @@ int bgp_delete(struct bgp *bgp) zlog_debug("Zebra Announce Fifo cleanup count before %u and after %u during BGP %s deletion", cnt_before, cnt_after, bgp->name_pretty); + /* Cleanup for peer connection batching */ + while ((cinfo = bgp_clearing_info_first(&bgp->clearing_list)) != NULL) + bgp_clearing_batch_completed(cinfo); + bgp_soft_reconfig_table_task_cancel(bgp, NULL, NULL); /* make sure we withdraw any exported routes */ @@ -4067,6 +4086,8 @@ int bgp_delete(struct bgp *bgp) EVENT_OFF(bgp->t_maxmed_onstartup); EVENT_OFF(bgp->t_update_delay); EVENT_OFF(bgp->t_establish_wait); + /* Cancel peer connection errors event */ + EVENT_OFF(bgp->t_conn_errors); /* Set flag indicating bgp instance delete in progress */ SET_FLAG(bgp->flags, BGP_FLAG_DELETE_IN_PROGRESS); @@ -4172,10 +4193,6 @@ int bgp_delete(struct bgp *bgp) } update_bgp_group_free(bgp); - - /* Cancel peer connection errors event */ - EVENT_OFF(bgp->t_conn_errors); - /* TODO - Other memory may need to be freed - e.g., NHT */ #ifdef ENABLE_BGP_VNC @@ -8980,8 +8997,299 @@ void bgp_gr_apply_running_config(void) } } +/* Hash of peers in clearing info object */ +static int peer_clearing_hash_cmp(const struct peer *p1, const struct peer *p2) +{ + if (p1 == p2) + return 0; + else if (p1 < p2) + return -1; + else + return 1; +} + +static uint32_t peer_clearing_hashfn(const struct peer *p1) +{ + return (uint32_t)((intptr_t)p1 & 0xffffffffULL); +} + +/* + * Free a clearing batch: this really just does the memory cleanup; the + * clearing code is expected to manage the peer, dest, table, etc refcounts + */ +static void bgp_clearing_batch_free(struct bgp *bgp, + struct bgp_clearing_info **pinfo) +{ + struct bgp_clearing_info *cinfo = *pinfo; + struct bgp_clearing_dest *destinfo; + + if (bgp_clearing_info_anywhere(cinfo)) + bgp_clearing_info_del(&bgp->clearing_list, cinfo); + + while ((destinfo = bgp_clearing_destlist_pop(&cinfo->destlist)) != NULL) + XFREE(MTYPE_CLEARING_BATCH, destinfo); + + bgp_clearing_hash_fini(&cinfo->peers); + + XFREE(MTYPE_CLEARING_BATCH, *pinfo); +} + +/* + * Done with a peer that was part of a clearing batch + */ +static void bgp_clearing_peer_done(struct peer *peer) +{ + /* Tickle FSM to start moving again */ + BGP_EVENT_ADD(peer->connection, Clearing_Completed); + + peer_unlock(peer); /* bgp_clear_route */ +} + +/* + * Initialize a new batch struct for clearing peer(s) from the RIB + */ +static void bgp_clearing_batch_begin(struct bgp *bgp) +{ + struct bgp_clearing_info *cinfo; + + cinfo = XCALLOC(MTYPE_CLEARING_BATCH, sizeof(struct bgp_clearing_info)); + + cinfo->bgp = bgp; + + /* Init hash of peers and list of dests */ + bgp_clearing_hash_init(&cinfo->peers); + bgp_clearing_destlist_init(&cinfo->destlist); + + /* Batch is open for more peers */ + SET_FLAG(cinfo->flags, BGP_CLEARING_INFO_FLAG_OPEN); + + 
bgp_clearing_info_add_head(&bgp->clearing_list, cinfo); +} + +/* + * Close a batch of clearing peers, and begin working on the RIB + */ +static void bgp_clearing_batch_end(struct bgp *bgp) +{ + struct bgp_clearing_info *cinfo; + + cinfo = bgp_clearing_info_first(&bgp->clearing_list); + + assert(cinfo != NULL); + assert(CHECK_FLAG(cinfo->flags, BGP_CLEARING_INFO_FLAG_OPEN)); + + /* Batch is closed */ + UNSET_FLAG(cinfo->flags, BGP_CLEARING_INFO_FLAG_OPEN); + + /* If we have no peers to examine, just discard the batch info */ + if (bgp_clearing_hash_count(&cinfo->peers) == 0) { + bgp_clearing_batch_free(bgp, &cinfo); + return; + } + + /* Do a RIB walk for the current batch. If it finds dests/prefixes + * to work on, this will schedule a task to process + * the dests/prefixes in the batch. + */ + bgp_clear_route_batch(cinfo); + + /* If we found no prefixes/dests, just discard the batch, + * remembering that we're holding a ref for each peer. + */ + if (bgp_clearing_destlist_count(&cinfo->destlist) == 0) { + bgp_clearing_batch_completed(cinfo); + } +} + +/* Check whether a dest's peer is relevant to a clearing batch */ +bool bgp_clearing_batch_check_peer(struct bgp_clearing_info *cinfo, + const struct peer *peer) +{ + struct peer *p; + + p = bgp_clearing_hash_find(&cinfo->peers, peer); + return (p != NULL); +} + +/* + * Check whether a clearing batch has any dests to process + */ +bool bgp_clearing_batch_dests_present(struct bgp_clearing_info *cinfo) +{ + return (bgp_clearing_destlist_count(&cinfo->destlist) > 0); +} + +/* + * Done with a peer clearing batch; deal with refcounts, free memory + */ +void bgp_clearing_batch_completed(struct bgp_clearing_info *cinfo) +{ + struct peer *peer; + struct bgp_dest *dest; + struct bgp_clearing_dest *destinfo; + + /* Ensure event is not scheduled */ + event_cancel_event(bm->master, &cinfo->t_sched); + + /* Remove all peers and un-ref */ + while ((peer = bgp_clearing_hash_pop(&cinfo->peers)) != NULL) + bgp_clearing_peer_done(peer); + + /* Remove any dests/prefixes and unlock (should have been done + * by processing, so this is belt-and-suspenders) + */ + destinfo = bgp_clearing_destlist_pop(&cinfo->destlist); + if (destinfo) { + dest = destinfo->dest; + XFREE(MTYPE_CLEARING_BATCH, destinfo); + + bgp_dest_unlock_node(dest); + bgp_table_unlock(bgp_dest_table(dest)); + } + + /* Free memory */ + bgp_clearing_batch_free(cinfo->bgp, &cinfo); +} + +/* + * Add a prefix/dest to a clearing batch + */ +void bgp_clearing_batch_add_dest(struct bgp_clearing_info *cinfo, + struct bgp_dest *dest) +{ + struct bgp_clearing_dest *destinfo; + + destinfo = XCALLOC(MTYPE_CLEARING_BATCH, + sizeof(struct bgp_clearing_dest)); + + destinfo->dest = dest; + bgp_clearing_destlist_add_tail(&cinfo->destlist, destinfo); +} + +/* + * Return the next dest for batch clear processing + */ +struct bgp_dest *bgp_clearing_batch_next_dest(struct bgp_clearing_info *cinfo) +{ + struct bgp_clearing_dest *destinfo; + struct bgp_dest *dest = NULL; + + destinfo = bgp_clearing_destlist_pop(&cinfo->destlist); + if (destinfo) { + dest = destinfo->dest; + XFREE(MTYPE_CLEARING_BATCH, destinfo); + } + + return dest; +} + +/* If a clearing batch is available for 'peer', add it and return 'true', + * else return 'false'. 
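+ * Only the newest batch is considered, and only while it is still open
+ * (BGP_CLEARING_INFO_FLAG_OPEN); an added peer is locked and flagged
+ * with PEER_FLAG_CLEARING_BATCH.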
+ */ +bool bgp_clearing_batch_add_peer(struct bgp *bgp, struct peer *peer) +{ + struct bgp_clearing_info *cinfo; + + cinfo = bgp_clearing_info_first(&bgp->clearing_list); + + if (cinfo && CHECK_FLAG(cinfo->flags, BGP_CLEARING_INFO_FLAG_OPEN)) { + if (!CHECK_FLAG(peer->flags, PEER_FLAG_CLEARING_BATCH)) { + /* Add a peer ref */ + peer_lock(peer); + + bgp_clearing_hash_add(&cinfo->peers, peer); + SET_FLAG(peer->flags, PEER_FLAG_CLEARING_BATCH); + } + return true; + } + + return false; +} + +/* + * Task callback in the main pthread to handle socket errors + * encountered in the io pthread. We avoid having the io pthread try + * to enqueue fsm events or mess with the peer struct. + */ + +/* TODO -- should this be configurable? */ +/* Max number of peers to process without rescheduling */ +#define BGP_CONN_ERROR_DEQUEUE_MAX 10 + +static void bgp_process_conn_error(struct event *event) +{ + struct bgp *bgp; + struct peer *peer; + struct peer_connection *connection; + int counter = 0; + size_t list_count = 0; + bool more_p = false; + + bgp = EVENT_ARG(event); + + frr_with_mutex (&bgp->peer_errs_mtx) { + peer = bgp_peer_conn_errlist_pop(&bgp->peer_conn_errlist); + + list_count = + bgp_peer_conn_errlist_count(&bgp->peer_conn_errlist); + } + + /* If we have multiple peers with errors, try to batch some + * clearing work. + */ + if (list_count > 0) + bgp_clearing_batch_begin(bgp); + + /* Dequeue peers from the error list */ + while (peer != NULL) { + connection = peer->connection; + + if (bgp_debug_neighbor_events(peer)) + zlog_debug("%s [Event] BGP error %d on fd %d", + peer->host, peer->connection_errcode, + connection->fd); + + /* Closed connection or error on the socket */ + if (peer_established(connection)) { + if ((CHECK_FLAG(peer->flags, PEER_FLAG_GRACEFUL_RESTART) + || CHECK_FLAG(peer->flags, + PEER_FLAG_GRACEFUL_RESTART_HELPER)) + && CHECK_FLAG(peer->sflags, PEER_STATUS_NSF_MODE)) { + peer->last_reset = PEER_DOWN_NSF_CLOSE_SESSION; + SET_FLAG(peer->sflags, PEER_STATUS_NSF_WAIT); + } else + peer->last_reset = PEER_DOWN_CLOSE_SESSION; + } + + /* No need for keepalives, if enabled */ + bgp_keepalives_off(peer->connection); + + /* Drive into state-machine changes */ + bgp_event_update(connection, peer->connection_errcode); + + counter++; + if (counter >= BGP_CONN_ERROR_DEQUEUE_MAX) + break; + + peer = bgp_dequeue_conn_err_peer(bgp, &more_p); + } + + /* Reschedule event if necessary */ + if (more_p) + bgp_conn_err_reschedule(bgp); + + /* Done with a clearing batch */ + if (list_count > 0) + bgp_clearing_batch_end(bgp); + + if (bgp_debug_neighbor_events(NULL)) + zlog_debug("%s: dequeued and processed %d peers", __func__, + counter); +} + /* - * Enqueue a peer with a connection error to be handled in the main pthread + * Enqueue a peer with a connection error to be handled in the main pthread; + * this is called from the io pthread. 
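+ * The per-bgp error list is protected by peer_errs_mtx since both
+ * pthreads touch it; enqueueing also schedules the handler event.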
*/ int bgp_enqueue_conn_err_peer(struct bgp *bgp, struct peer *peer, int errcode) { @@ -8995,7 +9303,7 @@ int bgp_enqueue_conn_err_peer(struct bgp *bgp, struct peer *peer, int errcode) } } /* Ensure an event is scheduled */ - event_add_event(bm->master, bgp_packet_process_error, bgp, 0, + event_add_event(bm->master, bgp_process_conn_error, bgp, 0, &bgp->t_conn_errors); return 0; } @@ -9029,7 +9337,7 @@ struct peer *bgp_dequeue_conn_err_peer(struct bgp *bgp, bool *more_p) */ void bgp_conn_err_reschedule(struct bgp *bgp) { - event_add_event(bm->master, bgp_packet_process_error, bgp, 0, + event_add_event(bm->master, bgp_process_conn_error, bgp, 0, &bgp->t_conn_errors); } diff --git a/bgpd/bgpd.h b/bgpd/bgpd.h index c20145432783..cbeb30d8b4d5 100644 --- a/bgpd/bgpd.h +++ b/bgpd/bgpd.h @@ -376,32 +376,54 @@ struct bgp_mplsvpn_nh_label_bind_cache; PREDECL_RBTREE_UNIQ(bgp_mplsvpn_nh_label_bind_cache); /* List of peers that have connection errors in the io pthread */ -PREDECL_LIST(bgp_peer_conn_errlist); +PREDECL_DLIST(bgp_peer_conn_errlist); /* List of info about peers that are being cleared from BGP RIBs in a batch */ -PREDECL_LIST(bgp_clearing_info); +PREDECL_DLIST(bgp_clearing_info); /* Hash of peers in clearing info object */ PREDECL_HASH(bgp_clearing_hash); +/* List of dests that need to be processed in a clearing batch */ +PREDECL_LIST(bgp_clearing_destlist); + +struct bgp_clearing_dest { + struct bgp_dest *dest; + struct bgp_clearing_destlist_item link; +}; + /* Info about a batch of peers that need to be cleared from the RIB. * If many peers need to be cleared, we process them in batches, taking - * one walk through the RIB for each batch. + * one walk through the RIB for each batch. This is only used for "all" + * afi/safis, typically when processing peer connection errors. */ struct bgp_clearing_info { + /* Owning bgp instance */ + struct bgp *bgp; + /* Hash of peers */ struct bgp_clearing_hash_head peers; + /* Flags */ + uint32_t flags; + + /* List of dests - wrapped by a small wrapper struct */ + struct bgp_clearing_destlist_head destlist; + /* Event to schedule/reschedule processing */ - struct thread *t_sched; + struct event *t_sched; + + /* TODO -- id, serial number, for debugging/logging? */ - /* RIB dest for rescheduling */ - struct bgp_dest *last_dest; + /* TODO -- info for rescheduling the RIB walk? future? */ - /* Linkage for list of batches per-bgp */ + /* Linkage for list of batches per bgp */ struct bgp_clearing_info_item link; }; +/* Batch is open, new peers can be added */ +#define BGP_CLEARING_INFO_FLAG_OPEN (1 << 0) + /* BGP instance structure. */ struct bgp { /* AS number of this BGP instance. */ @@ -1561,6 +1583,8 @@ struct peer { #define PEER_FLAG_AS_LOOP_DETECTION (1ULL << 38) /* as path loop detection */ #define PEER_FLAG_EXTENDED_LINK_BANDWIDTH (1ULL << 39) #define PEER_FLAG_DUAL_AS (1ULL << 40) +/* Peer is part of a batch clearing its routes */ +#define PEER_FLAG_CLEARING_BATCH (1ULL << 41) /* *GR-Disabled mode means unset PEER_FLAG_GRACEFUL_RESTART @@ -2934,6 +2958,23 @@ extern void srv6_function_free(struct bgp_srv6_function *func); extern void bgp_session_reset_safe(struct peer *peer, struct listnode **nnode); +/* If a clearing batch is available for 'peer', add it and return 'true', + * else return 'false'. 
+ */ +bool bgp_clearing_batch_add_peer(struct bgp *bgp, struct peer *peer); +/* Add a prefix/dest to a clearing batch */ +void bgp_clearing_batch_add_dest(struct bgp_clearing_info *cinfo, + struct bgp_dest *dest); +/* Check whether a dest's peer is relevant to a clearing batch */ +bool bgp_clearing_batch_check_peer(struct bgp_clearing_info *cinfo, + const struct peer *peer); +/* Check whether a clearing batch has any dests to process */ +bool bgp_clearing_batch_dests_present(struct bgp_clearing_info *cinfo); +/* Returns the next dest for batch clear processing */ +struct bgp_dest *bgp_clearing_batch_next_dest(struct bgp_clearing_info *cinfo); +/* Done with a peer clearing batch; deal with refcounts, free memory */ +void bgp_clearing_batch_completed(struct bgp_clearing_info *cinfo); + #ifdef _FRR_ATTRIBUTE_PRINTFRR /* clang-format off */ #pragma FRR printfrr_ext "%pBP" (struct peer *) From cdd3a61496d2a55764b25f749ecd3b33bfa55299 Mon Sep 17 00:00:00 2001 From: Mark Stapp Date: Thu, 7 Nov 2024 10:55:19 -0500 Subject: [PATCH 4/5] zebra: move peer conn error list to connection struct Move the peer connection error list to the peer_connection struct; that seems to line up better with the way that struct works. Signed-off-by: Mark Stapp --- bgpd/bgp_io.c | 2 +- bgpd/bgpd.c | 44 +++++++++++++++++++++++--------------------- bgpd/bgpd.h | 17 +++++++++-------- 3 files changed, 33 insertions(+), 30 deletions(-) diff --git a/bgpd/bgp_io.c b/bgpd/bgp_io.c index a00fed0973e9..c7aa946f4463 100644 --- a/bgpd/bgp_io.c +++ b/bgpd/bgp_io.c @@ -251,7 +251,7 @@ static void bgp_process_reads(struct event *thread) /* Handle the error in the main pthread, include the * specific state change from 'bgp_read'. */ - bgp_enqueue_conn_err_peer(peer->bgp, connection->peer, code); + bgp_enqueue_conn_err(peer->bgp, connection, code); goto done; } diff --git a/bgpd/bgpd.c b/bgpd/bgpd.c index dd900d9689f5..d579fde40cb6 100644 --- a/bgpd/bgpd.c +++ b/bgpd/bgpd.c @@ -88,7 +88,7 @@ DEFINE_HOOK(bgp_inst_delete, (struct bgp *bgp), (bgp)); DEFINE_HOOK(bgp_instance_state, (struct bgp *bgp), (bgp)); /* Peers with connection error/failure, per bgp instance */ -DECLARE_DLIST(bgp_peer_conn_errlist, struct peer, conn_err_link); +DECLARE_DLIST(bgp_peer_conn_errlist, struct peer_connection, conn_err_link); /* List of info about peers that are being cleared from BGP RIBs in a batch */ DECLARE_DLIST(bgp_clearing_info, struct bgp_clearing_info, link); @@ -2748,9 +2748,9 @@ int peer_delete(struct peer *peer) /* Ensure the peer is removed from the connection error list */ frr_with_mutex (&bgp->peer_errs_mtx) { - if (bgp_peer_conn_errlist_anywhere(peer)) + if (bgp_peer_conn_errlist_anywhere(peer->connection)) bgp_peer_conn_errlist_del(&bgp->peer_conn_errlist, - peer); + peer->connection); } if (CHECK_FLAG(peer->sflags, PEER_STATUS_NSF_WAIT)) @@ -4038,6 +4038,7 @@ int bgp_delete(struct bgp *bgp) struct graceful_restart_info *gr_info; uint32_t cnt_before, cnt_after; struct bgp_clearing_info *cinfo; + struct peer_connection *connection; assert(bgp); @@ -4173,9 +4174,9 @@ int bgp_delete(struct bgp *bgp) */ frr_with_mutex (&bgp->peer_errs_mtx) { do { - peer = bgp_peer_conn_errlist_pop( + connection = bgp_peer_conn_errlist_pop( &bgp->peer_conn_errlist); - } while (peer != NULL); + } while (connection != NULL); } /* Free peers and peer-groups. 
*/ @@ -9228,7 +9229,7 @@ static void bgp_process_conn_error(struct event *event) bgp = EVENT_ARG(event); frr_with_mutex (&bgp->peer_errs_mtx) { - peer = bgp_peer_conn_errlist_pop(&bgp->peer_conn_errlist); + connection = bgp_peer_conn_errlist_pop(&bgp->peer_conn_errlist); list_count = bgp_peer_conn_errlist_count(&bgp->peer_conn_errlist); @@ -9241,12 +9242,12 @@ static void bgp_process_conn_error(struct event *event) bgp_clearing_batch_begin(bgp); /* Dequeue peers from the error list */ - while (peer != NULL) { - connection = peer->connection; + while (connection != NULL) { + peer = connection->peer; if (bgp_debug_neighbor_events(peer)) zlog_debug("%s [Event] BGP error %d on fd %d", - peer->host, peer->connection_errcode, + peer->host, connection->connection_errcode, connection->fd); /* Closed connection or error on the socket */ @@ -9265,13 +9266,13 @@ static void bgp_process_conn_error(struct event *event) bgp_keepalives_off(peer->connection); /* Drive into state-machine changes */ - bgp_event_update(connection, peer->connection_errcode); + bgp_event_update(connection, connection->connection_errcode); counter++; if (counter >= BGP_CONN_ERROR_DEQUEUE_MAX) break; - peer = bgp_dequeue_conn_err_peer(bgp, &more_p); + connection = bgp_dequeue_conn_err(bgp, &more_p); } /* Reschedule event if necessary */ @@ -9288,18 +9289,19 @@ static void bgp_process_conn_error(struct event *event) } /* - * Enqueue a peer with a connection error to be handled in the main pthread; + * Enqueue a connection with an error to be handled in the main pthread; * this is called from the io pthread. */ -int bgp_enqueue_conn_err_peer(struct bgp *bgp, struct peer *peer, int errcode) +int bgp_enqueue_conn_err(struct bgp *bgp, struct peer_connection *connection, + int errcode) { frr_with_mutex (&bgp->peer_errs_mtx) { - peer->connection_errcode = errcode; + connection->connection_errcode = errcode; /* Careful not to double-enqueue */ - if (!bgp_peer_conn_errlist_anywhere(peer)) { + if (!bgp_peer_conn_errlist_anywhere(connection)) { bgp_peer_conn_errlist_add_tail(&bgp->peer_conn_errlist, - peer); + connection); } } /* Ensure an event is scheduled */ @@ -9309,16 +9311,16 @@ int bgp_enqueue_conn_err_peer(struct bgp *bgp, struct peer *peer, int errcode) } /* - * Dequeue a peer that encountered a connection error; signal whether there + * Dequeue a connection that encountered a connection error; signal whether there * are more queued peers. 
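+ * Returns NULL once the list is empty; '*more_p' reports whether another
+ * pass will be needed.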
*/ -struct peer *bgp_dequeue_conn_err_peer(struct bgp *bgp, bool *more_p) +struct peer_connection *bgp_dequeue_conn_err(struct bgp *bgp, bool *more_p) { - struct peer *peer = NULL; + struct peer_connection *connection = NULL; bool more = false; frr_with_mutex (&bgp->peer_errs_mtx) { - peer = bgp_peer_conn_errlist_pop(&bgp->peer_conn_errlist); + connection = bgp_peer_conn_errlist_pop(&bgp->peer_conn_errlist); if (bgp_peer_conn_errlist_const_first( &bgp->peer_conn_errlist) != NULL) @@ -9328,7 +9330,7 @@ struct peer *bgp_dequeue_conn_err_peer(struct bgp *bgp, bool *more_p) if (more_p) *more_p = more; - return peer; + return connection; } /* diff --git a/bgpd/bgpd.h b/bgpd/bgpd.h index cbeb30d8b4d5..843e4b405c42 100644 --- a/bgpd/bgpd.h +++ b/bgpd/bgpd.h @@ -1296,6 +1296,12 @@ struct peer_connection { struct event *t_stop_with_notify; + /* Linkage for list connections with errors, from IO pthread */ + struct bgp_peer_conn_errlist_item conn_err_link; + + /* Connection error code */ + uint16_t connection_errcode; + union sockunion su; #define BGP_CONNECTION_SU_UNSPEC(connection) \ (connection->su.sa.sa_family == AF_UNSPEC) @@ -1981,12 +1987,6 @@ struct peer { /* Add-Path Paths-Limit */ struct addpath_paths_limit addpath_paths_limit[AFI_MAX][SAFI_MAX]; - /* Linkage for list of peers with connection errors from IO pthread */ - struct bgp_peer_conn_errlist_item conn_err_link; - - /* Connection error code */ - uint16_t connection_errcode; - /* Linkage for hash of clearing peers being cleared in a batch */ struct bgp_clearing_hash_item clear_hash_link; @@ -2627,8 +2627,9 @@ int bgp_global_gr_init(struct bgp *bgp); int bgp_peer_gr_init(struct peer *peer); /* APIs for the per-bgp peer connection error list */ -int bgp_enqueue_conn_err_peer(struct bgp *bgp, struct peer *peer, int errcode); -struct peer *bgp_dequeue_conn_err_peer(struct bgp *bgp, bool *more_p); +int bgp_enqueue_conn_err(struct bgp *bgp, struct peer_connection *connection, + int errcode); +struct peer_connection *bgp_dequeue_conn_err(struct bgp *bgp, bool *more_p); void bgp_conn_err_reschedule(struct bgp *bgp); #define BGP_GR_ROUTER_DETECT_AND_SEND_CAPABILITY_TO_ZEBRA(_bgp, _peer_list) \ From 0d2605a5f7b71a6809b278d7a4e54ccf8e9fbae6 Mon Sep 17 00:00:00 2001 From: Mark Stapp Date: Thu, 19 Sep 2024 15:46:56 -0400 Subject: [PATCH 5/5] tests: add bgp peer-shutdown topotest Add a simple topotest using multiple bgp peers; based on the ecmp_topo1 test. 
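For reference, a rough sketch of the convergence check this kind of test
performs, modeled on the ecmp_topo1 helpers and the bundled r1/summary.txt.
It assumes the standard topotest APIs (get_topogen, vtysh_cmd, json_cmp,
run_and_expect) and is illustrative only, not the contents of
test_bgp_peer_shut.py:

    import functools
    import json
    import os

    from lib import topotest
    from lib.topogen import get_topogen

    CWD = os.path.dirname(os.path.realpath(__file__))

    def check_r1_bgp_summary():
        "Poll r1 until 'show ip bgp summary json' matches r1/summary.txt."
        tgen = get_topogen()
        router = tgen.gears["r1"]
        expected = json.loads(open(os.path.join(CWD, "r1/summary.txt")).read())

        def _check(rtr):
            # json_cmp() returns None when all expected keys match.
            output = json.loads(rtr.vtysh_cmd("show ip bgp summary json"))
            return topotest.json_cmp(output, expected)

        test_func = functools.partial(_check, router)
        _, result = topotest.run_and_expect(test_func, None, count=60, wait=1)
        assert result is None, "r1 did not reach the expected BGP summary"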
Signed-off-by: Mark Stapp --- tests/topotests/bgp_peer_shut/__init__.py | 0 .../topotests/bgp_peer_shut/bgp-peer-shut.dot | 206 ++++++++++++++++++ .../topotests/bgp_peer_shut/bgp-peer-shut.pdf | Bin 0 -> 11891 bytes tests/topotests/bgp_peer_shut/exabgp.env | 55 +++++ .../topotests/bgp_peer_shut/peer1/exa-send.py | 66 ++++++ .../topotests/bgp_peer_shut/peer1/exabgp.cfg | 18 ++ .../bgp_peer_shut/peer10/exa-send.py | 66 ++++++ .../topotests/bgp_peer_shut/peer10/exabgp.cfg | 18 ++ .../bgp_peer_shut/peer11/exa-send.py | 66 ++++++ .../topotests/bgp_peer_shut/peer11/exabgp.cfg | 18 ++ .../bgp_peer_shut/peer12/exa-send.py | 66 ++++++ .../topotests/bgp_peer_shut/peer12/exabgp.cfg | 18 ++ .../bgp_peer_shut/peer13/exa-send.py | 66 ++++++ .../topotests/bgp_peer_shut/peer13/exabgp.cfg | 18 ++ .../bgp_peer_shut/peer14/exa-send.py | 66 ++++++ .../topotests/bgp_peer_shut/peer14/exabgp.cfg | 18 ++ .../bgp_peer_shut/peer15/exa-send.py | 66 ++++++ .../topotests/bgp_peer_shut/peer15/exabgp.cfg | 18 ++ .../bgp_peer_shut/peer16/exa-send.py | 66 ++++++ .../topotests/bgp_peer_shut/peer16/exabgp.cfg | 18 ++ .../bgp_peer_shut/peer17/exa-send.py | 66 ++++++ .../topotests/bgp_peer_shut/peer17/exabgp.cfg | 18 ++ .../bgp_peer_shut/peer18/exa-send.py | 66 ++++++ .../topotests/bgp_peer_shut/peer18/exabgp.cfg | 18 ++ .../bgp_peer_shut/peer19/exa-send.py | 66 ++++++ .../topotests/bgp_peer_shut/peer19/exabgp.cfg | 18 ++ .../topotests/bgp_peer_shut/peer2/exa-send.py | 66 ++++++ .../topotests/bgp_peer_shut/peer2/exabgp.cfg | 18 ++ .../bgp_peer_shut/peer20/exa-send.py | 66 ++++++ .../topotests/bgp_peer_shut/peer20/exabgp.cfg | 18 ++ .../topotests/bgp_peer_shut/peer3/exa-send.py | 66 ++++++ .../topotests/bgp_peer_shut/peer3/exabgp.cfg | 18 ++ .../topotests/bgp_peer_shut/peer4/exa-send.py | 66 ++++++ .../topotests/bgp_peer_shut/peer4/exabgp.cfg | 18 ++ .../topotests/bgp_peer_shut/peer5/exa-send.py | 66 ++++++ .../topotests/bgp_peer_shut/peer5/exabgp.cfg | 18 ++ .../topotests/bgp_peer_shut/peer6/exa-send.py | 66 ++++++ .../topotests/bgp_peer_shut/peer6/exabgp.cfg | 18 ++ .../topotests/bgp_peer_shut/peer7/exa-send.py | 66 ++++++ .../topotests/bgp_peer_shut/peer7/exabgp.cfg | 18 ++ .../topotests/bgp_peer_shut/peer8/exa-send.py | 66 ++++++ .../topotests/bgp_peer_shut/peer8/exabgp.cfg | 18 ++ .../topotests/bgp_peer_shut/peer9/exa-send.py | 66 ++++++ .../topotests/bgp_peer_shut/peer9/exabgp.cfg | 18 ++ tests/topotests/bgp_peer_shut/r1/bgpd.conf | 51 +++++ tests/topotests/bgp_peer_shut/r1/summary.txt | 131 +++++++++++ tests/topotests/bgp_peer_shut/r1/zebra.conf | 16 ++ .../bgp_peer_shut/test_bgp_peer_shut.py | 180 +++++++++++++++ 48 files changed, 2319 insertions(+) create mode 100644 tests/topotests/bgp_peer_shut/__init__.py create mode 100644 tests/topotests/bgp_peer_shut/bgp-peer-shut.dot create mode 100644 tests/topotests/bgp_peer_shut/bgp-peer-shut.pdf create mode 100644 tests/topotests/bgp_peer_shut/exabgp.env create mode 100755 tests/topotests/bgp_peer_shut/peer1/exa-send.py create mode 100644 tests/topotests/bgp_peer_shut/peer1/exabgp.cfg create mode 100755 tests/topotests/bgp_peer_shut/peer10/exa-send.py create mode 100644 tests/topotests/bgp_peer_shut/peer10/exabgp.cfg create mode 100755 tests/topotests/bgp_peer_shut/peer11/exa-send.py create mode 100644 tests/topotests/bgp_peer_shut/peer11/exabgp.cfg create mode 100755 tests/topotests/bgp_peer_shut/peer12/exa-send.py create mode 100644 tests/topotests/bgp_peer_shut/peer12/exabgp.cfg create mode 100755 tests/topotests/bgp_peer_shut/peer13/exa-send.py create mode 100644 
tests/topotests/bgp_peer_shut/peer13/exabgp.cfg create mode 100755 tests/topotests/bgp_peer_shut/peer14/exa-send.py create mode 100644 tests/topotests/bgp_peer_shut/peer14/exabgp.cfg create mode 100755 tests/topotests/bgp_peer_shut/peer15/exa-send.py create mode 100644 tests/topotests/bgp_peer_shut/peer15/exabgp.cfg create mode 100755 tests/topotests/bgp_peer_shut/peer16/exa-send.py create mode 100644 tests/topotests/bgp_peer_shut/peer16/exabgp.cfg create mode 100755 tests/topotests/bgp_peer_shut/peer17/exa-send.py create mode 100644 tests/topotests/bgp_peer_shut/peer17/exabgp.cfg create mode 100755 tests/topotests/bgp_peer_shut/peer18/exa-send.py create mode 100644 tests/topotests/bgp_peer_shut/peer18/exabgp.cfg create mode 100755 tests/topotests/bgp_peer_shut/peer19/exa-send.py create mode 100644 tests/topotests/bgp_peer_shut/peer19/exabgp.cfg create mode 100755 tests/topotests/bgp_peer_shut/peer2/exa-send.py create mode 100644 tests/topotests/bgp_peer_shut/peer2/exabgp.cfg create mode 100755 tests/topotests/bgp_peer_shut/peer20/exa-send.py create mode 100644 tests/topotests/bgp_peer_shut/peer20/exabgp.cfg create mode 100755 tests/topotests/bgp_peer_shut/peer3/exa-send.py create mode 100644 tests/topotests/bgp_peer_shut/peer3/exabgp.cfg create mode 100755 tests/topotests/bgp_peer_shut/peer4/exa-send.py create mode 100644 tests/topotests/bgp_peer_shut/peer4/exabgp.cfg create mode 100755 tests/topotests/bgp_peer_shut/peer5/exa-send.py create mode 100644 tests/topotests/bgp_peer_shut/peer5/exabgp.cfg create mode 100755 tests/topotests/bgp_peer_shut/peer6/exa-send.py create mode 100644 tests/topotests/bgp_peer_shut/peer6/exabgp.cfg create mode 100755 tests/topotests/bgp_peer_shut/peer7/exa-send.py create mode 100644 tests/topotests/bgp_peer_shut/peer7/exabgp.cfg create mode 100755 tests/topotests/bgp_peer_shut/peer8/exa-send.py create mode 100644 tests/topotests/bgp_peer_shut/peer8/exabgp.cfg create mode 100755 tests/topotests/bgp_peer_shut/peer9/exa-send.py create mode 100644 tests/topotests/bgp_peer_shut/peer9/exabgp.cfg create mode 100644 tests/topotests/bgp_peer_shut/r1/bgpd.conf create mode 100644 tests/topotests/bgp_peer_shut/r1/summary.txt create mode 100644 tests/topotests/bgp_peer_shut/r1/zebra.conf create mode 100644 tests/topotests/bgp_peer_shut/test_bgp_peer_shut.py diff --git a/tests/topotests/bgp_peer_shut/__init__.py b/tests/topotests/bgp_peer_shut/__init__.py new file mode 100644 index 000000000000..e69de29bb2d1 diff --git a/tests/topotests/bgp_peer_shut/bgp-peer-shut.dot b/tests/topotests/bgp_peer_shut/bgp-peer-shut.dot new file mode 100644 index 000000000000..290b31509df4 --- /dev/null +++ b/tests/topotests/bgp_peer_shut/bgp-peer-shut.dot @@ -0,0 +1,206 @@ +## Color coding: +######################### +## Main FRR: #f08080 red +## Switches: #d0e0d0 gray +## RIP: #19e3d9 Cyan +## RIPng: #fcb314 dark yellow +## OSPFv2: #32b835 Green +## OSPFv3: #19e3d9 Cyan +## ISIS IPv4 #fcb314 dark yellow +## ISIS IPv6 #9a81ec purple +## BGP IPv4 #eee3d3 beige +## BGP IPv6 #fdff00 yellow +##### Colors (see http://www.color-hex.com/) + +graph ospf_ecmp_iBGP_topo1 { + label="bgp peer shut - eBGP peer shut"; + labelloc="t"; + + # Routers + r1 [ + label="r1\nrtr-id 10.0.255.1/32", + shape=doubleoctagon, + fillcolor="#f08080", + style=filled, + ]; + + # 4 Switches for eBGP Peers + s1 [ + label="s1\n10.0.1.0/24", + shape=oval, + fillcolor="#d0e0d0", + style=filled, + ]; + s2 [ + label="s2\n10.0.2.0/24", + shape=oval, + fillcolor="#d0e0d0", + style=filled, + ]; + s3 [ + label="s3\n10.0.3.0/24", + 
shape=oval, + fillcolor="#d0e0d0", + style=filled, + ]; + s4 [ + label="s4\n10.0.4.0/24", + shape=oval, + fillcolor="#d0e0d0", + style=filled, + ]; + + # 20 ExaBGP Peers AS 101...120 + peer1 [ + label="eBGP peer1\nAS99\nrtr-id 10.0.1.101/32", + shape=rectangle, + fillcolor="#eee3d3", + style=filled, + ]; + peer2 [ + label="eBGP peer2\nAS99\nrtr-id 10.0.1.102/32", + shape=rectangle, + fillcolor="#eee3d3", + style=filled, + ]; + peer3 [ + label="eBGP peer3\nAS99\nrtr-id 10.0.1.103/32", + shape=rectangle, + fillcolor="#eee3d3", + style=filled, + ]; + peer4 [ + label="eBGP peer4\nAS99\nrtr-id 10.0.1.104/32", + shape=rectangle, + fillcolor="#eee3d3", + style=filled, + ]; + peer5 [ + label="eBGP peer5\nAS99\nrtr-id 10.0.1.105/32", + shape=rectangle, + fillcolor="#eee3d3", + style=filled, + ]; + peer6 [ + label="eBGP peer6\nAS99\nrtr-id 10.0.2.106/32", + shape=rectangle, + fillcolor="#eee3d3", + style=filled, + ]; + peer7 [ + label="eBGP peer7\nAS99\nrtr-id 10.0.2.107/32", + shape=rectangle, + fillcolor="#eee3d3", + style=filled, + ]; + peer8 [ + label="eBGP peer8\nAS99\nrtr-id 10.0.2.108/32", + shape=rectangle, + fillcolor="#eee3d3", + style=filled, + ]; + peer9 [ + label="eBGP peer9\nAS99\nrtr-id 10.0.2.109/32", + shape=rectangle, + fillcolor="#eee3d3", + style=filled, + ]; + peer10 [ + label="eBGP peer10\nAS99\nrtr-id 10.0.2.110/32", + shape=rectangle, + fillcolor="#eee3d3", + style=filled, + ]; + peer11 [ + label="eBGP peer11\nAS111\nrtr-id 10.0.3.111/32", + shape=rectangle, + fillcolor="#eee3d3", + style=filled, + ]; + peer12 [ + label="eBGP peer12\nAS112\nrtr-id 10.0.3.112/32", + shape=rectangle, + fillcolor="#eee3d3", + style=filled, + ]; + peer13 [ + label="eBGP peer13\nAS113\nrtr-id 10.0.3.113/32", + shape=rectangle, + fillcolor="#eee3d3", + style=filled, + ]; + peer14 [ + label="eBGP peer14\nAS114\nrtr-id 10.0.3.114/32", + shape=rectangle, + fillcolor="#eee3d3", + style=filled, + ]; + peer15 [ + label="eBGP peer15\nAS115\nrtr-id 10.0.3.115/32", + shape=rectangle, + fillcolor="#eee3d3", + style=filled, + ]; + peer16 [ + label="eBGP peer16\nAS116\nrtr-id 10.0.4.116/32", + shape=rectangle, + fillcolor="#eee3d3", + style=filled, + ]; + peer17 [ + label="eBGP peer17\nAS117\nrtr-id 10.0.4.117/32", + shape=rectangle, + fillcolor="#eee3d3", + style=filled, + ]; + peer18 [ + label="eBGP peer18\nAS118\nrtr-id 10.0.4.118/32", + shape=rectangle, + fillcolor="#eee3d3", + style=filled, + ]; + peer19 [ + label="eBGP peer19\nAS119\nrtr-id 10.0.4.119/32", + shape=rectangle, + fillcolor="#eee3d3", + style=filled, + ]; + peer20 [ + label="eBGP peer20\nAS120\nrtr-id 10.0.4.120/32", + shape=rectangle, + fillcolor="#eee3d3", + style=filled, + ]; + + # Connections + r1 -- s1 [label="eth0\n.1"]; + r1 -- s2 [label="eth1\n.1"]; + r1 -- s3 [label="eth2\n.1"]; + r1 -- s4 [label="eth3\n.1"]; + + peer1 -- s1 [label="eth0\n.101"]; + peer2 -- s1 [label="eth0\n.102"]; + peer3 -- s1 [label="eth0\n.103"]; + peer4 -- s1 [label="eth0\n.104"]; + peer5 -- s1 [label="eth0\n.105"]; + peer6 -- s2 [label="eth0\n.106"]; + peer7 -- s2 [label="eth0\n.107"]; + peer8 -- s2 [label="eth0\n.108"]; + peer9 -- s2 [label="eth0\n.109"]; + peer10 -- s2 [label="eth0\n.110"]; + peer11 -- s3 [label="eth0\n.111"]; + peer12 -- s3 [label="eth0\n.112"]; + peer13 -- s3 [label="eth0\n.113"]; + peer14 -- s3 [label="eth0\n.114"]; + peer15 -- s3 [label="eth0\n.115"]; + peer16 -- s4 [label="eth0\n.116"]; + peer17 -- s4 [label="eth0\n.117"]; + peer18 -- s4 [label="eth0\n.118"]; + peer19 -- s4 [label="eth0\n.119"]; + peer20 -- s4 [label="eth0\n.120"]; + + 
# Arrange network to make cleaner diagram
+	{ rank=same peer1 peer2 peer3 peer4 peer5 } -- s1 -- { rank=same peer6 peer7 peer8 peer9 peer10 } -- s2
+	     -- { rank=same peer11 peer12 peer13 peer14 peer15 } -- s3 -- { rank=same peer16 peer17 peer18 peer19 peer20 } -- s4
+	     -- { rank=same r1 } [style=invis]
+}
diff --git a/tests/topotests/bgp_peer_shut/bgp-peer-shut.pdf b/tests/topotests/bgp_peer_shut/bgp-peer-shut.pdf
new file mode 100644
index 0000000000000000000000000000000000000000..fff66bb39bcf873005e6405f5dac7ec68e7ffcae
GIT binary patch
literal 11891
[base85 binary data for bgp-peer-shut.pdf omitted]
zO%_>ADh;KLCl=?m2Fw}f!vnU0et|QtuZ}SjPwk)lmn8aE&%u2X1w;RCd-xp;|8$YI zs@Wfz@y`wb=pSk8e?tG&V4h|G@E@5<3!n=EfC1n?GNG!y{e4RNy90t#*3R4>0OI*8 zNd9iwHZe3jzsn8}hzvlG0D=xb0f9j9J`#GCUL5J;xWX-*WnX0crcM#DE7Y(t1o}~L z5ebaAp}5CS%<