memory: topology: expose per-numa memory #268

Merged
merged 2 commits into from
Nov 2, 2021
20 changes: 18 additions & 2 deletions README.md
@@ -262,10 +262,24 @@ function.

### Memory

The basic building block of ghw's memory support is the `ghw.MemoryArea` struct.
A "memory area" is a block of memory that shares common properties. In the simplest
case, all of the system memory fits in a single memory area; in more complex scenarios,
such as multi-NUMA systems, several memory areas may be present (e.g. one per
NUMA cell).

The `ghw.MemoryArea` struct contains the following fields:

* `ghw.MemoryArea.TotalPhysicalBytes` contains the amount of physical memory on
  the host
* `ghw.MemoryArea.TotalUsableBytes` contains the amount of memory the
  system can actually use. Usable memory accounts for things like the kernel's
  resident memory size and some reserved system bits

Information about the host computer's memory can be retrieved using the
`ghw.Memory()` function, which returns a pointer to a `ghw.MemoryInfo` struct.

The `ghw.MemoryInfo` struct contains three fields:
`ghw.MemoryInfo` is a superset of `ghw.MemoryArea`: it contains all the fields
found in `ghw.MemoryArea` (replicated here for clarity) plus a few more:

* `ghw.MemoryInfo.TotalPhysicalBytes` contains the amount of physical memory on
  the host
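
For orientation, here is a minimal usage sketch (not itself part of this change)
built only from the fields documented above; error handling is kept deliberately
simple:

```go
package main

import (
	"fmt"

	"github.com/jaypipes/ghw"
)

func main() {
	mem, err := ghw.Memory()
	if err != nil {
		fmt.Printf("error getting memory info: %v\n", err)
		return
	}

	// String() renders a summary such as "memory (16GB physical, 15GB usable)".
	fmt.Println(mem.String())

	// The totals are plain int64 byte counts.
	fmt.Printf("physical: %d bytes, usable: %d bytes\n",
		mem.TotalPhysicalBytes, mem.TotalUsableBytes)
}
```
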
@@ -600,6 +614,8 @@ Each `ghw.TopologyNode` struct contains the following fields:
system
* `ghw.TopologyNode.Distances` is an array of distances between NUMA nodes as
  reported by the system.
* `ghw.TopologyNode.Memory` is a struct describing the memory attached to this
  node; see the `ghw.MemoryArea` documentation above and the short sketch below.
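
A short sketch of reading the new per-node memory through the topology API (field
names follow the documentation above; this is an illustration, not part of the diff):

```go
package main

import (
	"fmt"

	"github.com/jaypipes/ghw"
)

func main() {
	topo, err := ghw.Topology()
	if err != nil {
		fmt.Printf("error getting topology info: %v\n", err)
		return
	}

	for _, node := range topo.Nodes {
		// node.Memory is the per-NUMA-node memory area exposed by this change;
		// its String() form mirrors the top-level memory summary.
		fmt.Printf("node #%d: %v\n", node.ID, node.Memory)
	}
}
```
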

See above in the [CPU](#cpu) section for information about the
`ghw.ProcessorCore` struct and how to use and query it.
1 change: 1 addition & 0 deletions alias.go
@@ -45,6 +45,7 @@ var (
	CPU = cpu.New
)

type MemoryArea = memory.Area
type MemoryInfo = memory.Info
type MemoryCacheType = memory.CacheType
type MemoryModule = memory.Module
5 changes: 5 additions & 0 deletions cmd/ghwc/commands/topology.go
@@ -37,6 +37,11 @@ func showTopology(cmd *cobra.Command, args []string) error {
			for _, cache := range node.Caches {
				fmt.Printf(" %v\n", cache)
			}
			fmt.Printf(" %v\n", node.Memory)
			fmt.Printf(" distances\n")
			for nodeID, dist := range node.Distances {
				fmt.Printf(" to node #%d %v\n", nodeID, dist)
			}
		}
	case outputFormatJSON:
		fmt.Printf("%s\n", topology.JSONString(pretty))
44 changes: 26 additions & 18 deletions pkg/memory/memory.go
@@ -25,15 +25,37 @@ type Module struct {
	Vendor string `json:"vendor"`
}

type Info struct {
	ctx *context.Context
type Area struct {
	TotalPhysicalBytes int64 `json:"total_physical_bytes"`
	TotalUsableBytes int64 `json:"total_usable_bytes"`
	// An array of sizes, in bytes, of memory pages supported by the host
	// An array of sizes, in bytes, of memory pages supported in this area
	SupportedPageSizes []uint64 `json:"supported_page_sizes"`
	Modules []*Module `json:"modules"`
}

func (a *Area) String() string {
	tpbs := util.UNKNOWN
	if a.TotalPhysicalBytes > 0 {
		tpb := a.TotalPhysicalBytes
		unit, unitStr := unitutil.AmountString(tpb)
		tpb = int64(math.Ceil(float64(a.TotalPhysicalBytes) / float64(unit)))
		tpbs = fmt.Sprintf("%d%s", tpb, unitStr)
	}
	tubs := util.UNKNOWN
	if a.TotalUsableBytes > 0 {
		tub := a.TotalUsableBytes
		unit, unitStr := unitutil.AmountString(tub)
		tub = int64(math.Ceil(float64(a.TotalUsableBytes) / float64(unit)))
		tubs = fmt.Sprintf("%d%s", tub, unitStr)
	}
	return fmt.Sprintf("memory (%s physical, %s usable)", tpbs, tubs)
}

type Info struct {
	ctx *context.Context
	Area
}

func New(opts ...*option.Option) (*Info, error) {
	ctx := context.New(opts...)
	info := &Info{ctx: ctx}
@@ -44,21 +66,7 @@ func New(opts ...*option.Option) (*Info, error) {
}

func (i *Info) String() string {
	tpbs := util.UNKNOWN
	if i.TotalPhysicalBytes > 0 {
		tpb := i.TotalPhysicalBytes
		unit, unitStr := unitutil.AmountString(tpb)
		tpb = int64(math.Ceil(float64(i.TotalPhysicalBytes) / float64(unit)))
		tpbs = fmt.Sprintf("%d%s", tpb, unitStr)
	}
	tubs := util.UNKNOWN
	if i.TotalUsableBytes > 0 {
		tub := i.TotalUsableBytes
		unit, unitStr := unitutil.AmountString(tub)
		tub = int64(math.Ceil(float64(i.TotalUsableBytes) / float64(unit)))
		tubs = fmt.Sprintf("%d%s", tub, unitStr)
	}
	return fmt.Sprintf("memory (%s physical, %s usable)", tpbs, tubs)
	return i.Area.String()
}

// simple private struct used to encapsulate memory information in a top-level
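
To make the refactor above concrete: `Info` now embeds `Area`, so the area's fields
and its `String()` method are promoted onto `Info`, and `Info.String()` simply
delegates. A small sketch using the top-level aliases from `alias.go` above
(illustrative only, not part of the diff):

```go
package main

import (
	"fmt"

	"github.com/jaypipes/ghw"
)

func main() {
	mem, err := ghw.Memory() // *ghw.MemoryInfo, which embeds ghw.MemoryArea
	if err != nil {
		fmt.Printf("error getting memory info: %v\n", err)
		return
	}

	// The embedded area can be handled on its own, e.g. for comparison with
	// the per-NUMA-node areas exposed through the topology package.
	area := mem.Area
	fmt.Println(area.TotalPhysicalBytes == mem.TotalPhysicalBytes) // true: promoted field
	fmt.Println(area.String() == mem.String())                     // true: Info.String delegates to Area.String
}
```
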
28 changes: 14 additions & 14 deletions pkg/memory/memory_cache_linux.go
@@ -80,14 +80,14 @@ func CachesForNode(ctx *context.Context, nodeID int) ([]*Cache, error) {
			// The cache information is repeated for each node, so here, we
			// just ensure that we only have one Cache object for each
			// unique combination of level, type and processor map
			level := memoryCacheLevel(paths, nodeID, lpID, cacheIndex)
			cacheType := memoryCacheType(paths, nodeID, lpID, cacheIndex)
			sharedCpuMap := memoryCacheSharedCPUMap(paths, nodeID, lpID, cacheIndex)
			level := memoryCacheLevel(ctx, paths, nodeID, lpID, cacheIndex)
			cacheType := memoryCacheType(ctx, paths, nodeID, lpID, cacheIndex)
			sharedCpuMap := memoryCacheSharedCPUMap(ctx, paths, nodeID, lpID, cacheIndex)
			cacheKey := fmt.Sprintf("%d-%d-%s", level, cacheType, sharedCpuMap)

			cache, exists := caches[cacheKey]
			if !exists {
				size := memoryCacheSize(paths, nodeID, lpID, level)
				size := memoryCacheSize(ctx, paths, nodeID, lpID, level)
				cache = &Cache{
					Level: uint8(level),
					Type: cacheType,
@@ -115,53 +115,53 @@ func CachesForNode(ctx *context.Context, nodeID int) ([]*Cache, error) {
	return cacheVals, nil
}

func memoryCacheLevel(paths *linuxpath.Paths, nodeID int, lpID int, cacheIndex int) int {
func memoryCacheLevel(ctx *context.Context, paths *linuxpath.Paths, nodeID int, lpID int, cacheIndex int) int {
	levelPath := filepath.Join(
		paths.NodeCPUCacheIndex(nodeID, lpID, cacheIndex),
		"level",
	)
	levelContents, err := ioutil.ReadFile(levelPath)
	if err != nil {
		_, _ = fmt.Fprintf(os.Stderr, "%s\n", err)
		ctx.Warn("%s", err)
		return -1
	}
	// levelContents is now a []byte with the last byte being a newline
	// character. Trim that off and convert the contents to an integer.
	level, err := strconv.Atoi(string(levelContents[:len(levelContents)-1]))
	if err != nil {
		_, _ = fmt.Fprintf(os.Stderr, "Unable to parse int from %s\n", levelContents)
		ctx.Warn("Unable to parse int from %s", levelContents)
		return -1
	}
	return level
}

func memoryCacheSize(paths *linuxpath.Paths, nodeID int, lpID int, cacheIndex int) int {
func memoryCacheSize(ctx *context.Context, paths *linuxpath.Paths, nodeID int, lpID int, cacheIndex int) int {
	sizePath := filepath.Join(
		paths.NodeCPUCacheIndex(nodeID, lpID, cacheIndex),
		"size",
	)
	sizeContents, err := ioutil.ReadFile(sizePath)
	if err != nil {
		_, _ = fmt.Fprintf(os.Stderr, "%s\n", err)
		ctx.Warn("%s", err)
		return -1
	}
	// size comes as XK\n, so we trim off the K and the newline.
	size, err := strconv.Atoi(string(sizeContents[:len(sizeContents)-2]))
	if err != nil {
		_, _ = fmt.Fprintf(os.Stderr, "Unable to parse int from %s\n", sizeContents)
		ctx.Warn("Unable to parse int from %s", sizeContents)
		return -1
	}
	return size
}

func memoryCacheType(paths *linuxpath.Paths, nodeID int, lpID int, cacheIndex int) CacheType {
func memoryCacheType(ctx *context.Context, paths *linuxpath.Paths, nodeID int, lpID int, cacheIndex int) CacheType {
	typePath := filepath.Join(
		paths.NodeCPUCacheIndex(nodeID, lpID, cacheIndex),
		"type",
	)
	cacheTypeContents, err := ioutil.ReadFile(typePath)
	if err != nil {
		_, _ = fmt.Fprintf(os.Stderr, "%s\n", err)
		ctx.Warn("%s", err)
		return CACHE_TYPE_UNIFIED
	}
	switch string(cacheTypeContents[:len(cacheTypeContents)-1]) {
@@ -174,14 +174,14 @@ func memoryCacheType(paths *linuxpath.Paths, nodeID int, lpID int, cacheIndex in
	}
}

func memoryCacheSharedCPUMap(paths *linuxpath.Paths, nodeID int, lpID int, cacheIndex int) string {
func memoryCacheSharedCPUMap(ctx *context.Context, paths *linuxpath.Paths, nodeID int, lpID int, cacheIndex int) string {
	scpuPath := filepath.Join(
		paths.NodeCPUCacheIndex(nodeID, lpID, cacheIndex),
		"shared_cpu_map",
	)
	sharedCpuMap, err := ioutil.ReadFile(scpuPath)
	if err != nil {
		_, _ = fmt.Fprintf(os.Stderr, "%s\n", err)
		ctx.Warn("%s", err)
		return ""
	}
	return string(sharedCpuMap[:len(sharedCpuMap)-1])