Mirror of https://github.com/taigrr/wtf (synced 2025-01-18 04:03:14 -08:00)

resource usage mod

This commit is contained in:
Nicholas Eden 2018-11-12 01:30:24 -08:00
parent 748ad82967
commit cab27c62ab
36 changed files with 3346 additions and 56 deletions

18
Gopkg.lock generated
View File

@ -9,6 +9,14 @@
revision = "dfffe386c33fb24c34ee501e5723df5b97b98514"
version = "v0.30.0"
[[projects]]
branch = "master"
digest = "1:d1a104572273cfb58e801812704dca4fe2eab80f3b0144592a722a76c280c598"
name = "code.cloudfoundry.org/bytefmt"
packages = ["."]
pruneopts = "UT"
revision = "2aa6f33b730c79971cfc3c742f279195b0abc627"
[[projects]]
branch = "master"
digest = "1:636ac9f696c988f0038afd43592f7a0fff29038588ab1064ba8ba3476bb41091"
@ -73,6 +81,14 @@
pruneopts = "UT"
revision = "5f41b7c9d92de5d74bf32f4486375c7547bc8a3c"
[[projects]]
branch = "master"
digest = "1:080689a7cb710dc8f694005f8e5b3aab861d01e885eaf53fae54431c4635bfe6"
name = "github.com/c9s/goprocinfo"
packages = ["linux"]
pruneopts = "UT"
revision = "0010a05ce49fde7f50669bc7ecda7d41dd6ab824"
[[projects]]
digest = "1:2209584c0f7c9b68c23374e659357ab546e1b70eec2761f03280f69a8fd23d77"
name = "github.com/cenkalti/backoff"
@ -379,12 +395,14 @@
analyzer-name = "dep"
analyzer-version = 1
input-imports = [
"code.cloudfoundry.org/bytefmt",
"github.com/adlio/trello",
"github.com/alecthomas/chroma/formatters",
"github.com/alecthomas/chroma/lexers",
"github.com/alecthomas/chroma/styles",
"github.com/andygrunwald/go-gerrit",
"github.com/briandowns/openweathermap",
"github.com/c9s/goprocinfo/linux",
"github.com/darkSasori/todoist",
"github.com/dustin/go-humanize",
"github.com/gdamore/tcell",

View File

@ -97,3 +97,11 @@
[[constraint]]
branch = "master"
name = "github.com/zorkian/go-datadog-api"
[[constraint]]
branch = "master"
name = "github.com/c9s/goprocinfo"
[[constraint]]
branch = "master"
name = "code.cloudfoundry.org/bytefmt"

View File

@ -5,6 +5,7 @@ This is a demo bargraph that just populates some random date/val data
*/
import (
"github.com/rivo/tview"
"math/rand"
"time"
@ -20,9 +21,9 @@ type Widget struct {
}
// NewWidget Make new instance of widget
func NewWidget() *Widget {
func NewWidget(app *tview.Application) *Widget {
widget := Widget{
BarGraph: wtf.NewBarGraph("Sample Bar Graph", "bargraph", false),
BarGraph: wtf.NewBarGraph(app, "Sample Bar Graph", "bargraph", false),
}
widget.View.SetWrap(true)
@ -37,14 +38,19 @@ func NewWidget() *Widget {
func MakeGraph(widget *Widget) {
//this could come from config
const lineCount = 20
var stats [lineCount][2]int64
const lineCount = 8
var stats [lineCount]wtf.Bar
for i := lineCount - 1; i >= 0; i-- {
barTime := time.Now()
for i := 0; i < lineCount; i++ {
barTime = barTime.Add(time.Duration(rand.Intn(10 * int(time.Minute))))
stats[i][1] = time.Now().AddDate(0, 0, i*-1).Unix() * 1000
stats[i][0] = int64(rand.Intn(120-5) + 5)
bar := wtf.Bar{
Label: barTime.Format("15:04"),
Percent: rand.Intn(100-5) + 5,
}
stats[i] = bar
}
widget.BarGraph.BuildBars(stats[:])

View File

@ -2,6 +2,7 @@ package main
import (
"fmt"
"github.com/senorprogrammer/wtf/resourceusage"
"log"
"os"
"time"
@ -167,7 +168,7 @@ func addWidget(app *tview.Application, pages *tview.Pages, widgetName string) {
case "bamboohr":
widgets = append(widgets, bamboohr.NewWidget(app))
case "bargraph":
widgets = append(widgets, bargraph.NewWidget())
widgets = append(widgets, bargraph.NewWidget(app))
case "bittrex":
widgets = append(widgets, bittrex.NewWidget(app))
case "blockfolio":
@ -178,6 +179,8 @@ func addWidget(app *tview.Application, pages *tview.Pages, widgetName string) {
widgets = append(widgets, clocks.NewWidget(app))
case "cmdrunner":
widgets = append(widgets, cmdrunner.NewWidget(app))
case "resourceusage":
widgets = append(widgets, resourceusage.NewWidget(app))
case "cryptolive":
widgets = append(widgets, cryptolive.NewWidget(app))
case "datadog":

141
resourceusage/widget.go Normal file
View File

@ -0,0 +1,141 @@
package resourceusage
import (
"code.cloudfoundry.org/bytefmt"
"fmt"
"github.com/c9s/goprocinfo/linux"
"github.com/rivo/tview"
"github.com/senorprogrammer/wtf/wtf"
)
// started and ok are never read or written by any code visible in this
// file. NOTE(review): presumably template leftovers — confirm no other
// file in the package uses them before removing.
var started = false
var ok = true

// prevStats caches the CPU counters from the previous MakeGraph call so
// the next refresh can compute per-interval usage deltas.
var prevStats []linux.CPUStat

// Widget is the resource usage widget. It embeds wtf.BarGraph, which
// supplies the view, bar rendering, and enable/disable plumbing.
type Widget struct {
	wtf.BarGraph
}
// NewWidget constructs a resource usage widget bound to the given
// tview application, with word wrapping enabled on its view.
func NewWidget(app *tview.Application) *Widget {
	w := &Widget{
		BarGraph: wtf.NewBarGraph(app, "Resource Usage", "resourceusage", false),
	}

	w.View.SetWrap(true)
	w.View.SetWordWrap(true)

	return w
}
/* -------------------- Exported Functions -------------------- */
// MakeGraph samples /proc/stat and /proc/meminfo and rebuilds the
// widget's bar graph: one bar per CPU (percent busy since the previous
// sample) plus one bar each for memory and swap usage.
func MakeGraph(widget *Widget) {
	cpuStat, err := linux.ReadStat("/proc/stat")
	if err != nil {
		return // leave the previous graph in place on read failure
	}

	// One bar per CPU, plus one each for memory and swap.
	stats := make([]wtf.Bar, len(cpuStat.CPUStats)+2)

	for i, stat := range cpuStat.CPUStats {
		// First sample for this CPU: diff against itself (renders 0%).
		prevStat := stat
		if len(prevStats) > i {
			prevStat = prevStats[i]
		} else {
			prevStats = append(prevStats, stat)
		}

		// htop's algorithm, described at
		// https://stackoverflow.com/a/23376195/1516085
		prevIdle := prevStat.Idle + prevStat.IOWait
		idle := stat.Idle + stat.IOWait

		prevNonIdle := prevStat.User + prevStat.Nice + prevStat.System + prevStat.IRQ + prevStat.SoftIRQ + prevStat.Steal
		nonIdle := stat.User + stat.Nice + stat.System + stat.IRQ + stat.SoftIRQ + stat.Steal

		prevTotal := prevIdle + prevNonIdle
		total := idle + nonIdle

		// Differentiate: busy fraction of the interval since last sample.
		difference := total - prevTotal
		idled := idle - prevIdle

		percentage := float64(0)
		if difference > 0 {
			percentage = float64(difference-idled) / float64(difference)
		}

		stats[i] = wtf.Bar{
			Label:      fmt.Sprint(i),
			Percent:    int(percentage * 100),
			ValueLabel: fmt.Sprintf("%d%%", int(percentage*100)),
		}

		prevStats[i] = stat
	}

	memInfo, err := linux.ReadMemInfo("/proc/meminfo")
	if err != nil {
		return
	}

	memIndex := len(cpuStat.CPUStats)
	stats[memIndex] = usageBar("Mem", memInfo.MemTotal-memInfo.MemAvailable, memInfo.MemTotal)

	swapIndex := memIndex + 1
	stats[swapIndex] = usageBar("Swp", memInfo.SwapTotal-memInfo.SwapFree, memInfo.SwapTotal)

	widget.BarGraph.BuildBars(stats[:])
}

// usageBar builds a labeled used/total bar. used and total are in KiB,
// as reported by /proc/meminfo.
func usageBar(label string, used, total uint64) wtf.Bar {
	// BUG FIX: guard total == 0 (systems without swap); the original
	// divided unconditionally, producing NaN and an undefined Percent.
	percent := 0
	if total > 0 {
		percent = int(float64(used) / float64(total) * 100)
	}

	usedLabel := bytefmt.ByteSize(used * bytefmt.KILOBYTE)
	totalLabel := bytefmt.ByteSize(total * bytefmt.KILOBYTE)

	// Drop the unit from the "used" side when both sides share the same
	// unit (e.g. "1.2/15.6G" instead of "1.2G/15.6G").
	// BUG FIX: the original compared the swap label against
	// totalMemLabel, indexed by the swap label's length — a copy-paste
	// error that could mis-trim or panic. The len > 1 check keeps a bare
	// "0" intact.
	if len(usedLabel) > 1 && usedLabel[len(usedLabel)-1] == totalLabel[len(totalLabel)-1] {
		usedLabel = usedLabel[:len(usedLabel)-1]
	}

	return wtf.Bar{
		Label:      label,
		Percent:    percent,
		ValueLabel: fmt.Sprintf("%s/%s", usedLabel, totalLabel),
	}
}
// Refresh redraws the widget on its refresh interval; it is a no-op
// while the widget is disabled.
func (widget *Widget) Refresh() {
	if widget.Disabled() {
		return
	}

	// Clear before rebuilding so stale bars are not left behind.
	widget.View.Clear()
	display(widget)
}
/* -------------------- Unexported Functions -------------------- */

// display rebuilds the bar graph contents; split out so Refresh stays
// a thin guard-and-redraw wrapper.
func display(widget *Widget) {
	MakeGraph(widget)
}

201
vendor/code.cloudfoundry.org/bytefmt/LICENSE generated vendored Normal file
View File

@ -0,0 +1,201 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "[]"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright [yyyy] [name of copyright owner]
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

20
vendor/code.cloudfoundry.org/bytefmt/NOTICE generated vendored Normal file
View File

@ -0,0 +1,20 @@
Copyright (c) 2015-Present CloudFoundry.org Foundation, Inc. All Rights Reserved.
This project contains software that is Copyright (c) 2013-2015 Pivotal Software, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
This project may include a number of subcomponents with separate
copyright notices and license terms. Your use of these subcomponents
is subject to the terms and conditions of each subcomponent's license,
as noted in the LICENSE file.

15
vendor/code.cloudfoundry.org/bytefmt/README.md generated vendored Normal file
View File

@ -0,0 +1,15 @@
bytefmt
=======
**Note**: This repository should be imported as `code.cloudfoundry.org/bytefmt`.
Human-readable byte formatter.
Example:
```go
bytefmt.ByteSize(100.5*bytefmt.MEGABYTE) // returns "100.5M"
bytefmt.ByteSize(uint64(1024)) // returns "1K"
```
For documentation, please see http://godoc.org/code.cloudfoundry.org/bytefmt

105
vendor/code.cloudfoundry.org/bytefmt/bytes.go generated vendored Normal file
View File

@ -0,0 +1,105 @@
// Package bytefmt contains helper methods and constants for converting to and from a human-readable byte format.
//
// bytefmt.ByteSize(100.5*bytefmt.MEGABYTE) // "100.5M"
// bytefmt.ByteSize(uint64(1024)) // "1K"
//
package bytefmt
import (
"errors"
"strconv"
"strings"
"unicode"
)
// Binary size units; each step is 1024 times the previous (1 << 10*k).
const (
	BYTE = 1 << (10 * iota)
	KILOBYTE
	MEGABYTE
	GIGABYTE
	TERABYTE
)

// invalidByteQuantityError is returned by the parsing functions for
// input that is not a positive number followed by a recognized unit.
var invalidByteQuantityError = errors.New("byte quantity must be a positive integer with a unit of measurement like M, MB, MiB, G, GiB, or GB")

// ByteSize returns a human-readable byte string of the form 10M, 12.5K,
// and so forth. The following units are available:
//	T: Terabyte
//	G: Gigabyte
//	M: Megabyte
//	K: Kilobyte
//	B: Byte
// The unit that results in the smallest number greater than or equal to
// 1 is always chosen; zero is rendered as "0" with no unit.
func ByteSize(bytes uint64) string {
	if bytes == 0 {
		return "0"
	}

	value, unit := float64(bytes), "B"
	switch {
	case bytes >= TERABYTE:
		value, unit = value/TERABYTE, "T"
	case bytes >= GIGABYTE:
		value, unit = value/GIGABYTE, "G"
	case bytes >= MEGABYTE:
		value, unit = value/MEGABYTE, "M"
	case bytes >= KILOBYTE:
		value, unit = value/KILOBYTE, "K"
	}

	// One decimal place, with a trailing ".0" stripped ("1.0K" -> "1K").
	formatted := strings.TrimSuffix(strconv.FormatFloat(value, 'f', 1, 64), ".0")
	return formatted + unit
}
// ToMegabytes parses a string formatted by ByteSize and reports the
// value in whole megabytes.
func ToMegabytes(s string) (uint64, error) {
	n, err := ToBytes(s)
	if err != nil {
		return 0, err
	}

	return n / MEGABYTE, nil
}
// ToBytes parses a string formatted by ByteSize as bytes. Binary-prefixed
// and SI-prefixed units both mean base-2 units:
//	KB = K = KiB = 1024
//	MB = M = MiB = 1024 * K
//	GB = G = GiB = 1024 * M
//	TB = T = TiB = 1024 * G
func ToBytes(s string) (uint64, error) {
	s = strings.ToUpper(strings.TrimSpace(s))

	// Split at the first letter: numeric prefix, then the unit suffix.
	i := strings.IndexFunc(s, unicode.IsLetter)
	if i == -1 {
		return 0, invalidByteQuantityError
	}

	value, err := strconv.ParseFloat(s[:i], 64)
	if err != nil || value <= 0 {
		return 0, invalidByteQuantityError
	}

	var multiplier float64
	switch s[i:] {
	case "T", "TB", "TIB":
		multiplier = TERABYTE
	case "G", "GB", "GIB":
		multiplier = GIGABYTE
	case "M", "MB", "MIB":
		multiplier = MEGABYTE
	case "K", "KB", "KIB":
		multiplier = KILOBYTE
	case "B":
		multiplier = BYTE
	default:
		return 0, invalidByteQuantityError
	}

	return uint64(value * multiplier), nil
}

1
vendor/code.cloudfoundry.org/bytefmt/package.go generated vendored Normal file
View File

@ -0,0 +1 @@
package bytefmt // import "code.cloudfoundry.org/bytefmt"

21
vendor/github.com/c9s/goprocinfo/LICENSE generated vendored Normal file
View File

@ -0,0 +1,21 @@
The MIT License (MIT)
Copyright (c) 2013-2014 Yo-An Lin
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.

133
vendor/github.com/c9s/goprocinfo/linux/cpuinfo.go generated vendored Normal file
View File

@ -0,0 +1,133 @@
package linux
import (
"io/ioutil"
"regexp"
"strconv"
"strings"
)
// CPUInfo describes the processors listed in a /proc/cpuinfo-style file.
type CPUInfo struct {
	Processors []Processor `json:"processors"`
}

// NumCPU returns the number of logical processors.
func (ci *CPUInfo) NumCPU() int {
	return len(ci.Processors)
}

// NumCore returns the number of distinct physical cores, identified by
// the (physical id, core id) pair. When topology information is absent
// (PhysicalId == -1) it falls back to the logical CPU count.
func (ci *CPUInfo) NumCore() int {
	core := make(map[string]bool)
	for _, p := range ci.Processors {
		if p.PhysicalId == -1 {
			return ci.NumCPU()
		}
		// strconv rather than fmt to avoid the fmt import.
		key := strconv.FormatInt(int64(p.PhysicalId), 10) + ":" + strconv.FormatInt(int64(p.CoreId), 10)
		core[key] = true
	}
	return len(core)
}

// NumPhysicalCPU returns the number of physical packages (sockets),
// falling back to the logical CPU count when topology info is absent.
func (ci *CPUInfo) NumPhysicalCPU() int {
	pcpu := make(map[string]bool)
	for _, p := range ci.Processors {
		if p.PhysicalId == -1 {
			return ci.NumCPU()
		}
		pcpu[strconv.FormatInt(int64(p.PhysicalId), 10)] = true
	}
	return len(pcpu)
}

// Processor is one "processor" stanza from /proc/cpuinfo. PhysicalId
// and CoreId are -1 when the kernel did not report them.
type Processor struct {
	Id         int64    `json:"id"`
	VendorId   string   `json:"vendor_id"`
	Model      int64    `json:"model"`
	ModelName  string   `json:"model_name"`
	Flags      []string `json:"flags"`
	Cores      int64    `json:"cores"`
	MHz        float64  `json:"mhz"`
	CacheSize  int64    `json:"cache_size"` // KB
	PhysicalId int64    `json:"physical_id"`
	CoreId     int64    `json:"core_id"`
}

// cpuinfoRegExp splits a "key : value" line (key may contain spaces).
var cpuinfoRegExp = regexp.MustCompile(`([^:]*?)\s*:\s*(.*)$`)

// newProcessor returns a stanza accumulator with the -1 sentinels set so
// consumers can detect files carrying no topology information.
func newProcessor() *Processor {
	return &Processor{CoreId: -1, PhysicalId: -1}
}

// ReadCPUInfo parses the /proc/cpuinfo-style file at path. Stanzas are
// separated by blank lines; an incomplete trailing stanza (no final
// blank line) is dropped, matching the original behavior.
func ReadCPUInfo(path string) (*CPUInfo, error) {
	b, err := ioutil.ReadFile(path)
	if err != nil {
		return nil, err
	}

	lines := strings.Split(string(b), "\n")

	var cpuinfo = CPUInfo{}
	processor := newProcessor()

	for i, line := range lines {
		if len(line) == 0 && i != len(lines)-1 {
			// Blank line terminates a processor stanza.
			cpuinfo.Processors = append(cpuinfo.Processors, *processor)
			// BUG FIX: reset with the -1 sentinels. The original reset to
			// the zero value, so every processor after the first reported
			// PhysicalId/CoreId 0 when those fields were absent (common on
			// ARM), corrupting NumCore/NumPhysicalCPU.
			processor = newProcessor()
			continue
		} else if i == len(lines)-1 {
			continue
		}

		submatches := cpuinfoRegExp.FindStringSubmatch(line)
		if submatches == nil {
			continue // tolerate lines that are not "key : value"
		}
		key, value := submatches[1], submatches[2]

		switch key {
		case "processor":
			processor.Id, _ = strconv.ParseInt(value, 10, 64)
		case "vendor_id":
			processor.VendorId = value
		case "model":
			processor.Model, _ = strconv.ParseInt(value, 10, 64)
		case "model name":
			processor.ModelName = value
		case "flags":
			processor.Flags = strings.Fields(value)
		case "cpu cores":
			processor.Cores, _ = strconv.ParseInt(value, 10, 64)
		case "cpu MHz":
			processor.MHz, _ = strconv.ParseFloat(value, 64)
		case "cache size":
			// Value looks like "512 KB" or "8192 KB"; take the number up
			// to the first whitespace. BUG FIX: the original sliced with
			// IndexAny unchecked and panicked when no whitespace followed.
			end := strings.IndexAny(value, " \t\n")
			if end == -1 {
				end = len(value)
			}
			processor.CacheSize, _ = strconv.ParseInt(value[:end], 10, 64)
			if strings.HasSuffix(line, "MB") {
				processor.CacheSize *= 1024
			}
		case "physical id":
			processor.PhysicalId, _ = strconv.ParseInt(value, 10, 64)
		case "core id":
			processor.CoreId, _ = strconv.ParseInt(value, 10, 64)
		}
	}
	return &cpuinfo, nil
}

26
vendor/github.com/c9s/goprocinfo/linux/disk.go generated vendored Normal file
View File

@ -0,0 +1,26 @@
package linux
import (
"syscall"
)
// Disk reports filesystem usage for a mount point, in bytes.
type Disk struct {
	All        uint64 `json:"all"`
	Used       uint64 `json:"used"`
	Free       uint64 `json:"free"`
	FreeInodes uint64 `json:"freeInodes"`
}

// ReadDisk stats the filesystem containing path via statfs(2) and
// returns its total, used, and free byte counts plus free inodes.
func ReadDisk(path string) (*Disk, error) {
	var fs syscall.Statfs_t
	if err := syscall.Statfs(path, &fs); err != nil {
		return nil, err
	}

	blockSize := uint64(fs.Bsize)
	d := &Disk{
		All:        fs.Blocks * blockSize,
		Free:       fs.Bfree * blockSize,
		FreeInodes: fs.Ffree,
	}
	d.Used = d.All - d.Free
	return d, nil
}

100
vendor/github.com/c9s/goprocinfo/linux/diskstat.go generated vendored Normal file
View File

@ -0,0 +1,100 @@
package linux
import (
"io/ioutil"
"strconv"
"strings"
"time"
)
// DiskStat is disk statistics to help measure disk activity.
//
// Note:
// * On a very busy or long-lived system values may wrap.
// * No kernel locks are held while modifying these counters. This implies that
//   minor inaccuracies may occur.
//
// For more info see:
// https://www.kernel.org/doc/Documentation/iostats.txt and
// https://www.kernel.org/doc/Documentation/block/stat.txt
type DiskStat struct {
	Major        int    `json:"major"`         // major device number
	Minor        int    `json:"minor"`         // minor device number
	Name         string `json:"name"`          // device name
	ReadIOs      uint64 `json:"read_ios"`      // number of read I/Os processed
	ReadMerges   uint64 `json:"read_merges"`   // number of read I/Os merged with in-queue I/O
	ReadSectors  uint64 `json:"read_sectors"`  // number of 512 byte sectors read
	ReadTicks    uint64 `json:"read_ticks"`    // total wait time for read requests in milliseconds
	WriteIOs     uint64 `json:"write_ios"`     // number of write I/Os processed
	WriteMerges  uint64 `json:"write_merges"`  // number of write I/Os merged with in-queue I/O
	WriteSectors uint64 `json:"write_sectors"` // number of 512 byte sectors written
	WriteTicks   uint64 `json:"write_ticks"`   // total wait time for write requests in milliseconds
	InFlight     uint64 `json:"in_flight"`     // number of I/Os currently in flight
	IOTicks      uint64 `json:"io_ticks"`      // total time this block device has been active in milliseconds
	TimeInQueue  uint64 `json:"time_in_queue"` // total wait time for all requests in milliseconds
}
// ReadDiskStats reads and parses the file at path (normally
// /proc/diskstats), returning one DiskStat per device line.
//
// Note:
// * Assumes a well formed file and will panic if it isn't.
func ReadDiskStats(path string) ([]DiskStat, error) {
	data, err := ioutil.ReadFile(path)
	if err != nil {
		return nil, err
	}
	devices := strings.Split(string(data), "\n")
	// The trailing newline yields one empty final element, hence -1.
	results := make([]DiskStat, len(devices)-1)

	for i := range results {
		// Field layout per iostats.txt: major minor name + 11 counters.
		// A short line panics on indexing, per the note above.
		fields := strings.Fields(devices[i])
		Major, _ := strconv.ParseInt(fields[0], 10, strconv.IntSize)
		results[i].Major = int(Major)
		Minor, _ := strconv.ParseInt(fields[1], 10, strconv.IntSize)
		results[i].Minor = int(Minor)
		results[i].Name = fields[2]
		// Numeric parse errors are deliberately ignored; a malformed
		// field leaves its counter at zero.
		results[i].ReadIOs, _ = strconv.ParseUint(fields[3], 10, 64)
		results[i].ReadMerges, _ = strconv.ParseUint(fields[4], 10, 64)
		results[i].ReadSectors, _ = strconv.ParseUint(fields[5], 10, 64)
		results[i].ReadTicks, _ = strconv.ParseUint(fields[6], 10, 64)
		results[i].WriteIOs, _ = strconv.ParseUint(fields[7], 10, 64)
		results[i].WriteMerges, _ = strconv.ParseUint(fields[8], 10, 64)
		results[i].WriteSectors, _ = strconv.ParseUint(fields[9], 10, 64)
		results[i].WriteTicks, _ = strconv.ParseUint(fields[10], 10, 64)
		results[i].InFlight, _ = strconv.ParseUint(fields[11], 10, 64)
		results[i].IOTicks, _ = strconv.ParseUint(fields[12], 10, 64)
		results[i].TimeInQueue, _ = strconv.ParseUint(fields[13], 10, 64)
	}
	return results, nil
}
// GetReadBytes returns the number of bytes read.
func (ds *DiskStat) GetReadBytes() int64 {
	// Sectors in diskstats are always 512 bytes (see ReadSectors field).
	return int64(ds.ReadSectors) * 512
}

// GetReadTicks returns the duration waited for read requests.
func (ds *DiskStat) GetReadTicks() time.Duration {
	return time.Duration(ds.ReadTicks) * time.Millisecond
}

// GetWriteBytes returns the number of bytes written.
func (ds *DiskStat) GetWriteBytes() int64 {
	return int64(ds.WriteSectors) * 512
}

// GetWriteTicks returns the duration waited for write requests.
// (Original comment said "GetReadTicks" — copy/paste slip, corrected.)
func (ds *DiskStat) GetWriteTicks() time.Duration {
	return time.Duration(ds.WriteTicks) * time.Millisecond
}

// GetIOTicks returns the duration the disk has been active.
func (ds *DiskStat) GetIOTicks() time.Duration {
	return time.Duration(ds.IOTicks) * time.Millisecond
}

// GetTimeInQueue returns the duration waited for all requests.
func (ds *DiskStat) GetTimeInQueue() time.Duration {
	return time.Duration(ds.TimeInQueue) * time.Millisecond
}

56
vendor/github.com/c9s/goprocinfo/linux/interrupts.go generated vendored Normal file
View File

@ -0,0 +1,56 @@
package linux
import (
"io/ioutil"
"strconv"
"strings"
)
// Interrupt is one row of /proc/interrupts: the IRQ name, one count per
// CPU column, and the free-form description from the end of the line.
type Interrupt struct {
	Name        string
	Counts      []uint64
	Description string
}

// Interrupts wraps the full table parsed from /proc/interrupts.
type Interrupts struct {
	Interrupts []Interrupt
}

// ReadInterrupts parses the /proc/interrupts-style file at path. The
// first line is the CPU header; its field count determines how many
// per-CPU count columns each subsequent row is expected to carry.
func ReadInterrupts(path string) (*Interrupts, error) {
	b, err := ioutil.ReadFile(path)
	if err != nil {
		return nil, err
	}
	content := string(b)
	lines := strings.Split(content, "\n")
	cpus := lines[0]
	// Drop the header row in place.
	lines = append(lines[:0], lines[1:]...)
	numCpus := len(strings.Fields(cpus))
	interrupts := make([]Interrupt, 0)
	for _, line := range lines {
		fields := strings.Fields(line)
		if len(fields) == 0 {
			continue
		}
		counts := make([]uint64, 0)
		i := 0
		// Some rows (e.g. ERR/MIS summaries) have fewer count columns
		// than CPUs, so stop early when the row runs out of fields;
		// i is reused below to locate the start of the description.
		for ; i < numCpus; i++ {
			if len(fields) <= i+1 {
				break
			}
			// NOTE(review): counts are parsed as signed 64-bit and cast;
			// values above MaxInt64 would error — presumably never hit in
			// practice, but confirm if targeting very long uptimes.
			count, err := strconv.ParseInt(fields[i+1], 10, 64)
			if err != nil {
				return nil, err
			}
			counts = append(counts, uint64(count))
		}
		name := strings.TrimSuffix(fields[0], ":")
		description := strings.Join(fields[i+1:], " ")
		interrupts = append(interrupts, Interrupt{
			Name:        name,
			Counts:      counts,
			Description: description,
		})
	}
	return &Interrupts{Interrupts: interrupts}, nil
}

67
vendor/github.com/c9s/goprocinfo/linux/loadavg.go generated vendored Normal file
View File

@ -0,0 +1,67 @@
package linux
import (
"errors"
"io/ioutil"
"strconv"
"strings"
)
// LoadAvg mirrors /proc/loadavg: the three load averages, the
// running/total process counts, and the most recently assigned PID.
type LoadAvg struct {
	Last1Min       float64 `json:"last1min"`
	Last5Min       float64 `json:"last5min"`
	Last15Min      float64 `json:"last15min"`
	ProcessRunning uint64  `json:"process_running"`
	ProcessTotal   uint64  `json:"process_total"`
	LastPID        uint64  `json:"last_pid"`
}

// ReadLoadAvg parses the /proc/loadavg-style file at path. The expected
// layout is "1min 5min 15min running/total lastpid".
func ReadLoadAvg(path string) (*LoadAvg, error) {
	b, err := ioutil.ReadFile(path)
	if err != nil {
		return nil, err
	}

	content := strings.TrimSpace(string(b))
	fields := strings.Fields(content)
	if len(fields) < 5 {
		return nil, errors.New("Cannot parse loadavg: " + content)
	}

	// Fourth field is "running/total".
	process := strings.Split(fields[3], "/")
	if len(process) != 2 {
		return nil, errors.New("Cannot parse loadavg: " + content)
	}

	la := LoadAvg{}

	// Parse the three load averages in order, stopping at the first bad one.
	for _, f := range []struct {
		src string
		dst *float64
	}{
		{fields[0], &la.Last1Min},
		{fields[1], &la.Last5Min},
		{fields[2], &la.Last15Min},
	} {
		if *f.dst, err = strconv.ParseFloat(f.src, 64); err != nil {
			return nil, err
		}
	}

	// Then the unsigned counters, in the original's order.
	for _, u := range []struct {
		src string
		dst *uint64
	}{
		{process[0], &la.ProcessRunning},
		{process[1], &la.ProcessTotal},
		{fields[4], &la.LastPID},
	} {
		if *u.dst, err = strconv.ParseUint(u.src, 10, 64); err != nil {
			return nil, err
		}
	}

	return &la, nil
}

97
vendor/github.com/c9s/goprocinfo/linux/meminfo.go generated vendored Normal file
View File

@ -0,0 +1,97 @@
package linux
import (
"io/ioutil"
"reflect"
"strconv"
"strings"
)
// MemInfo is the parsed form of a /proc/meminfo-style file. Field names
// follow the meminfo keys; keys that are not valid Go identifiers (e.g.
// "Active(anon)") are matched through the `field` struct tag instead.
type MemInfo struct {
	MemTotal          uint64 `json:"mem_total"`
	MemFree           uint64 `json:"mem_free"`
	MemAvailable      uint64 `json:"mem_available"`
	Buffers           uint64 `json:"buffers"`
	Cached            uint64 `json:"cached"`
	SwapCached        uint64 `json:"swap_cached"`
	Active            uint64 `json:"active"`
	Inactive          uint64 `json:"inactive"`
	ActiveAnon        uint64 `json:"active_anon" field:"Active(anon)"`
	InactiveAnon      uint64 `json:"inactive_anon" field:"Inactive(anon)"`
	ActiveFile        uint64 `json:"active_file" field:"Active(file)"`
	InactiveFile      uint64 `json:"inactive_file" field:"Inactive(file)"`
	Unevictable       uint64 `json:"unevictable"`
	Mlocked           uint64 `json:"mlocked"`
	SwapTotal         uint64 `json:"swap_total"`
	SwapFree          uint64 `json:"swap_free"`
	Dirty             uint64 `json:"dirty"`
	Writeback         uint64 `json:"write_back"`
	AnonPages         uint64 `json:"anon_pages"`
	Mapped            uint64 `json:"mapped"`
	Shmem             uint64 `json:"shmem"`
	Slab              uint64 `json:"slab"`
	SReclaimable      uint64 `json:"s_reclaimable"`
	SUnreclaim        uint64 `json:"s_unclaim"`
	KernelStack       uint64 `json:"kernel_stack"`
	PageTables        uint64 `json:"page_tables"`
	NFS_Unstable      uint64 `json:"nfs_unstable"`
	Bounce            uint64 `json:"bounce"`
	WritebackTmp      uint64 `json:"writeback_tmp"`
	CommitLimit       uint64 `json:"commit_limit"`
	Committed_AS      uint64 `json:"committed_as"`
	VmallocTotal      uint64 `json:"vmalloc_total"`
	VmallocUsed       uint64 `json:"vmalloc_used"`
	VmallocChunk      uint64 `json:"vmalloc_chunk"`
	HardwareCorrupted uint64 `json:"hardware_corrupted"`
	AnonHugePages     uint64 `json:"anon_huge_pages"`
	HugePages_Total   uint64 `json:"huge_pages_total"`
	HugePages_Free    uint64 `json:"huge_pages_free"`
	HugePages_Rsvd    uint64 `json:"huge_pages_rsvd"`
	HugePages_Surp    uint64 `json:"huge_pages_surp"`
	Hugepagesize      uint64 `json:"hugepagesize"`
	DirectMap4k       uint64 `json:"direct_map_4k"`
	DirectMap2M       uint64 `json:"direct_map_2M"`
	DirectMap1G       uint64 `json:"direct_map_1G"`
}

// ReadMemInfo parses a /proc/meminfo-style file at path. Unknown keys are
// ignored; keys missing from the file leave their field at zero.
func ReadMemInfo(path string) (*MemInfo, error) {
	data, err := ioutil.ReadFile(path)
	if err != nil {
		return nil, err
	}
	lines := strings.Split(string(data), "\n")
	// Maps a meminfo metric to its value (i.e. MemTotal --> 100000)
	statMap := make(map[string]uint64)
	var info = MemInfo{}
	for _, line := range lines {
		fields := strings.SplitN(line, ":", 2)
		if len(fields) < 2 {
			continue
		}
		valFields := strings.Fields(fields[1])
		// Guard against a key with no value ("Foo:"); indexing an empty
		// Fields result would panic.
		if len(valFields) == 0 {
			continue
		}
		val, _ := strconv.ParseUint(valFields[0], 10, 64)
		statMap[fields[0]] = val
	}
	elem := reflect.ValueOf(&info).Elem()
	typeOfElem := elem.Type()
	for i := 0; i < elem.NumField(); i++ {
		// Prefer the Go field name; fall back to the `field` tag for
		// meminfo keys that are not valid identifiers.
		val, ok := statMap[typeOfElem.Field(i).Name]
		if ok {
			elem.Field(i).SetUint(val)
			continue
		}
		val, ok = statMap[typeOfElem.Field(i).Tag.Get("field")]
		if ok {
			elem.Field(i).SetUint(val)
		}
	}
	return &info, nil
}

49
vendor/github.com/c9s/goprocinfo/linux/mounts.go generated vendored Normal file
View File

@ -0,0 +1,49 @@
package linux
import (
"bufio"
"os"
"strings"
)
// Mounts is the parsed contents of a mounts table such as /proc/mounts.
type Mounts struct {
	Mounts []Mount `json:"mounts"`
}

// Mount describes one mounted filesystem: device, mount point, filesystem
// type, and the raw option string.
type Mount struct {
	Device     string `json:"device"`
	MountPoint string `json:"mountpoint"`
	FSType     string `json:"fstype"`
	Options    string `json:"options"`
}

const (
	// DefaultBufferSize is exported for callers; ReadMounts itself does
	// not reference it.
	DefaultBufferSize = 1024
)

// ReadMounts parses a /proc/mounts-style file at path. Each line supplies
// at least device, mount point, fstype and options; shorter (malformed or
// blank) lines are skipped instead of causing an index panic.
func ReadMounts(path string) (*Mounts, error) {
	fin, err := os.Open(path)
	if err != nil {
		return nil, err
	}
	defer fin.Close()
	var mounts = Mounts{}
	scanner := bufio.NewScanner(fin)
	for scanner.Scan() {
		fields := strings.Fields(scanner.Text())
		// fields[3] below requires at least four columns.
		if len(fields) < 4 {
			continue
		}
		mounts.Mounts = append(mounts.Mounts, Mount{
			Device:     fields[0],
			MountPoint: fields[1],
			FSType:     fields[2],
			Options:    fields[3],
		})
	}
	if err := scanner.Err(); err != nil {
		return nil, err
	}
	return &mounts, nil
}

173
vendor/github.com/c9s/goprocinfo/linux/net_ip.go generated vendored Normal file
View File

@ -0,0 +1,173 @@
package linux
import (
"errors"
"net"
"regexp"
"strconv"
"strings"
)
var (
	ipv4RegExp = regexp.MustCompile("^[0-9a-fA-F]{8}:[0-9a-fA-F]{4}$") // Regex for NetIPv4Decoder
	ipv6RegExp = regexp.MustCompile("^[0-9a-fA-F]{32}:[0-9a-fA-F]{4}$") // Regex for NetIPv6Decoder
)

// NetIPDecoder converts one kernel hex "address:port" token into a
// printable "ip:port" string. Either NetIPv4Decoder or NetIPv6Decoder.
type NetIPDecoder func(string) (string, error)

// NetSocket holds the columns shared by the /proc/net socket tables
// (tcp, udp, ...).
type NetSocket struct {
	LocalAddress         string `json:"local_address"`
	RemoteAddress        string `json:"remote_address"`
	Status               uint8  `json:"st"`
	TxQueue              uint64 `json:"tx_queue"`
	RxQueue              uint64 `json:"rx_queue"`
	Uid                  uint32 `json:"uid"`
	Inode                uint64 `json:"inode"`
	SocketReferenceCount uint64 `json:"ref"`
}

// parseNetSocket decodes the shared columns of one socket-table row. f is
// the whitespace-split line; ip decodes the two hex address columns.
func parseNetSocket(f []string, ip NetIPDecoder) (*NetSocket, error) {
	if len(f) < 11 {
		return nil, errors.New("Cannot parse net socket line: " + strings.Join(f, " "))
	}
	if strings.Index(f[4], ":") == -1 {
		return nil, errors.New("Cannot parse tx/rx queues: " + f[4])
	}
	queues := strings.Split(f[4], ":")
	sock := &NetSocket{}
	var rawStatus uint64
	var rawUid uint64
	var err error
	if sock.LocalAddress, err = ip(f[1]); err != nil {
		return nil, err
	}
	if sock.RemoteAddress, err = ip(f[2]); err != nil {
		return nil, err
	}
	if rawStatus, err = strconv.ParseUint(f[3], 16, 8); err != nil {
		return nil, err
	}
	if sock.TxQueue, err = strconv.ParseUint(queues[0], 16, 64); err != nil {
		return nil, err
	}
	if sock.RxQueue, err = strconv.ParseUint(queues[1], 16, 64); err != nil {
		return nil, err
	}
	if rawUid, err = strconv.ParseUint(f[7], 10, 32); err != nil {
		return nil, err
	}
	if sock.Inode, err = strconv.ParseUint(f[9], 10, 64); err != nil {
		return nil, err
	}
	if sock.SocketReferenceCount, err = strconv.ParseUint(f[10], 10, 64); err != nil {
		return nil, err
	}
	sock.Status = uint8(rawStatus)
	sock.Uid = uint32(rawUid)
	return sock, nil
}

// NetIPv4Decoder decodes an IPv4 address with port from a hex string such
// as "0100007F:0050" -> "127.0.0.1:80".
// NOTE: This function matches the NetIPDecoder type.
func NetIPv4Decoder(s string) (string, error) {
	if !ipv4RegExp.MatchString(s) {
		return "", errors.New("Cannot decode ipv4 address: " + s)
	}
	parts := strings.Split(s, ":")
	hexAddr, hexPort := parts[0], parts[1]
	// The kernel stores the address little-endian, so each textual byte
	// pair is written into the output in reverse order.
	addr := make([]byte, 4)
	for pos := 0; pos < 4; pos++ {
		octet, _ := strconv.ParseUint(hexAddr[pos*2:pos*2+2], 16, 8)
		addr[3-pos] = byte(octet)
	}
	port, _ := strconv.ParseUint(hexPort, 16, 64)
	return net.IP(addr).String() + ":" + strconv.FormatUint(port, 10), nil
}

// NetIPv6Decoder decodes an IPv6 address with port from a 32-hex-digit
// string, e.g. "...01000000:0050" -> "::1:80".
// NOTE: This function matches the NetIPDecoder type.
func NetIPv6Decoder(s string) (string, error) {
	if !ipv6RegExp.MatchString(s) {
		return "", errors.New("Cannot decode ipv6 address: " + s)
	}
	parts := strings.Split(s, ":")
	hexAddr, hexPort := parts[0], parts[1]
	addr := make([]byte, 16)
	// The address is four 32-bit little-endian words of 8 hex digits
	// each; reverse the byte pairs inside every word.
	for word := 0; word < 4; word++ {
		chunk := hexAddr[word*8 : word*8+8]
		for k := 0; k < 4; k++ {
			n, _ := strconv.ParseUint(chunk[6-k*2:8-k*2], 16, 8)
			addr[word*4+k] = byte(n)
		}
	}
	port, _ := strconv.ParseUint(hexPort, 16, 64)
	return net.IP(addr).String() + ":" + strconv.FormatUint(port, 10), nil
}

82
vendor/github.com/c9s/goprocinfo/linux/net_tcp.go generated vendored Normal file
View File

@ -0,0 +1,82 @@
package linux
import (
"io/ioutil"
"strconv"
"strings"
)
// NetTCPSockets is the parsed contents of a /proc/net/tcp-style table.
type NetTCPSockets struct {
	Sockets []NetTCPSocket `json:"sockets"`
}

// NetTCPSocket is one TCP socket row: the columns shared with the other
// socket tables (embedded NetSocket) plus the TCP-specific trailers.
type NetTCPSocket struct {
	NetSocket
	RetransmitTimeout       uint64 `json:"retransmit_timeout"`
	PredictedTick           uint64 `json:"predicted_tick"`
	AckQuick                uint8  `json:"ack_quick"`
	AckPingpong             bool   `json:"ack_pingpong"`
	SendingCongestionWindow uint64 `json:"sending_congestion_window"`
	SlowStartSizeThreshold  int64  `json:"slow_start_size_threshold"`
}

// ReadNetTCPSockets parses the TCP socket table at path, decoding the hex
// address columns with ip. The first line (the column header) is skipped,
// and lines with fewer than 17 fields are ignored.
func ReadNetTCPSockets(path string, ip NetIPDecoder) (*NetTCPSockets, error) {
	b, err := ioutil.ReadFile(path)
	if err != nil {
		return nil, err
	}
	lines := strings.Split(string(b), "\n")
	tcp := &NetTCPSockets{}
	for i := 1; i < len(lines); i++ {
		line := lines[i]
		f := strings.Fields(line)
		if len(f) < 17 {
			continue
		}
		// Shared columns: addresses, state, queues, uid, inode, refcount.
		s, err := parseNetSocket(f, ip)
		if err != nil {
			return nil, err
		}
		var n int64
		e := &NetTCPSocket{
			NetSocket: *s,
		}
		if e.RetransmitTimeout, err = strconv.ParseUint(f[12], 10, 64); err != nil {
			return nil, err
		}
		if e.PredictedTick, err = strconv.ParseUint(f[13], 10, 64); err != nil {
			return nil, err
		}
		if n, err = strconv.ParseInt(f[14], 10, 8); err != nil {
			return nil, err
		}
		// Column 14 packs two values: the low bit is the pingpong flag
		// and the remaining bits are the quick-ack counter.
		e.AckQuick = uint8(n >> 1)
		e.AckPingpong = ((n & 1) == 1)
		if e.SendingCongestionWindow, err = strconv.ParseUint(f[15], 10, 64); err != nil {
			return nil, err
		}
		if e.SlowStartSizeThreshold, err = strconv.ParseInt(f[16], 10, 32); err != nil {
			return nil, err
		}
		tcp.Sockets = append(tcp.Sockets, *e)
	}
	return tcp, nil
}

59
vendor/github.com/c9s/goprocinfo/linux/net_udp.go generated vendored Normal file
View File

@ -0,0 +1,59 @@
package linux
import (
"io/ioutil"
"strconv"
"strings"
)
// NetUDPSockets is the parsed contents of a /proc/net/udp-style table.
type NetUDPSockets struct {
	Sockets []NetUDPSocket `json:"sockets"`
}

// NetUDPSocket is one UDP socket row: the shared socket columns plus the
// trailing drop counter.
type NetUDPSocket struct {
	NetSocket
	Drops uint64 `json:"drops"`
}

// ReadNetUDPSockets parses the UDP socket table at path, decoding the hex
// address columns with ip. The first line (the column header) is skipped,
// and lines with fewer than 13 fields are ignored.
func ReadNetUDPSockets(path string, ip NetIPDecoder) (*NetUDPSockets, error) {
	raw, err := ioutil.ReadFile(path)
	if err != nil {
		return nil, err
	}
	result := &NetUDPSockets{}
	for _, line := range strings.Split(string(raw), "\n")[1:] {
		cols := strings.Fields(line)
		if len(cols) < 13 {
			continue
		}
		base, err := parseNetSocket(cols, ip)
		if err != nil {
			return nil, err
		}
		entry := NetUDPSocket{
			NetSocket: *base,
			Drops:     0,
		}
		if entry.Drops, err = strconv.ParseUint(cols[12], 10, 64); err != nil {
			return nil, err
		}
		result.Sockets = append(result.Sockets, entry)
	}
	return result, nil
}

174
vendor/github.com/c9s/goprocinfo/linux/netstat.go generated vendored Normal file
View File

@ -0,0 +1,174 @@
package linux
import (
"io/ioutil"
"reflect"
"strconv"
"strings"
)
// NetStat is the parsed form of a /proc/net/netstat-style file, covering
// the TcpExt and IpExt metric groups.
type NetStat struct {
	// TcpExt
	SyncookiesSent            uint64 `json:"syncookie_sent"`
	SyncookiesRecv            uint64 `json:"syncookies_recv"`
	SyncookiesFailed          uint64 `json:"syncookies_failed"`
	EmbryonicRsts             uint64 `json:"embryonic_rsts"`
	PruneCalled               uint64 `json:"prune_called"`
	RcvPruned                 uint64 `json:"rcv_pruned"`
	OfoPruned                 uint64 `json:"ofo_pruned"`
	OutOfWindowIcmps          uint64 `json:"out_of_window_icmps"`
	LockDroppedIcmps          uint64 `json:"lock_dropped_icmps"`
	ArpFilter                 uint64 `json:"arp_filter"`
	TW                        uint64 `json:"tw"`
	TWRecycled                uint64 `json:"tw_recycled"`
	TWKilled                  uint64 `json:"tw_killed"`
	PAWSPassive               uint64 `json:"paws_passive"`
	PAWSActive                uint64 `json:"paws_active"`
	PAWSEstab                 uint64 `json:"paws_estab"`
	DelayedACKs               uint64 `json:"delayed_acks"`
	DelayedACKLocked          uint64 `json:"delayed_ack_locked"`
	DelayedACKLost            uint64 `json:"delayed_ack_lost"`
	ListenOverflows           uint64 `json:"listen_overflows"`
	ListenDrops               uint64 `json:"listen_drops"`
	TCPPrequeued              uint64 `json:"tcp_prequeued"`
	TCPDirectCopyFromBacklog  uint64 `json:"tcp_direct_copy_from_backlog"`
	TCPDirectCopyFromPrequeue uint64 `json:"tcp_direct_copy_from_prequeue"`
	TCPPrequeueDropped        uint64 `json:"tcp_prequeue_dropped"`
	TCPHPHits                 uint64 `json:"tcp_hp_hits"`
	TCPHPHitsToUser           uint64 `json:"tcp_hp_hits_to_user"`
	TCPPureAcks               uint64 `json:"tcp_pure_acks"`
	TCPHPAcks                 uint64 `json:"tcp_hp_acks"`
	TCPRenoRecovery           uint64 `json:"tcp_reno_recovery"`
	TCPSackRecovery           uint64 `json:"tcp_sack_recovery"`
	TCPSACKReneging           uint64 `json:"tcp_sack_reneging"`
	TCPFACKReorder            uint64 `json:"tcp_fack_reorder"`
	TCPSACKReorder            uint64 `json:"tcp_sack_reorder"`
	TCPRenoReorder            uint64 `json:"tcp_reno_reorder"`
	TCPTSReorder              uint64 `json:"tcp_ts_reorder"`
	TCPFullUndo               uint64 `json:"tcp_full_undo"`
	TCPPartialUndo            uint64 `json:"tcp_partial_undo"`
	TCPDSACKUndo              uint64 `json:"tcp_dsack_undo"`
	TCPLossUndo               uint64 `json:"tcp_loss_undo"`
	TCPLoss                   uint64 `json:"tcp_loss"`
	TCPLostRetransmit         uint64 `json:"tcp_lost_retransmit"`
	TCPRenoFailures           uint64 `json:"tcp_reno_failures"`
	TCPSackFailures           uint64 `json:"tcp_sack_failures"`
	TCPLossFailures           uint64 `json:"tcp_loss_failures"`
	TCPFastRetrans            uint64 `json:"tcp_fast_retrans"`
	TCPForwardRetrans         uint64 `json:"tcp_forward_retrans"`
	TCPSlowStartRetrans       uint64 `json:"tcp_slow_start_retrans"`
	TCPTimeouts               uint64 `json:"tcp_timeouts"`
	TCPLossProbes             uint64 `json:"tcp_loss_probes"`
	TCPLossProbeRecovery      uint64 `json:"tcp_loss_probe_recovery"`
	TCPRenoRecoveryFail       uint64 `json:"tcp_reno_recovery_fail"`
	TCPSackRecoveryFail       uint64 `json:"tcp_sack_recovery_fail"`
	TCPSchedulerFailed        uint64 `json:"tcp_scheduler_failed"`
	TCPRcvCollapsed           uint64 `json:"tcp_rcv_collapsed"`
	TCPDSACKOldSent           uint64 `json:"tcp_dsack_old_sent"`
	TCPDSACKOfoSent           uint64 `json:"tcp_dsack_ofo_sent"`
	TCPDSACKRecv              uint64 `json:"tcp_dsack_recv"`
	TCPDSACKOfoRecv           uint64 `json:"tcp_dsack_ofo_recv"`
	TCPAbortOnSyn             uint64 `json:"tcp_abort_on_syn"`
	TCPAbortOnData            uint64 `json:"tcp_abort_on_data"`
	TCPAbortOnClose           uint64 `json:"tcp_abort_on_close"`
	TCPAbortOnMemory          uint64 `json:"tcp_abort_on_memory"`
	TCPAbortOnTimeout         uint64 `json:"tcp_abort_on_timeout"`
	TCPAbortOnLinger          uint64 `json:"tcp_abort_on_linger"`
	TCPAbortFailed            uint64 `json:"tcp_abort_failed"`
	TCPMemoryPressures        uint64 `json:"tcp_memory_pressures"`
	TCPSACKDiscard            uint64 `json:"tcp_sack_discard"`
	TCPDSACKIgnoredOld        uint64 `json:"tcp_dsack_ignored_old"`
	TCPDSACKIgnoredNoUndo     uint64 `json:"tcp_dsack_ignored_no_undo"`
	TCPSpuriousRTOs           uint64 `json:"tcp_spurious_rtos"`
	TCPMD5NotFound            uint64 `json:"tcp_md5_not_found"`
	TCPMD5Unexpected          uint64 `json:"tcp_md5_unexpected"`
	TCPSackShifted            uint64 `json:"tcp_sack_shifted"`
	TCPSackMerged             uint64 `json:"tcp_sack_merged"`
	TCPSackShiftFallback      uint64 `json:"tcp_sack_shift_fallback"`
	TCPBacklogDrop            uint64 `json:"tcp_backlog_drop"`
	TCPMinTTLDrop             uint64 `json:"tcp_min_ttl_drop"`
	TCPDeferAcceptDrop        uint64 `json:"tcp_defer_accept_drop"`
	IPReversePathFilter       uint64 `json:"ip_reverse_path_filter"`
	TCPTimeWaitOverflow       uint64 `json:"tcp_time_wait_overflow"`
	TCPReqQFullDoCookies      uint64 `json:"tcp_req_q_full_do_cookies"`
	TCPReqQFullDrop           uint64 `json:"tcp_req_q_full_drop"`
	TCPRetransFail            uint64 `json:"tcp_retrans_fail"`
	TCPRcvCoalesce            uint64 `json:"tcp_rcv_coalesce"`
	// BUGFIX: the tag was "tcp_ofo_drop", duplicating TCPOFODrop's tag;
	// encoding/json suppresses fields with conflicting tags, so neither
	// counter appeared in marshaled output.
	TCPOFOQueue               uint64 `json:"tcp_ofo_queue"`
	TCPOFODrop                uint64 `json:"tcp_ofo_drop"`
	TCPOFOMerge               uint64 `json:"tcp_ofo_merge"`
	TCPChallengeACK           uint64 `json:"tcp_challenge_ack"`
	TCPSYNChallenge           uint64 `json:"tcp_syn_challenge"`
	TCPFastOpenActive         uint64 `json:"tcp_fast_open_active"`
	TCPFastOpenActiveFail     uint64 `json:"tcp_fast_open_active_fail"`
	TCPFastOpenPassive        uint64 `json:"tcp_fast_open_passive"`
	TCPFastOpenPassiveFail    uint64 `json:"tcp_fast_open_passive_fail"`
	TCPFastOpenListenOverflow uint64 `json:"tcp_fast_open_listen_overflow"`
	TCPFastOpenCookieReqd     uint64 `json:"tcp_fast_open_cookie_reqd"`
	TCPSpuriousRtxHostQueues  uint64 `json:"tcp_spurious_rtx_host_queues"`
	BusyPollRxPackets         uint64 `json:"busy_poll_rx_packets"`
	TCPAutoCorking            uint64 `json:"tcp_auto_corking"`
	TCPFromZeroWindowAdv      uint64 `json:"tcp_from_zero_window_adv"`
	TCPToZeroWindowAdv        uint64 `json:"tcp_to_zero_window_adv"`
	TCPWantZeroWindowAdv      uint64 `json:"tcp_want_zero_window_adv"`
	TCPSynRetrans             uint64 `json:"tcp_syn_retrans"`
	TCPOrigDataSent           uint64 `json:"tcp_orig_data_sent"`
	// IpExt
	InNoRoutes      uint64 `json:"in_no_routes"`
	InTruncatedPkts uint64 `json:"in_truncated_pkts"`
	InMcastPkts     uint64 `json:"in_mcast_pkts"`
	OutMcastPkts    uint64 `json:"out_mcast_pkts"`
	InBcastPkts     uint64 `json:"in_bcast_pkts"`
	OutBcastPkts    uint64 `json:"out_bcast_pkts"`
	InOctets        uint64 `json:"in_octets"`
	OutOctets       uint64 `json:"out_octets"`
	InMcastOctets   uint64 `json:"in_mcast_octets"`
	OutMcastOctets  uint64 `json:"out_mcast_octets"`
	InBcastOctets   uint64 `json:"in_bcast_octets"`
	OutBcastOctets  uint64 `json:"out_bcast_octets"`
	InCsumErrors    uint64 `json:"in_csum_errors"`
	InNoECTPkts     uint64 `json:"in_no_ect_pkts"`
	InECT1Pkts      uint64 `json:"in_ect1_pkts"`
	InECT0Pkts      uint64 `json:"in_ect0_pkts"`
	InCEPkts        uint64 `json:"in_ce_pkts"`
}

// ReadNetStat parses a /proc/net/netstat-style file at path. Metrics the
// struct does not know about are ignored; missing metrics stay zero.
func ReadNetStat(path string) (*NetStat, error) {
	data, err := ioutil.ReadFile(path)
	if err != nil {
		return nil, err
	}
	lines := strings.Split(string(data), "\n")
	// Maps a netstat metric to its value (i.e. SyncookiesSent --> 0)
	statMap := make(map[string]string)
	// The file comes in header/value line pairs:
	// TcpExt: SyncookiesSent SyncookiesRecv SyncookiesFailed... <-- header
	// TcpExt: 0 0 1764...                                       <-- values
	for i := 1; i < len(lines); i = i + 2 {
		headers := strings.Fields(lines[i-1][strings.Index(lines[i-1], ":")+1:])
		values := strings.Fields(lines[i][strings.Index(lines[i], ":")+1:])
		for j, header := range headers {
			// Guard against truncated value lines so a malformed file
			// cannot index past the end of values.
			if j >= len(values) {
				break
			}
			statMap[header] = values[j]
		}
	}
	var netstat NetStat = NetStat{}
	elem := reflect.ValueOf(&netstat).Elem()
	typeOfElem := elem.Type()
	for i := 0; i < elem.NumField(); i++ {
		if val, ok := statMap[typeOfElem.Field(i).Name]; ok {
			parsedVal, _ := strconv.ParseUint(val, 10, 64)
			elem.Field(i).SetUint(parsedVal)
		}
	}
	return &netstat, nil
}

73
vendor/github.com/c9s/goprocinfo/linux/network_stat.go generated vendored Normal file
View File

@ -0,0 +1,73 @@
package linux
import (
"io/ioutil"
"strconv"
"strings"
)
// NetworkStat is one per-interface row of a /proc/net/dev-style file:
// receive counters followed by transmit counters.
type NetworkStat struct {
	Iface        string `json:"iface"`
	RxBytes      uint64 `json:"rxbytes"`
	RxPackets    uint64 `json:"rxpackets"`
	RxErrs       uint64 `json:"rxerrs"`
	RxDrop       uint64 `json:"rxdrop"`
	RxFifo       uint64 `json:"rxfifo"`
	RxFrame      uint64 `json:"rxframe"`
	RxCompressed uint64 `json:"rxcompressed"`
	RxMulticast  uint64 `json:"rxmulticast"`
	TxBytes      uint64 `json:"txbytes"`
	TxPackets    uint64 `json:"txpackets"`
	TxErrs       uint64 `json:"txerrs"`
	TxDrop       uint64 `json:"txdrop"`
	TxFifo       uint64 `json:"txfifo"`
	TxColls      uint64 `json:"txcolls"`
	TxCarrier    uint64 `json:"txcarrier"`
	TxCompressed uint64 `json:"txcompressed"`
}

// ReadNetworkStat parses a /proc/net/dev-style file at path. The two
// header lines are skipped; each remaining "<iface>: ..." line becomes one
// entry. Previously the result slice was pre-sized to len(lines[2:])-1,
// which panicked on files without a trailing newline (and on files shorter
// than three lines) and left zero-valued padding entries for lines without
// a colon; entries are now appended only for lines that actually parse.
func ReadNetworkStat(path string) ([]NetworkStat, error) {
	data, err := ioutil.ReadFile(path)
	if err != nil {
		return nil, err
	}
	lines := strings.Split(string(data), "\n")
	if len(lines) <= 2 {
		return []NetworkStat{}, nil
	}
	results := make([]NetworkStat, 0, len(lines)-2)
	for _, line := range lines[2:] {
		// patterns
		// <iface>: 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
		// or
		// <iface>:0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 (without space after colon)
		colon := strings.Index(line, ":")
		if colon <= 0 {
			continue
		}
		fields := strings.Fields(line[colon+1:])
		// A full row carries 16 counters; skip truncated rows instead of
		// indexing out of range.
		if len(fields) < 16 {
			continue
		}
		var stat NetworkStat
		stat.Iface = strings.Replace(line[0:colon], " ", "", -1)
		stat.RxBytes, _ = strconv.ParseUint(fields[0], 10, 64)
		stat.RxPackets, _ = strconv.ParseUint(fields[1], 10, 64)
		stat.RxErrs, _ = strconv.ParseUint(fields[2], 10, 64)
		stat.RxDrop, _ = strconv.ParseUint(fields[3], 10, 64)
		stat.RxFifo, _ = strconv.ParseUint(fields[4], 10, 64)
		stat.RxFrame, _ = strconv.ParseUint(fields[5], 10, 64)
		stat.RxCompressed, _ = strconv.ParseUint(fields[6], 10, 64)
		stat.RxMulticast, _ = strconv.ParseUint(fields[7], 10, 64)
		stat.TxBytes, _ = strconv.ParseUint(fields[8], 10, 64)
		stat.TxPackets, _ = strconv.ParseUint(fields[9], 10, 64)
		stat.TxErrs, _ = strconv.ParseUint(fields[10], 10, 64)
		stat.TxDrop, _ = strconv.ParseUint(fields[11], 10, 64)
		stat.TxFifo, _ = strconv.ParseUint(fields[12], 10, 64)
		stat.TxColls, _ = strconv.ParseUint(fields[13], 10, 64)
		stat.TxCarrier, _ = strconv.ParseUint(fields[14], 10, 64)
		stat.TxCompressed, _ = strconv.ParseUint(fields[15], 10, 64)
		results = append(results, stat)
	}
	return results, nil
}

62
vendor/github.com/c9s/goprocinfo/linux/process.go generated vendored Normal file
View File

@ -0,0 +1,62 @@
package linux
import (
"os"
"path/filepath"
"strconv"
)
// Process aggregates the per-process files read from one /proc/[pid]
// directory: status, statm, stat, io, and the command line.
type Process struct {
	Status  ProcessStatus `json:"status"`
	Statm   ProcessStatm  `json:"statm"`
	Stat    ProcessStat   `json:"stat"`
	IO      ProcessIO     `json:"io"`
	Cmdline string        `json:"cmdline"`
}

// ReadProcess reads every supported file under <path>/<pid> and combines
// them into a single Process. A failure reading any one file aborts the
// whole read and returns that error.
func ReadProcess(pid uint64, path string) (*Process, error) {
	var err error
	p := filepath.Join(path, strconv.FormatUint(pid, 10))
	// Fail fast if the process directory itself does not exist.
	if _, err = os.Stat(p); err != nil {
		return nil, err
	}
	process := Process{}
	var io *ProcessIO
	var stat *ProcessStat
	var statm *ProcessStatm
	var status *ProcessStatus
	var cmdline string
	if io, err = ReadProcessIO(filepath.Join(p, "io")); err != nil {
		return nil, err
	}
	if stat, err = ReadProcessStat(filepath.Join(p, "stat")); err != nil {
		return nil, err
	}
	if statm, err = ReadProcessStatm(filepath.Join(p, "statm")); err != nil {
		return nil, err
	}
	if status, err = ReadProcessStatus(filepath.Join(p, "status")); err != nil {
		return nil, err
	}
	if cmdline, err = ReadProcessCmdline(filepath.Join(p, "cmdline")); err != nil {
		return nil, err
	}
	process.IO = *io
	process.Stat = *stat
	process.Statm = *statm
	process.Status = *status
	process.Cmdline = cmdline
	return &process, nil
}

View File

@ -0,0 +1,39 @@
package linux
import (
"io/ioutil"
"strings"
)
// ReadProcessCmdline reads a /proc/[pid]/cmdline-style file and returns
// the command line as one string: NUL bytes that separate arguments are
// rewritten as spaces and the trailing run of NUL bytes is dropped.
// NOTE(review): because the scan inspects byte pairs, a 1-byte file with
// no terminating NUL yields "" — confirm this edge case is acceptable.
func ReadProcessCmdline(path string) (string, error) {
	b, err := ioutil.ReadFile(path)
	if err != nil {
		return "", err
	}
	l := len(b) - 1 // Define limit before last byte ('\0')
	z := byte(0)    // '\0' or null byte
	s := byte(0x20) // space byte
	c := 0          // cursor of useful bytes
	for i := 0; i < l; i++ {
		// Check if next byte is not a '\0' byte.
		if b[i+1] != z {
			// Offset must match a '\0' byte.
			c = i + 2
			// If current byte is '\0', replace it with a space byte.
			if b[i] == z {
				b[i] = s
			}
		}
	}
	// c now sits one past the last non-NUL byte, so the slice below
	// excludes the trailing NUL terminator(s).
	x := strings.TrimSpace(string(b[0:c]))
	return x, nil
}

71
vendor/github.com/c9s/goprocinfo/linux/process_io.go generated vendored Normal file
View File

@ -0,0 +1,71 @@
package linux
import (
"io/ioutil"
"reflect"
"strconv"
"strings"
)
// ProcessIO holds the I/O statistics for one process, as reported by a
// /proc/[pid]/io-style file. The `field` tag names the file key.
type ProcessIO struct {
	RChar               uint64 `json:"rchar" field:"rchar"`                                 // chars read
	WChar               uint64 `json:"wchar" field:"wchar"`                                 // chars written
	Syscr               uint64 `json:"syscr" field:"syscr"`                                 // read syscalls
	Syscw               uint64 `json:"syscw" field:"syscw"`                                 // write syscalls
	ReadBytes           uint64 `json:"read_bytes" field:"read_bytes"`                       // bytes read
	WriteBytes          uint64 `json:"write_bytes" field:"write_bytes"`                     // bytes written
	CancelledWriteBytes uint64 `json:"cancelled_write_bytes" field:"cancelled_write_bytes"` // bytes truncated
}

// ReadProcessIO parses a /proc/[pid]/io-style file at path. Keys the
// struct does not declare are ignored; missing keys leave zero values.
func ReadProcessIO(path string) (*ProcessIO, error) {
	raw, err := ioutil.ReadFile(path)
	if err != nil {
		return nil, err
	}
	// First collect every "key: value" row into a lookup table.
	stats := map[string]uint64{}
	for _, row := range strings.Split(string(raw), "\n") {
		if strings.Index(row, ": ") == -1 {
			continue
		}
		parts := strings.Split(row, ": ")
		n, err := strconv.ParseUint(parts[1], 10, 64)
		if err != nil {
			return nil, err
		}
		stats[parts[0]] = n
	}
	// Then fill the struct by matching each field's `field` tag.
	result := ProcessIO{}
	v := reflect.ValueOf(&result).Elem()
	typ := v.Type()
	for i := 0; i < v.NumField(); i++ {
		if n, ok := stats[typ.Field(i).Tag.Get("field")]; ok {
			v.Field(i).SetUint(n)
		}
	}
	return &result, nil
}

54
vendor/github.com/c9s/goprocinfo/linux/process_pid.go generated vendored Normal file
View File

@ -0,0 +1,54 @@
package linux
import (
"io/ioutil"
"os"
"path/filepath"
"strconv"
"strings"
)
// ReadMaxPID reads a pid_max-style file at path and returns the maximum
// PID it declares.
func ReadMaxPID(path string) (uint64, error) {
	raw, err := ioutil.ReadFile(path)
	if err != nil {
		return 0, err
	}
	max, err := strconv.ParseUint(strings.TrimSpace(string(raw)), 10, 64)
	if err != nil {
		return 0, err
	}
	return max, nil
}
// ListPID scans path (a /proc-style directory) for numeric subdirectories
// named 1 through max and returns the PIDs whose directories exist.
func ListPID(path string, max uint64) ([]uint64, error) {
	pids := make([]uint64, 0, 5)
	for pid := uint64(1); pid <= max; pid++ {
		dir := filepath.Join(path, strconv.FormatUint(pid, 10))
		info, err := os.Stat(dir)
		if err != nil {
			// A missing entry just means the PID is unused.
			if os.IsNotExist(err) {
				continue
			}
			return nil, err
		}
		if !info.IsDir() {
			continue
		}
		pids = append(pids, pid)
	}
	return pids, nil
}

303
vendor/github.com/c9s/goprocinfo/linux/process_stat.go generated vendored Normal file
View File

@ -0,0 +1,303 @@
package linux
import (
	"errors"
	"io/ioutil"
	"regexp"
	"strconv"
	"strings"
)
// ProcessStat is the parsed form of /proc/[pid]/stat: status information
// about the process, with fields in the order the kernel emits them.
type ProcessStat struct {
	Pid                 uint64 `json:"pid"`
	Comm                string `json:"comm"`
	State               string `json:"state"`
	Ppid                int64  `json:"ppid"`
	Pgrp                int64  `json:"pgrp"`
	Session             int64  `json:"session"`
	TtyNr               int64  `json:"tty_nr"`
	Tpgid               int64  `json:"tpgid"`
	Flags               uint64 `json:"flags"`
	Minflt              uint64 `json:"minflt"`
	Cminflt             uint64 `json:"cminflt"`
	Majflt              uint64 `json:"majflt"`
	Cmajflt             uint64 `json:"cmajflt"`
	Utime               uint64 `json:"utime"`
	Stime               uint64 `json:"stime"`
	Cutime              int64  `json:"cutime"`
	Cstime              int64  `json:"cstime"`
	Priority            int64  `json:"priority"`
	Nice                int64  `json:"nice"`
	NumThreads          int64  `json:"num_threads"`
	Itrealvalue         int64  `json:"itrealvalue"`
	Starttime           uint64 `json:"starttime"`
	Vsize               uint64 `json:"vsize"`
	Rss                 int64  `json:"rss"`
	Rsslim              uint64 `json:"rsslim"`
	Startcode           uint64 `json:"startcode"`
	Endcode             uint64 `json:"endcode"`
	Startstack          uint64 `json:"startstack"`
	Kstkesp             uint64 `json:"kstkesp"`
	Kstkeip             uint64 `json:"kstkeip"`
	Signal              uint64 `json:"signal"`
	Blocked             uint64 `json:"blocked"`
	Sigignore           uint64 `json:"sigignore"`
	Sigcatch            uint64 `json:"sigcatch"`
	Wchan               uint64 `json:"wchan"`
	Nswap               uint64 `json:"nswap"`
	Cnswap              uint64 `json:"cnswap"`
	ExitSignal          int64  `json:"exit_signal"`
	Processor           int64  `json:"processor"`
	RtPriority          uint64 `json:"rt_priority"`
	Policy              uint64 `json:"policy"`
	DelayacctBlkioTicks uint64 `json:"delayacct_blkio_ticks"`
	GuestTime           uint64 `json:"guest_time"`
	CguestTime          int64  `json:"cguest_time"`
	StartData           uint64 `json:"start_data"`
	EndData             uint64 `json:"end_data"`
	StartBrk            uint64 `json:"start_brk"`
	ArgStart            uint64 `json:"arg_start"`
	ArgEnd              uint64 `json:"arg_end"`
	EnvStart            uint64 `json:"env_start"`
	EnvEnd              uint64 `json:"env_end"`
	ExitCode            int64  `json:"exit_code"`
}

// processStatRegExp splits the line into pid, the parenthesized comm
// (which may itself contain spaces), and everything after it.
var processStatRegExp = regexp.MustCompile("^(\\d+)( \\(.*?\\) )(.*)$")

// ReadProcessStat parses a /proc/[pid]/stat-style file at path. The Comm
// field keeps its surrounding parentheses, e.g. "(bash)". Columns beyond
// the ones ProcessStat declares are ignored; a file with fewer columns
// leaves the remaining fields at zero.
func ReadProcessStat(path string) (*ProcessStat, error) {
	b, err := ioutil.ReadFile(path)
	if err != nil {
		return nil, err
	}
	e := processStatRegExp.FindStringSubmatch(strings.TrimSpace(string(b)))
	// Previously a non-matching line caused a nil-slice panic below;
	// report it as an error instead.
	if e == nil {
		return nil, errors.New("Cannot parse process stat: " + path)
	}
	f := make([]string, 0, 32)
	// Inject process Pid, then Comm, then all remaining columns.
	f = append(f, e[1])
	f = append(f, strings.TrimSpace(e[2]))
	f = append(f, strings.Fields(e[3])...)
	stat := ProcessStat{}
	// The destination for each column, in kernel order; the pointer type
	// selects the parser (ParseUint, ParseInt, or plain assignment),
	// matching the original per-column switch exactly.
	targets := []interface{}{
		&stat.Pid, &stat.Comm, &stat.State, &stat.Ppid, &stat.Pgrp,
		&stat.Session, &stat.TtyNr, &stat.Tpgid, &stat.Flags,
		&stat.Minflt, &stat.Cminflt, &stat.Majflt, &stat.Cmajflt,
		&stat.Utime, &stat.Stime, &stat.Cutime, &stat.Cstime,
		&stat.Priority, &stat.Nice, &stat.NumThreads, &stat.Itrealvalue,
		&stat.Starttime, &stat.Vsize, &stat.Rss, &stat.Rsslim,
		&stat.Startcode, &stat.Endcode, &stat.Startstack, &stat.Kstkesp,
		&stat.Kstkeip, &stat.Signal, &stat.Blocked, &stat.Sigignore,
		&stat.Sigcatch, &stat.Wchan, &stat.Nswap, &stat.Cnswap,
		&stat.ExitSignal, &stat.Processor, &stat.RtPriority, &stat.Policy,
		&stat.DelayacctBlkioTicks, &stat.GuestTime, &stat.CguestTime,
		&stat.StartData, &stat.EndData, &stat.StartBrk, &stat.ArgStart,
		&stat.ArgEnd, &stat.EnvStart, &stat.EnvEnd, &stat.ExitCode,
	}
	for i := 0; i < len(f) && i < len(targets); i++ {
		switch dst := targets[i].(type) {
		case *string:
			*dst = f[i]
		case *uint64:
			if *dst, err = strconv.ParseUint(f[i], 10, 64); err != nil {
				return nil, err
			}
		case *int64:
			if *dst, err = strconv.ParseInt(f[i], 10, 64); err != nil {
				return nil, err
			}
		}
	}
	return &stat, nil
}

View File

@ -0,0 +1,61 @@
package linux
import (
"io/ioutil"
"strconv"
"strings"
)
// ProcessStatm provides information about memory usage, measured in
// pages, from a /proc/[pid]/statm-style file.
type ProcessStatm struct {
	Size     uint64 `json:"size"`     // total program size
	Resident uint64 `json:"resident"` // resident set size
	Share    uint64 `json:"share"`    // shared pages
	Text     uint64 `json:"text"`     // text (code)
	Lib      uint64 `json:"lib"`      // library (unused in Linux 2.6)
	Data     uint64 `json:"data"`     // data + stack
	Dirty    uint64 `json:"dirty"`    // dirty pages (unused in Linux 2.6)
}

// ReadProcessStatm parses a /proc/[pid]/statm-style file at path. The
// seven known columns fill the struct in order; any extra columns are
// still parsed (and can fail) but their values are discarded.
func ReadProcessStatm(path string) (*ProcessStatm, error) {
	raw, err := ioutil.ReadFile(path)
	if err != nil {
		return nil, err
	}
	statm := ProcessStatm{}
	// Destination fields in column order.
	dests := []*uint64{
		&statm.Size, &statm.Resident, &statm.Share, &statm.Text,
		&statm.Lib, &statm.Data, &statm.Dirty,
	}
	for i, col := range strings.Fields(string(raw)) {
		n, err := strconv.ParseUint(col, 10, 64)
		if err != nil {
			return nil, err
		}
		if i < len(dests) {
			*dests[i] = n
		}
	}
	return &statm, nil
}

View File

@ -0,0 +1,331 @@
package linux
import (
"io/ioutil"
"strconv"
"strings"
)
// Provides much of the information from ProcessStatm and ProcessStat
type ProcessStatus struct {
Name string
State string
Tgid uint64
Pid uint64
PPid int64
TracerPid uint64
RealUid uint64
EffectiveUid uint64
SavedSetUid uint64
FilesystemUid uint64
RealGid uint64
EffectiveGid uint64
SavedSetGid uint64
FilesystemGid uint64
FDSize uint64
Groups []int64
VmPeak uint64
VmSize uint64
VmLck uint64
VmHWM uint64
VmRSS uint64
VmData uint64
VmStk uint64
VmExe uint64
VmLib uint64
VmPTE uint64
VmSwap uint64
Threads uint64
SigQLength uint64
SigQLimit uint64
SigPnd uint64
ShdPnd uint64
SigBlk uint64
SigIgn uint64
SigCgt uint64
CapInh uint64
CapPrm uint64
CapEff uint64
CapBnd uint64
Seccomp uint8
CpusAllowed []uint32
MemsAllowed []uint32
VoluntaryCtxtSwitches uint64
NonvoluntaryCtxtSwitches uint64
}
func ReadProcessStatus(path string) (*ProcessStatus, error) {
b, err := ioutil.ReadFile(path)
if err != nil {
return nil, err
}
status := ProcessStatus{}
lines := strings.Split(string(b), "\n")
for _, line := range lines {
if strings.Index(line, ":") == -1 {
continue
}
l := strings.Split(line, ":")
k := strings.TrimSpace(l[0])
v := strings.TrimSpace(l[1])
switch k {
case "Name":
status.Name = v
case "State":
status.State = v
case "Tgid":
if status.Tgid, err = strconv.ParseUint(v, 10, 64); err != nil {
return nil, err
}
case "Pid":
if status.Pid, err = strconv.ParseUint(v, 10, 64); err != nil {
return nil, err
}
case "PPid":
if status.PPid, err = strconv.ParseInt(v, 10, 64); err != nil {
return nil, err
}
case "TracerPid":
if status.TracerPid, err = strconv.ParseUint(v, 10, 64); err != nil {
return nil, err
}
case "Uid":
if f := strings.Fields(v); len(f) == 4 {
if status.RealUid, err = strconv.ParseUint(f[0], 10, 64); err != nil {
return nil, err
}
if status.EffectiveUid, err = strconv.ParseUint(f[1], 10, 64); err != nil {
return nil, err
}
if status.SavedSetUid, err = strconv.ParseUint(f[2], 10, 64); err != nil {
return nil, err
}
if status.FilesystemUid, err = strconv.ParseUint(f[3], 10, 64); err != nil {
return nil, err
}
}
case "Gid":
if f := strings.Fields(v); len(f) == 4 {
if status.RealGid, err = strconv.ParseUint(f[0], 10, 64); err != nil {
return nil, err
}
if status.EffectiveGid, err = strconv.ParseUint(f[1], 10, 64); err != nil {
return nil, err
}
if status.SavedSetGid, err = strconv.ParseUint(f[2], 10, 64); err != nil {
return nil, err
}
if status.FilesystemGid, err = strconv.ParseUint(f[3], 10, 64); err != nil {
return nil, err
}
}
case "FDSize":
if status.FDSize, err = strconv.ParseUint(v, 10, 64); err != nil {
return nil, err
}
case "Groups":
{
f := strings.Fields(v)
status.Groups = make([]int64, len(f))
for i := range status.Groups {
if status.Groups[i], err = strconv.ParseInt(f[i], 10, 64); err != nil {
return nil, err
}
}
}
case "VmPeak":
{
f := strings.Fields(v)
if status.VmPeak, err = strconv.ParseUint(f[0], 10, 64); err != nil {
return nil, err
}
}
case "VmSize":
{
f := strings.Fields(v)
if status.VmSize, err = strconv.ParseUint(f[0], 10, 64); err != nil {
return nil, err
}
}
case "VmLck":
{
f := strings.Fields(v)
if status.VmLck, err = strconv.ParseUint(f[0], 10, 64); err != nil {
return nil, err
}
}
case "VmHWM":
{
f := strings.Fields(v)
if status.VmHWM, err = strconv.ParseUint(f[0], 10, 64); err != nil {
return nil, err
}
}
case "VmRSS":
{
f := strings.Fields(v)
if status.VmRSS, err = strconv.ParseUint(f[0], 10, 64); err != nil {
return nil, err
}
}
case "VmData":
{
f := strings.Fields(v)
if status.VmData, err = strconv.ParseUint(f[0], 10, 64); err != nil {
return nil, err
}
}
case "VmStk":
{
f := strings.Fields(v)
if status.VmStk, err = strconv.ParseUint(f[0], 10, 64); err != nil {
return nil, err
}
}
case "VmExe":
{
f := strings.Fields(v)
if status.VmExe, err = strconv.ParseUint(f[0], 10, 64); err != nil {
return nil, err
}
}
case "VmLib":
{
f := strings.Fields(v)
if status.VmLib, err = strconv.ParseUint(f[0], 10, 64); err != nil {
return nil, err
}
}
case "VmPTE":
{
f := strings.Fields(v)
if status.VmPTE, err = strconv.ParseUint(f[0], 10, 64); err != nil {
return nil, err
}
}
case "VmSwap":
{
f := strings.Fields(v)
if status.VmSwap, err = strconv.ParseUint(f[0], 10, 64); err != nil {
return nil, err
}
}
case "Threads":
if status.Threads, err = strconv.ParseUint(v, 10, 64); err != nil {
return nil, err
}
case "SigQ":
{
if f := strings.Split(v, "/"); len(f) == 2 {
if status.SigQLength, err = strconv.ParseUint(f[0], 10, 64); err != nil {
return nil, err
}
if status.SigQLimit, err = strconv.ParseUint(f[1], 10, 64); err != nil {
return nil, err
}
}
}
case "SigPnd":
if status.SigPnd, err = strconv.ParseUint(v, 16, 64); err != nil {
return nil, err
}
case "ShdPnd":
if status.ShdPnd, err = strconv.ParseUint(v, 16, 64); err != nil {
return nil, err
}
case "SigBlk":
if status.SigBlk, err = strconv.ParseUint(v, 16, 64); err != nil {
return nil, err
}
case "SigIgn":
if status.SigIgn, err = strconv.ParseUint(v, 16, 64); err != nil {
return nil, err
}
case "SigCgt":
if status.SigCgt, err = strconv.ParseUint(v, 16, 64); err != nil {
return nil, err
}
case "CapInh":
if status.CapInh, err = strconv.ParseUint(v, 16, 64); err != nil {
return nil, err
}
case "CapPrm":
if status.CapPrm, err = strconv.ParseUint(v, 16, 64); err != nil {
return nil, err
}
case "CapEff":
if status.CapEff, err = strconv.ParseUint(v, 16, 64); err != nil {
return nil, err
}
case "CapBnd":
if status.CapBnd, err = strconv.ParseUint(v, 16, 64); err != nil {
return nil, err
}
case "Seccomp":
{
var n uint64
if n, err = strconv.ParseUint(v, 10, 8); err != nil {
return nil, err
}
status.Seccomp = uint8(n)
}
case "Cpus_allowed":
{
var n uint64
f := strings.Split(v, ",")
status.CpusAllowed = make([]uint32, len(f))
for i := range status.CpusAllowed {
if n, err = strconv.ParseUint(f[i], 16, 32); err != nil {
return nil, err
}
status.CpusAllowed[i] = uint32(n)
}
}
case "Mems_allowed":
{
var n uint64
f := strings.Split(v, ",")
status.MemsAllowed = make([]uint32, len(f))
for i := range status.MemsAllowed {
if n, err = strconv.ParseUint(f[i], 16, 32); err != nil {
return nil, err
}
status.MemsAllowed[i] = uint32(n)
}
}
case "voluntary_ctxt_switches":
if status.VoluntaryCtxtSwitches, err = strconv.ParseUint(v, 10, 64); err != nil {
return nil, err
}
case "nonvoluntary_ctxt_switches":
if status.NonvoluntaryCtxtSwitches, err = strconv.ParseUint(v, 10, 64); err != nil {
return nil, err
}
}
}
return &status, nil
}

144
vendor/github.com/c9s/goprocinfo/linux/snmp.go generated vendored Normal file
View File

@ -0,0 +1,144 @@
package linux
import (
"io/ioutil"
"reflect"
"strconv"
"strings"
)
type Snmp struct {
// Ip
IpForwarding uint64 `json:"ip_forwarding"`
IpDefaultTTL uint64 `json:"ip_default_ttl"`
IpInReceives uint64 `json:"ip_in_receives"`
IpInHdrErrors uint64 `json:"ip_in_hdr_errors"`
IpInAddrErrors uint64 `json:"ip_in_addr_errors"`
IpForwDatagrams uint64 `json:"ip_forw_datagrams"`
IpInUnknownProtos uint64 `json:"ip_in_unknown_protos"`
IpInDiscards uint64 `json:"ip_in_discards"`
IpInDelivers uint64 `json:"ip_in_delivers"`
IpOutRequests uint64 `json:"ip_out_requests"`
IpOutDiscards uint64 `json:"ip_out_discards"`
IpOutNoRoutes uint64 `json:"ip_out_no_routes"`
IpReasmTimeout uint64 `json:"ip_reasm_timeout"`
IpReasmReqds uint64 `json:"ip_reasm_reqds"`
IpReasmOKs uint64 `json:"ip_reasm_oks"`
IpReasmFails uint64 `json:"ip_reasm_fails"`
IpFragOKs uint64 `json:"ip_frag_oks"`
IpFragFails uint64 `json:"ip_frag_fails"`
IpFragCreates uint64 `json:"ip_frag_creates"`
// Icmp
IcmpInMsgs uint64 `json:"icmp_in_msgs"`
IcmpInErrors uint64 `json:"icmp_in_errors"`
IcmpInCsumErrors uint64 `json:"icmp_in_csum_errors"`
IcmpInDestUnreachs uint64 `json:"icmp_in_dest_unreachs"`
IcmpInTimeExcds uint64 `json:"icmp_in_time_excds"`
IcmpInParmProbs uint64 `json:"icmp_in_parm_probs"`
IcmpInSrcQuenchs uint64 `json:"icmp_in_src_quenchs"`
IcmpInRedirects uint64 `json:"icmp_in_redirects"`
IcmpInEchos uint64 `json:"icmp_in_echos"`
IcmpInEchoReps uint64 `json:"icmp_in_echo_reps"`
IcmpInTimestamps uint64 `json:"icmp_in_timestamps"`
IcmpInTimestampReps uint64 `json:"icmp_in_timestamp_reps"`
IcmpInAddrMasks uint64 `json:"icmp_in_addr_masks"`
IcmpInAddrMaskReps uint64 `json:"icmp_in_addr_mask_reps"`
IcmpOutMsgs uint64 `json:"icmp_out_msgs"`
IcmpOutErrors uint64 `json:"icmp_out_errors"`
IcmpOutDestUnreachs uint64 `json:"icmp_out_dest_unreachs"`
IcmpOutTimeExcds uint64 `json:"icmp_out_time_excds"`
IcmpOutParmProbs uint64 `json:"icmp_out_parm_probs"`
IcmpOutSrcQuenchs uint64 `json:"icmp_out_src_quenchs"`
IcmpOutRedirects uint64 `json:"icmp_out_redirects"`
IcmpOutEchos uint64 `json:"icmp_out_echos"`
IcmpOutEchoReps uint64 `json:"icmp_out_echo_reps"`
IcmpOutTimestamps uint64 `json:"icmp_out_timestamps"`
IcmpOutTimestampReps uint64 `json:"icmp_out_timestamp_reps"`
IcmpOutAddrMasks uint64 `json:"icmp_out_addr_masks"`
IcmpOutAddrMaskReps uint64 `json:"icmp_out_addr_mask_reps"`
// IcmpMsg
IcmpMsgInType0 uint64 `json:"icmpmsg_in_type0"`
IcmpMsgInType3 uint64 `json:"icmpmsg_in_type3"`
IcmpMsgInType5 uint64 `json:"icmpmsg_in_type5"`
IcmpMsgInType8 uint64 `json:"icmpmsg_in_type8"`
IcmpMsgInType11 uint64 `json:"icmpmsg_in_type11"`
IcmpMsgInType13 uint64 `json:"icmpmsg_in_type13"`
IcmpMsgOutType0 uint64 `json:"icmpmsg_out_type0"`
IcmpMsgOutType3 uint64 `json:"icmpmsg_out_type3"`
IcmpMsgOutType8 uint64 `json:"icmpmsg_out_type8"`
IcmpMsgOutType14 uint64 `json:"icmpmsg_out_type14"`
IcmpMsgOutType69 uint64 `json:"icmpmsg_out_type69"`
// TCP
TcpRtoAlgorithm uint64 `json:"tcp_rto_algorithm"`
TcpRtoMin uint64 `json:"tcp_rto_min"`
TcpRtoMax uint64 `json:"tcp_rto_max"`
TcpMaxConn uint64 `json:"tcp_max_conn"`
TcpActiveOpens uint64 `json:"tcp_active_opens"`
TcpPassiveOpens uint64 `json:"tcp_passive_opens"`
TcpAttemptFails uint64 `json:"tcp_attempt_fails"`
TcpEstabResets uint64 `json:"tcp_estab_resets"`
TcpCurrEstab uint64 `json:"tcp_curr_estab"`
TcpInSegs uint64 `json:"tcp_in_segs"`
TcpOutSegs uint64 `json:"tcp_out_segs"`
TcpRetransSegs uint64 `json:"tcp_retrans_segs"`
TcpInErrs uint64 `json:"tcp_in_errs"`
TcpOutRsts uint64 `json:"tcp_out_rsts"`
TcpInCsumErrors uint64 `json:"tcp_in_csum_errors"`
// UDP
UdpInDatagrams uint64 `json:"udp_in_datagrams"`
UdpNoPorts uint64 `json:"udp_no_ports"`
UdpInErrors uint64 `json:"udp_in_errors"`
UdpOutDatagrams uint64 `json:"udp_out_datagrams"`
UdpRcvbufErrors uint64 `json:"udp_rcvbuf_errors"`
UdpSndbufErrors uint64 `json:"udp_sndbuf_errors"`
UdpInCsumErrors uint64 `json:"udp_in_csum_errors"`
// UDPLite
UdpLiteInDatagrams uint64 `json:"udp_lite_in_datagrams"`
UdpLiteNoPorts uint64 `json:"udp_lite_no_ports"`
UdpLiteInErrors uint64 `json:"udp_lite_in_errors"`
UdpLiteOutDatagrams uint64 `json:"udp_lite_out_datagrams"`
UdpLiteRcvbufErrors uint64 `json:"udp_lite_rcvbuf_errors"`
UdpLiteSndbufErrors uint64 `json:"udp_lite_sndbuf_errors"`
UdpLiteInCsumErrors uint64 `json:"udp_lite_in_csum_errors"`
}
func ReadSnmp(path string) (*Snmp, error) {
data, err := ioutil.ReadFile(path)
if err != nil {
return nil, err
}
lines := strings.Split(string(data), "\n")
// Maps an SNMP metric to its value (i.e. SyncookiesSent --> 0)
statMap := make(map[string]string)
// patterns
// Ip: Forwarding DefaultTTL InReceives InHdrErrors... <-- header
// Ip: 2 64 9305753793 0 0 0 0 0... <-- values
for i := 1; i < len(lines); i = i + 2 {
headers := strings.Fields(lines[i-1][strings.Index(lines[i-1], ":")+1:])
values := strings.Fields(lines[i][strings.Index(lines[i], ":")+1:])
protocol := strings.Replace(strings.Fields(lines[i-1])[0], ":", "", -1)
for j, header := range headers {
statMap[protocol+header] = values[j]
}
}
var snmp Snmp = Snmp{}
elem := reflect.ValueOf(&snmp).Elem()
typeOfElem := elem.Type()
for i := 0; i < elem.NumField(); i++ {
if val, ok := statMap[typeOfElem.Field(i).Name]; ok {
parsedVal, _ := strconv.ParseUint(val, 10, 64)
elem.Field(i).SetUint(parsedVal)
}
}
return &snmp, nil
}

82
vendor/github.com/c9s/goprocinfo/linux/sockstat.go generated vendored Normal file
View File

@ -0,0 +1,82 @@
package linux
import (
"io/ioutil"
"reflect"
"strconv"
"strings"
)
type SockStat struct {
// sockets:
SocketsUsed uint64 `json:"sockets_used" field:"sockets.used"`
// TCP:
TCPInUse uint64 `json:"tcp_in_use" field:"TCP.inuse"`
TCPOrphan uint64 `json:"tcp_orphan" field:"TCP.orphan"`
TCPTimeWait uint64 `json:"tcp_time_wait" field:"TCP.tw"`
TCPAllocated uint64 `json:"tcp_allocated" field:"TCP.alloc"`
TCPMemory uint64 `json:"tcp_memory" field:"TCP.mem"`
// UDP:
UDPInUse uint64 `json:"udp_in_use" field:"UDP.inuse"`
UDPMemory uint64 `json:"udp_memory" field:"UDP.mem"`
// UDPLITE:
UDPLITEInUse uint64 `json:"udplite_in_use" field:"UDPLITE.inuse"`
// RAW:
RAWInUse uint64 `json:"raw_in_use" field:"RAW.inuse"`
// FRAG:
FRAGInUse uint64 `json:"frag_in_use" field:"FRAG.inuse"`
FRAGMemory uint64 `json:"frag_memory" field:"FRAG.memory"`
}
func ReadSockStat(path string) (*SockStat, error) {
data, err := ioutil.ReadFile(path)
if err != nil {
return nil, err
}
lines := strings.Split(string(data), "\n")
// Maps a meminfo metric to its value (i.e. MemTotal --> 100000)
statMap := map[string]uint64{}
var sockStat SockStat = SockStat{}
for _, line := range lines {
if strings.Index(line, ":") == -1 {
continue
}
statType := line[0:strings.Index(line, ":")] + "."
// The fields have this pattern: inuse 27 orphan 1 tw 23 alloc 31 mem 3
// The stats are grouped into pairs and need to be parsed and placed into the stat map.
key := ""
for k, v := range strings.Fields(line[strings.Index(line, ":")+1:]) {
// Every second field is a value.
if (k+1)%2 != 0 {
key = v
continue
}
val, _ := strconv.ParseUint(v, 10, 64)
statMap[statType+key] = val
}
}
elem := reflect.ValueOf(&sockStat).Elem()
typeOfElem := elem.Type()
for i := 0; i < elem.NumField(); i++ {
val, ok := statMap[typeOfElem.Field(i).Tag.Get("field")]
if ok {
elem.Field(i).SetUint(val)
}
}
return &sockStat, nil
}

106
vendor/github.com/c9s/goprocinfo/linux/stat.go generated vendored Normal file
View File

@ -0,0 +1,106 @@
package linux
import (
"io/ioutil"
"strconv"
"strings"
"time"
)
type Stat struct {
CPUStatAll CPUStat `json:"cpu_all"`
CPUStats []CPUStat `json:"cpus"`
Interrupts uint64 `json:"intr"`
ContextSwitches uint64 `json:"ctxt"`
BootTime time.Time `json:"btime"`
Processes uint64 `json:"processes"`
ProcsRunning uint64 `json:"procs_running"`
ProcsBlocked uint64 `json:"procs_blocked"`
}
type CPUStat struct {
Id string `json:"id"`
User uint64 `json:"user"`
Nice uint64 `json:"nice"`
System uint64 `json:"system"`
Idle uint64 `json:"idle"`
IOWait uint64 `json:"iowait"`
IRQ uint64 `json:"irq"`
SoftIRQ uint64 `json:"softirq"`
Steal uint64 `json:"steal"`
Guest uint64 `json:"guest"`
GuestNice uint64 `json:"guest_nice"`
}
func createCPUStat(fields []string) *CPUStat {
s := CPUStat{}
s.Id = fields[0]
for i := 1; i < len(fields); i++ {
v, _ := strconv.ParseUint(fields[i], 10, 64)
switch i {
case 1:
s.User = v
case 2:
s.Nice = v
case 3:
s.System = v
case 4:
s.Idle = v
case 5:
s.IOWait = v
case 6:
s.IRQ = v
case 7:
s.SoftIRQ = v
case 8:
s.Steal = v
case 9:
s.Guest = v
case 10:
s.GuestNice = v
}
}
return &s
}
func ReadStat(path string) (*Stat, error) {
b, err := ioutil.ReadFile(path)
if err != nil {
return nil, err
}
content := string(b)
lines := strings.Split(content, "\n")
var stat Stat = Stat{}
for i, line := range lines {
fields := strings.Fields(line)
if len(fields) == 0 {
continue
}
if fields[0][:3] == "cpu" {
if cpuStat := createCPUStat(fields); cpuStat != nil {
if i == 0 {
stat.CPUStatAll = *cpuStat
} else {
stat.CPUStats = append(stat.CPUStats, *cpuStat)
}
}
} else if fields[0] == "intr" {
stat.Interrupts, _ = strconv.ParseUint(fields[1], 10, 64)
} else if fields[0] == "ctxt" {
stat.ContextSwitches, _ = strconv.ParseUint(fields[1], 10, 64)
} else if fields[0] == "btime" {
seconds, _ := strconv.ParseInt(fields[1], 10, 64)
stat.BootTime = time.Unix(seconds, 0)
} else if fields[0] == "processes" {
stat.Processes, _ = strconv.ParseUint(fields[1], 10, 64)
} else if fields[0] == "procs_running" {
stat.ProcsRunning, _ = strconv.ParseUint(fields[1], 10, 64)
} else if fields[0] == "procs_blocked" {
stat.ProcsBlocked, _ = strconv.ParseUint(fields[1], 10, 64)
}
}
return &stat, nil
}

43
vendor/github.com/c9s/goprocinfo/linux/uptime.go generated vendored Normal file
View File

@ -0,0 +1,43 @@
package linux
import (
"io/ioutil"
"strconv"
"strings"
"time"
)
type Uptime struct {
Total float64 `json:"total"`
Idle float64 `json:"idle"`
}
func (self *Uptime) GetTotalDuration() time.Duration {
return time.Duration(self.Total) * time.Second
}
func (self *Uptime) GetIdleDuration() time.Duration {
return time.Duration(self.Idle) * time.Second
}
func (self *Uptime) CalculateIdle() float64 {
// XXX
// num2/(num1*N) # N = SMP CPU numbers
return 0
}
func ReadUptime(path string) (*Uptime, error) {
b, err := ioutil.ReadFile(path)
if err != nil {
return nil, err
}
fields := strings.Fields(string(b))
uptime := Uptime{}
if uptime.Total, err = strconv.ParseFloat(fields[0], 64); err != nil {
return nil, err
}
if uptime.Idle, err = strconv.ParseFloat(fields[1], 64); err != nil {
return nil, err
}
return &uptime, nil
}

373
vendor/github.com/c9s/goprocinfo/linux/vmstat.go generated vendored Normal file
View File

@ -0,0 +1,373 @@
package linux
import (
"io/ioutil"
"strconv"
"strings"
)
type VMStat struct {
NrFreePages uint64 `json:"nr_free_pages"`
NrAllocBatch uint64 `json:"nr_alloc_batch"`
NrInactiveAnon uint64 `json:"nr_inactive_anon"`
NrActiveAnon uint64 `json:"nr_active_anon"`
NrInactiveFile uint64 `json:"nr_inactive_file"`
NrActiveFile uint64 `json:"nr_active_file"`
NrUnevictable uint64 `json:"nr_unevictable"`
NrMlock uint64 `json:"nr_mlock"`
NrAnonPages uint64 `json:"nr_anon_pages"`
NrMapped uint64 `json:"nr_mapped"`
NrFilePages uint64 `json:"nr_file_pages"`
NrDirty uint64 `json:"nr_dirty"`
NrWriteback uint64 `json:"nr_writeback"`
NrSlabReclaimable uint64 `json:"nr_slab_reclaimable"`
NrSlabUnreclaimable uint64 `json:"nr_slab_unreclaimable"`
NrPageTablePages uint64 `json:"nr_page_table_pages"`
NrKernelStack uint64 `json:"nr_kernel_stack"`
NrUnstable uint64 `json:"nr_unstable"`
NrBounce uint64 `json:"nr_bounce"`
NrVmscanWrite uint64 `json:"nr_vmscan_write"`
NrVmscanImmediateReclaim uint64 `json:"nr_vmscan_immediate_reclaim"`
NrWritebackTemp uint64 `json:"nr_writeback_temp"`
NrIsolatedAnon uint64 `json:"nr_isolated_anon"`
NrIsolatedFile uint64 `json:"nr_isolated_file"`
NrShmem uint64 `json:"nr_shmem"`
NrDirtied uint64 `json:"nr_dirtied"`
NrWritten uint64 `json:"nr_written"`
NumaHit uint64 `json:"numa_hit"`
NumaMiss uint64 `json:"numa_miss"`
NumaForeign uint64 `json:"numa_foreign"`
NumaInterleave uint64 `json:"numa_interleave"`
NumaLocal uint64 `json:"numa_local"`
NumaOther uint64 `json:"numa_other"`
WorkingsetRefault uint64 `json:"workingset_refault"`
WorkingsetActivate uint64 `json:"workingset_activate"`
WorkingsetNodereclaim uint64 `json:"workingset_nodereclaim"`
NrAnonTransparentHugepages uint64 `json:"nr_anon_transparent_hugepages"`
NrFreeCma uint64 `json:"nr_free_cma"`
NrDirtyThreshold uint64 `json:"nr_dirty_threshold"`
NrDirtyBackgroundThreshold uint64 `json:"nr_dirty_background_threshold"`
PagePagein uint64 `json:"pgpgin"`
PagePageout uint64 `json:"pgpgout"`
PageSwapin uint64 `json:"pswpin"`
PageSwapout uint64 `json:"pswpout"`
PageAllocDMA uint64 `json:"pgalloc_dma"`
PageAllocDMA32 uint64 `json:"pgalloc_dma32"`
PageAllocNormal uint64 `json:"pgalloc_normal"`
PageAllocMovable uint64 `json:"pgalloc_movable"`
PageFree uint64 `json:"pgfree"`
PageActivate uint64 `json:"pgactivate"`
PageDeactivate uint64 `json:"pgdeactivate"`
PageFault uint64 `json:"pgfault"`
PageMajorFault uint64 `json:"pgmajfault"`
PageRefillDMA uint64 `json:"pgrefill_dma"`
PageRefillDMA32 uint64 `json:"pgrefill_dma32"`
PageRefillMormal uint64 `json:"pgrefill_normal"`
PageRefillMovable uint64 `json:"pgrefill_movable"`
PageStealKswapdDMA uint64 `json:"pgsteal_kswapd_dma"`
PageStealKswapdDMA32 uint64 `json:"pgsteal_kswapd_dma32"`
PageStealKswapdNormal uint64 `json:"pgsteal_kswapd_normal"`
PageStealKswapdMovable uint64 `json:"pgsteal_kswapd_movable"`
PageStealDirectDMA uint64 `json:"pgsteal_direct_dma"`
PageStealDirectDMA32 uint64 `json:"pgsteal_direct_dma32"`
PageStealDirectNormal uint64 `json:"pgsteal_direct_normal"`
PageStealDirectMovable uint64 `json:"pgsteal_direct_movable"`
PageScanKswapdDMA uint64 `json:"pgscan_kswapd_dma"`
PageScanKswapdDMA32 uint64 `json:"pgscan_kswapd_dma32"`
PageScanKswapdNormal uint64 `json:"pgscan_kswapd_normal"`
PageScanKswapdMovable uint64 `json:"pgscan_kswapd_movable"`
PageScanDirectDMA uint64 `json:"pgscan_direct_dma"`
PageScanDirectDMA32 uint64 `json:"pgscan_direct_dma32"`
PageScanDirectNormal uint64 `json:"pgscan_direct_normal"`
PageScanDirectMovable uint64 `json:"pgscan_direct_movable"`
PageScanDirectThrottle uint64 `json:"pgscan_direct_throttle"`
ZoneReclaimFailed uint64 `json:"zone_reclaim_failed"`
PageInodeSteal uint64 `json:"pginodesteal"`
SlabsScanned uint64 `json:"slabs_scanned"`
KswapdInodesteal uint64 `json:"kswapd_inodesteal"`
KswapdLowWatermarkHitQuickly uint64 `json:"kswapd_low_wmark_hit_quickly"`
KswapdHighWatermarkHitQuickly uint64 `json:"kswapd_high_wmark_hit_quickly"`
PageoutRun uint64 `json:"pageoutrun"`
AllocStall uint64 `json:"allocstall"`
PageRotated uint64 `json:"pgrotated"`
DropPagecache uint64 `json:"drop_pagecache"`
DropSlab uint64 `json:"drop_slab"`
NumaPteUpdates uint64 `json:"numa_pte_updates"`
NumaHugePteUpdates uint64 `json:"numa_huge_pte_updates"`
NumaHintFaults uint64 `json:"numa_hint_faults"`
NumaHintFaults_local uint64 `json:"numa_hint_faults_local"`
NumaPagesMigrated uint64 `json:"numa_pages_migrated"`
PageMigrateSuccess uint64 `json:"pgmigrate_success"`
PageMigrateFail uint64 `json:"pgmigrate_fail"`
CompactMigrateScanned uint64 `json:"compact_migrate_scanned"`
CompactFreeScanned uint64 `json:"compact_free_scanned"`
CompactIsolated uint64 `json:"compact_isolated"`
CompactStall uint64 `json:"compact_stall"`
CompactFail uint64 `json:"compact_fail"`
CompactSuccess uint64 `json:"compact_success"`
HtlbBuddyAllocSuccess uint64 `json:"htlb_buddy_alloc_success"`
HtlbBuddyAllocFail uint64 `json:"htlb_buddy_alloc_fail"`
UnevictablePagesCulled uint64 `json:"unevictable_pgs_culled"`
UnevictablePagesScanned uint64 `json:"unevictable_pgs_scanned"`
UnevictablePagesRescued uint64 `json:"unevictable_pgs_rescued"`
UnevictablePagesMlocked uint64 `json:"unevictable_pgs_mlocked"`
UnevictablePagesMunlocked uint64 `json:"unevictable_pgs_munlocked"`
UnevictablePagesCleared uint64 `json:"unevictable_pgs_cleared"`
UnevictablePagesStranded uint64 `json:"unevictable_pgs_stranded"`
THPFaultAlloc uint64 `json:"thp_fault_alloc"`
THPFaultFallback uint64 `json:"thp_fault_fallback"`
THPCollapseAlloc uint64 `json:"thp_collapse_alloc"`
THPCollapseAllocFailed uint64 `json:"thp_collapse_alloc_failed"`
THPSplit uint64 `json:"thp_split"`
THPZeroPageAlloc uint64 `json:"thp_zero_page_alloc"`
THPZeroPageAllocFailed uint64 `json:"thp_zero_page_alloc_failed"`
}
func ReadVMStat(path string) (*VMStat, error) {
b, err := ioutil.ReadFile(path)
if err != nil {
return nil, err
}
content := string(b)
lines := strings.Split(content, "\n")
vmstat := VMStat{}
for _, line := range lines {
fields := strings.Fields(line)
if len(fields) != 2 {
continue
}
name := fields[0]
value, _ := strconv.ParseUint(fields[1], 10, 64)
switch name {
case "nr_free_pages":
vmstat.NrFreePages = value
case "nr_alloc_batch":
vmstat.NrAllocBatch = value
case "nr_inactive_anon":
vmstat.NrInactiveAnon = value
case "nr_active_anon":
vmstat.NrActiveAnon = value
case "nr_inactive_file":
vmstat.NrInactiveFile = value
case "nr_active_file":
vmstat.NrActiveFile = value
case "nr_unevictable":
vmstat.NrUnevictable = value
case "nr_mlock":
vmstat.NrMlock = value
case "nr_anon_pages":
vmstat.NrAnonPages = value
case "nr_mapped":
vmstat.NrMapped = value
case "nr_file_pages":
vmstat.NrFilePages = value
case "nr_dirty":
vmstat.NrDirty = value
case "nr_writeback":
vmstat.NrWriteback = value
case "nr_slab_reclaimable":
vmstat.NrSlabReclaimable = value
case "nr_slab_unreclaimable":
vmstat.NrSlabUnreclaimable = value
case "nr_page_table_pages":
vmstat.NrPageTablePages = value
case "nr_kernel_stack":
vmstat.NrKernelStack = value
case "nr_unstable":
vmstat.NrUnstable = value
case "nr_bounce":
vmstat.NrBounce = value
case "nr_vmscan_write":
vmstat.NrVmscanWrite = value
case "nr_vmscan_immediate_reclaim":
vmstat.NrVmscanImmediateReclaim = value
case "nr_writeback_temp":
vmstat.NrWritebackTemp = value
case "nr_isolated_anon":
vmstat.NrIsolatedAnon = value
case "nr_isolated_file":
vmstat.NrIsolatedFile = value
case "nr_shmem":
vmstat.NrShmem = value
case "nr_dirtied":
vmstat.NrDirtied = value
case "nr_written":
vmstat.NrWritten = value
case "numa_hit":
vmstat.NumaHit = value
case "numa_miss":
vmstat.NumaMiss = value
case "numa_foreign":
vmstat.NumaForeign = value
case "numa_interleave":
vmstat.NumaInterleave = value
case "numa_local":
vmstat.NumaLocal = value
case "numa_other":
vmstat.NumaOther = value
case "workingset_refault":
vmstat.WorkingsetRefault = value
case "workingset_activate":
vmstat.WorkingsetActivate = value
case "workingset_nodereclaim":
vmstat.WorkingsetNodereclaim = value
case "nr_anon_transparent_hugepages":
vmstat.NrAnonTransparentHugepages = value
case "nr_free_cma":
vmstat.NrFreeCma = value
case "nr_dirty_threshold":
vmstat.NrDirtyThreshold = value
case "nr_dirty_background_threshold":
vmstat.NrDirtyBackgroundThreshold = value
case "pgpgin":
vmstat.PagePagein = value
case "pgpgout":
vmstat.PagePageout = value
case "pswpin":
vmstat.PageSwapin = value
case "pswpout":
vmstat.PageSwapout = value
case "pgalloc_dma":
vmstat.PageAllocDMA = value
case "pgalloc_dma32":
vmstat.PageAllocDMA32 = value
case "pgalloc_normal":
vmstat.PageAllocNormal = value
case "pgalloc_movable":
vmstat.PageAllocMovable = value
case "pgfree":
vmstat.PageFree = value
case "pgactivate":
vmstat.PageActivate = value
case "pgdeactivate":
vmstat.PageDeactivate = value
case "pgfault":
vmstat.PageFault = value
case "pgmajfault":
vmstat.PageMajorFault = value
case "pgrefill_dma":
vmstat.PageRefillDMA = value
case "pgrefill_dma32":
vmstat.PageRefillDMA32 = value
case "pgrefill_normal":
vmstat.PageRefillMormal = value
case "pgrefill_movable":
vmstat.PageRefillMovable = value
case "pgsteal_kswapd_dma":
vmstat.PageStealKswapdDMA = value
case "pgsteal_kswapd_dma32":
vmstat.PageStealKswapdDMA32 = value
case "pgsteal_kswapd_normal":
vmstat.PageStealKswapdNormal = value
case "pgsteal_kswapd_movable":
vmstat.PageStealKswapdMovable = value
case "pgsteal_direct_dma":
vmstat.PageStealDirectDMA = value
case "pgsteal_direct_dma32":
vmstat.PageStealDirectDMA32 = value
case "pgsteal_direct_normal":
vmstat.PageStealDirectNormal = value
case "pgsteal_direct_movable":
vmstat.PageStealDirectMovable = value
case "pgscan_kswapd_dma":
vmstat.PageScanKswapdDMA = value
case "pgscan_kswapd_dma32":
vmstat.PageScanKswapdDMA32 = value
case "pgscan_kswapd_normal":
vmstat.PageScanKswapdNormal = value
case "pgscan_kswapd_movable":
vmstat.PageScanKswapdMovable = value
case "pgscan_direct_dma":
vmstat.PageScanDirectDMA = value
case "pgscan_direct_dma32":
vmstat.PageScanDirectDMA32 = value
case "pgscan_direct_normal":
vmstat.PageScanDirectNormal = value
case "pgscan_direct_movable":
vmstat.PageScanDirectMovable = value
case "pgscan_direct_throttle":
vmstat.PageScanDirectThrottle = value
case "zone_reclaim_failed":
vmstat.ZoneReclaimFailed = value
case "pginodesteal":
vmstat.PageInodeSteal = value
case "slabs_scanned":
vmstat.SlabsScanned = value
case "kswapd_inodesteal":
vmstat.KswapdInodesteal = value
case "kswapd_low_wmark_hit_quickly":
vmstat.KswapdLowWatermarkHitQuickly = value
case "kswapd_high_wmark_hit_quickly":
vmstat.KswapdHighWatermarkHitQuickly = value
case "pageoutrun":
vmstat.PageoutRun = value
case "allocstall":
vmstat.AllocStall = value
case "pgrotated":
vmstat.PageRotated = value
case "drop_pagecache":
vmstat.DropPagecache = value
case "drop_slab":
vmstat.DropSlab = value
case "numa_pte_updates":
vmstat.NumaPteUpdates = value
case "numa_huge_pte_updates":
vmstat.NumaHugePteUpdates = value
case "numa_hint_faults":
vmstat.NumaHintFaults = value
case "numa_hint_faults_local":
vmstat.NumaHintFaults_local = value
case "numa_pages_migrated":
vmstat.NumaPagesMigrated = value
case "pgmigrate_success":
vmstat.PageMigrateSuccess = value
case "pgmigrate_fail":
vmstat.PageMigrateFail = value
case "compact_migrate_scanned":
vmstat.CompactMigrateScanned = value
case "compact_free_scanned":
vmstat.CompactFreeScanned = value
case "compact_isolated":
vmstat.CompactIsolated = value
case "compact_stall":
vmstat.CompactStall = value
case "compact_fail":
vmstat.CompactFail = value
case "compact_success":
vmstat.CompactSuccess = value
case "htlb_buddy_alloc_success":
vmstat.HtlbBuddyAllocSuccess = value
case "htlb_buddy_alloc_fail":
vmstat.HtlbBuddyAllocFail = value
case "unevictable_pgs_culled":
vmstat.UnevictablePagesCulled = value
case "unevictable_pgs_scanned":
vmstat.UnevictablePagesScanned = value
case "unevictable_pgs_rescued":
vmstat.UnevictablePagesRescued = value
case "unevictable_pgs_mlocked":
vmstat.UnevictablePagesMlocked = value
case "unevictable_pgs_munlocked":
vmstat.UnevictablePagesMunlocked = value
case "unevictable_pgs_cleared":
vmstat.UnevictablePagesCleared = value
case "unevictable_pgs_stranded":
vmstat.UnevictablePagesStranded = value
case "thp_fault_alloc":
vmstat.THPFaultAlloc = value
case "thp_fault_fallback":
vmstat.THPFaultFallback = value
case "thp_collapse_alloc":
vmstat.THPCollapseAlloc = value
case "thp_collapse_alloc_failed":
vmstat.THPCollapseAllocFailed = value
case "thp_split":
vmstat.THPSplit = value
case "thp_zero_page_alloc":
vmstat.THPZeroPageAlloc = value
case "thp_zero_page_alloc_failed":
vmstat.THPZeroPageAllocFailed = value
}
}
return &vmstat, nil
}

View File

@ -3,10 +3,8 @@ package wtf
import (
"bytes"
"fmt"
"strings"
"time"
"github.com/rivo/tview"
"strings"
)
//BarGraph lets make graphs
@ -21,18 +19,23 @@ type BarGraph struct {
Position
Data [][2]int64
}
// Bar describes a single row of a bar graph.
type Bar struct {
	// Label is the text identifying this bar.
	Label string
	// Percent is the bar's length; BuildStars scales it by maxStars/100,
	// so it is presumably in the 0-100 range — TODO confirm callers clamp it.
	Percent int
	// ValueLabel, when non-empty, is shown in place of the raw Percent value.
	ValueLabel string
}
// NewBarGraph initialize your fancy new graph
func NewBarGraph(name string, configKey string, focusable bool) BarGraph {
func NewBarGraph(app *tview.Application, name string, configKey string, focusable bool) BarGraph {
widget := BarGraph{
enabled: Config.UBool(fmt.Sprintf("wtf.mods.%s.enabled", configKey), false),
focusable: focusable,
starChar: Config.UString(fmt.Sprintf("wtf.mods.%s.graphIcon", configKey), name),
starChar: Config.UString(fmt.Sprintf("wtf.mods.%s.graphIcon", configKey), "|"),
maxStars: Config.UInt(fmt.Sprintf("wtf.mods.%s.graphStars", configKey), 20),
Name: Config.UString(fmt.Sprintf("wtf.mods.%s.title", configKey), name),
RefreshInt: Config.UInt(fmt.Sprintf("wtf.mods.%s.refreshInterval", configKey)),
RefreshInt: Config.UInt(fmt.Sprintf("wtf.mods.%s.refreshInterval", configKey), 1),
}
widget.Position = NewPosition(
@ -42,7 +45,7 @@ func NewBarGraph(name string, configKey string, focusable bool) BarGraph {
Config.UInt(fmt.Sprintf("wtf.mods.%s.position.height", configKey)),
)
widget.addView()
widget.addView(app, configKey)
return widget
}
@ -89,7 +92,7 @@ func (widget *BarGraph) TextView() *tview.TextView {
/* -------------------- Unexported Functions -------------------- */
func (widget *BarGraph) addView() {
func (widget *BarGraph) addView(app *tview.Application, configKey string) {
view := tview.NewTextView()
view.SetBackgroundColor(ColorFor(Config.UString("wtf.colors.background", "black")))
@ -97,6 +100,12 @@ func (widget *BarGraph) addView() {
view.SetBorderColor(ColorFor(widget.BorderColor()))
view.SetDynamicColors(true)
view.SetTitle(widget.Name)
view.SetTitleColor(ColorFor(
Config.UString(
fmt.Sprintf("wtf.mods.%s.colors.title", configKey),
Config.UString("wtf.colors.title", "white"),
),
))
view.SetWrap(false)
widget.View = view
@ -104,69 +113,53 @@ func (widget *BarGraph) addView() {
// BuildBars will build a string of * to represent your data of [time][value]
// time should be passed as a int64
func (widget *BarGraph) BuildBars(data [][2]int64) {
func (widget *BarGraph) BuildBars(data []Bar) {
widget.View.SetText(BuildStars(data, widget.maxStars, widget.starChar))
}
//BuildStars build the string to display
func BuildStars(data [][2]int64, maxStars int, starChar string) string {
func BuildStars(data []Bar, maxStars int, starChar string) string {
var buffer bytes.Buffer
//counter to inintialize min value
var count int
//store the max value from the array
var maxValue int
//store the min value from the array
var minValue int
// the number of characters in the longest label
var longestLabel int
//just getting min and max values
for i := range data {
for _, bar := range data {
var val = int(data[i][0])
//initialize the min value
if count == 0 {
minValue = val
}
count++
//update max value
if val > maxValue {
maxValue = val
}
//update minValue
if val < minValue {
minValue = val
if len(bar.Label) > longestLabel {
longestLabel = len(bar.Label)
}
}
// each number = how many stars?
var starRatio = float64(maxStars) / float64((maxValue - minValue))
var starRatio = float64(maxStars) / 100
//build the stars
for i := range data {
var val = int(data[i][0])
for _, bar := range data {
//how many stars for this one?
var starCount = int(float64((val - minValue)) * starRatio)
var starCount = int(float64(bar.Percent) * starRatio)
if starCount == 0 {
starCount = 1
label := bar.ValueLabel
if len(label) == 0 {
label = fmt.Sprint(bar.Percent)
}
//build the actual string
var stars = strings.Repeat(starChar, starCount)
//parse the time
var t = time.Unix(int64(data[i][1]/1000), 0)
//write the line
buffer.WriteString(fmt.Sprintf("%s -\t [red]%s[white] - (%d)\n", t.Format("Jan 02, 2006"), stars, val))
buffer.WriteString(
fmt.Sprintf(
"%s%s[[red]%s[white]%s] %s\n",
bar.Label,
strings.Repeat(" ", longestLabel - len(bar.Label)),
strings.Repeat(starChar, starCount),
strings.Repeat(" ", maxStars - starCount),
label,
),
)
}
return buffer.String()