1
0
mirror of https://github.com/taigrr/wtf synced 2025-01-18 04:03:14 -08:00

go mod vendor update

Signed-off-by: Chris Cummer <chriscummer@me.com>
This commit is contained in:
Chris Cummer
2019-12-14 08:52:34 -08:00
parent 703619bf0a
commit 3d4059de02
665 changed files with 104373 additions and 59789 deletions

83
vendor/github.com/zorkian/go-datadog-api/app_keys.go generated vendored Normal file
View File

@@ -0,0 +1,83 @@
/*
* Datadog API for Go
*
* Please see the included LICENSE file for licensing information.
*
* Copyright 2019 by authors and contributors.
*/
package datadog
import (
"fmt"
)
// APPKey represents an APP key
type APPKey struct {
Owner *string `json:"owner,omitempty"`
Name *string `json:"name,omitemtpy"`
Hash *string `json:"hash,omitempty"`
}
// reqAPPKeys is the response envelope for GET /v1/application_key,
// which returns a slice of all APPKeys under "application_keys".
type reqAPPKeys struct {
	APPKeys []APPKey `json:"application_keys,omitempty"`
}
// reqAPPKey is similar to reqAPPKeys, but used for values returned by
// /v1/application_key/<somekey> which contain one object (not list) "application_key"
// (not "application_keys") containing the found key.
type reqAPPKey struct {
	APPKey *APPKey `json:"application_key"`
}
// GetAPPKeys retrieves every application key on the account, or returns
// an error when the API request fails.
func (client *Client) GetAPPKeys() ([]APPKey, error) {
	var resp reqAPPKeys
	err := client.doJsonRequest("GET", "/v1/application_key", nil, &resp)
	if err != nil {
		return nil, err
	}
	return resp.APPKeys, nil
}
// GetAPPKey looks up a single application key by its hash and returns it,
// or an error when the lookup fails.
func (client *Client) GetAPPKey(hash string) (*APPKey, error) {
	uri := fmt.Sprintf("/v1/application_key/%s", hash)
	var resp reqAPPKey
	if err := client.doJsonRequest("GET", uri, nil, &resp); err != nil {
		return nil, err
	}
	return resp.APPKey, nil
}
// CreateAPPKey creates a new application key with the given name and
// returns the server-populated key (owner, hash, ...), or an error on
// failure.
func (client *Client) CreateAPPKey(name string) (*APPKey, error) {
	// The creation endpoint accepts only the key's name.
	payload := struct {
		Name *string `json:"name,omitempty"`
	}{
		&name,
	}
	var resp reqAPPKey
	if err := client.doJsonRequest("POST", "/v1/application_key", payload, &resp); err != nil {
		return nil, err
	}
	return resp.APPKey, nil
}
// UpdateAPPKey updates the given APP key. Only Name can be changed; the
// key is identified by its Hash. Returns an error on failure.
func (client *Client) UpdateAPPKey(appkey *APPKey) error {
	// Guard against a nil key or nil Hash, which previously caused a
	// nil-pointer panic on the *appkey.Hash dereference below.
	if appkey == nil || appkey.Hash == nil {
		return fmt.Errorf("appkey and appkey.Hash must not be nil")
	}
	out := reqAPPKey{APPKey: appkey}
	// Only the name may be sent in the update payload.
	toPost := struct {
		Name *string `json:"name,omitempty"`
	}{
		appkey.Name,
	}
	return client.doJsonRequest("PUT", fmt.Sprintf("/v1/application_key/%s", *appkey.Hash), toPost, &out)
}
// DeleteAPPKey removes the APP key identified by hash, returning a
// non-nil error when the deletion fails.
func (client *Client) DeleteAPPKey(hash string) error {
	uri := fmt.Sprintf("/v1/application_key/%s", hash)
	return client.doJsonRequest("DELETE", uri, nil, nil)
}

View File

@@ -14,27 +14,29 @@ import (
)
const (
ALERT_GRAPH_WIDGET = "alert_graph"
ALERT_VALUE_WIDGET = "alert_value"
CHANGE_WIDGET = "change"
CHECK_STATUS_WIDGET = "check_status"
DISTRIBUTION_WIDGET = "distribution"
EVENT_STREAM_WIDGET = "event_stream"
EVENT_TIMELINE_WIDGET = "event_timeline"
FREE_TEXT_WIDGET = "free_text"
GROUP_WIDGET = "group"
HEATMAP_WIDGET = "heatmap"
HOSTMAP_WIDGET = "hostmap"
IFRAME_WIDGET = "iframe"
IMAGE_WIDGET = "image"
LOG_STREAM_WIDGET = "log_stream"
MANAGE_STATUS_WIDGET = "manage_status"
NOTE_WIDGET = "note"
QUERY_VALUE_WIDGET = "query_value"
SCATTERPLOT_WIDGET = "scatterplot"
TIMESERIES_WIDGET = "timeseries"
TOPLIST_WIDGET = "toplist"
TRACE_SERVICE_WIDGET = "trace_service"
ALERT_GRAPH_WIDGET = "alert_graph"
ALERT_VALUE_WIDGET = "alert_value"
CHANGE_WIDGET = "change"
CHECK_STATUS_WIDGET = "check_status"
DISTRIBUTION_WIDGET = "distribution"
EVENT_STREAM_WIDGET = "event_stream"
EVENT_TIMELINE_WIDGET = "event_timeline"
FREE_TEXT_WIDGET = "free_text"
GROUP_WIDGET = "group"
HEATMAP_WIDGET = "heatmap"
HOSTMAP_WIDGET = "hostmap"
IFRAME_WIDGET = "iframe"
IMAGE_WIDGET = "image"
LOG_STREAM_WIDGET = "log_stream"
MANAGE_STATUS_WIDGET = "manage_status"
NOTE_WIDGET = "note"
QUERY_VALUE_WIDGET = "query_value"
QUERY_TABLE_WIDGET = "query_table"
SCATTERPLOT_WIDGET = "scatterplot"
SERVICE_LEVEL_OBJECTIVE_WIDGET = "slo"
TIMESERIES_WIDGET = "timeseries"
TOPLIST_WIDGET = "toplist"
TRACE_SERVICE_WIDGET = "trace_service"
)
// BoardWidget represents the structure of any widget. However, the widget Definition structure is
@@ -89,8 +91,12 @@ func (widget *BoardWidget) GetWidgetType() (string, error) {
return NOTE_WIDGET, nil
case QueryValueDefinition:
return QUERY_VALUE_WIDGET, nil
case QueryTableDefinition:
return QUERY_TABLE_WIDGET, nil
case ScatterplotDefinition:
return SCATTERPLOT_WIDGET, nil
case ServiceLevelObjectiveDefinition:
return SERVICE_LEVEL_OBJECTIVE_WIDGET, nil
case TimeseriesDefinition:
return TIMESERIES_WIDGET, nil
case ToplistDefinition:
@@ -238,7 +244,7 @@ type HeatmapRequest struct {
ProcessQuery *WidgetProcessQuery `json:"process_query,omitempty"`
}
// HostmapDefinition represents the definition for a Heatmap widget
// HostmapDefinition represents the definition for a Hostmap widget
type HostmapDefinition struct {
Type *string `json:"type"`
Requests *HostmapRequests `json:"requests"`
@@ -346,7 +352,29 @@ type QueryValueRequest struct {
ProcessQuery *WidgetProcessQuery `json:"process_query,omitempty"`
}
// ScatterplotDefinition represents the definition for a Heatmap widget
// QueryTableDefinition represents the definition for a Table widget.
type QueryTableDefinition struct {
	Type *string `json:"type"`
	Requests []QueryTableRequest `json:"requests"`
	Title *string `json:"title,omitempty"`
	TitleSize *string `json:"title_size,omitempty"`
	TitleAlign *string `json:"title_align,omitempty"`
	Time *WidgetTime `json:"time,omitempty"`
}
// QueryTableRequest describes one row/column request of a Table widget.
type QueryTableRequest struct {
	Alias *string `json:"alias,omitempty"`
	ConditionalFormats []WidgetConditionalFormat `json:"conditional_formats,omitempty"`
	Aggregator *string `json:"aggregator,omitempty"`
	Limit *int `json:"limit,omitempty"`
	Order *string `json:"order,omitempty"`
	// A QueryTableRequest should implement exactly one of the following query types.
	MetricQuery *string `json:"q,omitempty"`
	ApmQuery *WidgetApmOrLogQuery `json:"apm_query,omitempty"`
	LogQuery *WidgetApmOrLogQuery `json:"log_query,omitempty"`
	ProcessQuery *WidgetProcessQuery `json:"process_query,omitempty"`
}
// ScatterplotDefinition represents the definition for a Scatterplot widget
type ScatterplotDefinition struct {
Type *string `json:"type"`
Requests *ScatterplotRequests `json:"requests"`
@@ -371,6 +399,23 @@ type ScatterplotRequest struct {
ProcessQuery *WidgetProcessQuery `json:"process_query,omitempty"`
}
// ServiceLevelObjectiveDefinition represents the definition for a Service Level Objective widget.
type ServiceLevelObjectiveDefinition struct {
	// Common widget fields.
	Type *string `json:"type"`
	Title *string `json:"title,omitempty"`
	TitleSize *string `json:"title_size,omitempty"`
	TitleAlign *string `json:"title_align,omitempty"`
	// SLO specific fields.
	ViewType *string `json:"view_type,omitempty"` // currently only "detail" is supported
	ServiceLevelObjectiveID *string `json:"slo_id,omitempty"`
	ShowErrorBudget *bool `json:"show_error_budget,omitempty"`
	ViewMode *string `json:"view_mode,omitempty"` // overall,component,both
	TimeWindows []string `json:"time_windows,omitempty"` // 7d,30d,90d,week_to_date,previous_week,month_to_date,previous_month
}
// TimeseriesDefinition represents the definition for a Timeseries widget
type TimeseriesDefinition struct {
Type *string `json:"type"`
@@ -401,7 +446,7 @@ type TimeseriesRequestStyle struct {
LineWidth *string `json:"line_width,omitempty"`
}
// ToplistDefinition represents the definition for a Distribution widget
// ToplistDefinition represents the definition for a Top list widget
type ToplistDefinition struct {
Type *string `json:"type"`
Requests []ToplistRequest `json:"requests"`
@@ -599,6 +644,14 @@ func (widget *BoardWidget) UnmarshalJSON(data []byte) error {
return err
}
widget.Definition = queryValueWidget.Definition
case QUERY_TABLE_WIDGET:
var queryTableWidget struct {
Definition QueryTableDefinition `json:"definition"`
}
if err := json.Unmarshal(data, &queryTableWidget); err != nil {
return err
}
widget.Definition = queryTableWidget.Definition
case SCATTERPLOT_WIDGET:
var scatterplotWidget struct {
Definition ScatterplotDefinition `json:"definition"`
@@ -607,6 +660,14 @@ func (widget *BoardWidget) UnmarshalJSON(data []byte) error {
return err
}
widget.Definition = scatterplotWidget.Definition
case SERVICE_LEVEL_OBJECTIVE_WIDGET:
var serviceLevelObjectiveWidget struct {
Definition ServiceLevelObjectiveDefinition `json:"definition"`
}
if err := json.Unmarshal(data, &serviceLevelObjectiveWidget); err != nil {
return err
}
widget.Definition = serviceLevelObjectiveWidget.Definition
case TIMESERIES_WIDGET:
var timeseriesWidget struct {
Definition TimeseriesDefinition `json:"definition"`
@@ -678,14 +739,16 @@ type WidgetConditionalFormat struct {
ImageUrl *string `json:"image_url,omitempty"`
HideValue *bool `json:"hide_value,omitempty"`
Timeframe *string `json:"timeframe,omitempty"`
Metric *string `json:"metric,omitempty"`
}
// WidgetApmOrLogQuery represents an APM or a Log query
type WidgetApmOrLogQuery struct {
Index *string `json:"index"`
Compute *ApmOrLogQueryCompute `json:"compute"`
Search *ApmOrLogQuerySearch `json:"search,omitempty"`
GroupBy []ApmOrLogQueryGroupBy `json:"group_by,omitempty"`
Index *string `json:"index"`
Compute *ApmOrLogQueryCompute `json:"compute,omitempty"`
MultiCompute []ApmOrLogQueryCompute `json:"multi_compute,omitempty"`
Search *ApmOrLogQuerySearch `json:"search,omitempty"`
GroupBy []ApmOrLogQueryGroupBy `json:"group_by,omitempty"`
}
type ApmOrLogQueryCompute struct {
Aggregation *string `json:"aggregation"`

View File

@@ -29,6 +29,24 @@ type Board struct {
ModifiedAt *string `json:"modified_at,omitempty"`
}
// BoardLite represents a simplified dashboard (without widgets, notify list, ...).
// It's used when we load all boards.
type BoardLite struct {
	Title *string `json:"title,omitempty"`
	Description *string `json:"description,omitempty"`
	LayoutType *string `json:"layout_type,omitempty"`
	Id *string `json:"id,omitempty"`
	Url *string `json:"url,omitempty"`
	AuthorHandle *string `json:"author_handle,omitempty"`
	IsReadOnly *bool `json:"is_read_only,omitempty"`
	CreatedAt *string `json:"created_at,omitempty"`
	ModifiedAt *string `json:"modified_at,omitempty"`
}
// reqGetBoards is the response envelope for GET /v1/dashboard.
type reqGetBoards struct {
	Boards []BoardLite `json:"dashboards,omitempty"`
}
// GetBoard returns a single dashboard created on this account.
func (client *Client) GetBoard(id string) (*Board, error) {
var board Board
@@ -57,3 +75,13 @@ func (client *Client) CreateBoard(board *Board) (*Board, error) {
func (client *Client) UpdateBoard(board *Board) error {
return client.doJsonRequest("PUT", fmt.Sprintf("/v1/dashboard/%s", *board.Id), board, nil)
}
// GetBoards returns all dashboards on this account as lightweight
// BoardLite values (no widgets).
func (client *Client) GetBoards() ([]BoardLite, error) {
	var resp reqGetBoards
	err := client.doJsonRequest("GET", "/v1/dashboard", nil, &resp)
	if err != nil {
		return nil, err
	}
	return resp.Boards, nil
}

View File

@@ -24,6 +24,9 @@ type Client struct {
//The Http Client that is used to make requests
HttpClient *http.Client
RetryTimeout time.Duration
//Option to specify extra headers like User-Agent
ExtraHeader map[string]string
}
// valid is the struct to unmarshal validation endpoint responses into.
@@ -37,7 +40,7 @@ type valid struct {
func NewClient(apiKey, appKey string) *Client {
baseUrl := os.Getenv("DATADOG_HOST")
if baseUrl == "" {
baseUrl = "https://app.datadoghq.com"
baseUrl = "https://api.datadoghq.com"
}
return &Client{
@@ -65,7 +68,7 @@ func (c *Client) GetBaseUrl() string {
return c.baseUrl
}
// Validate checks if the API and application keys are valid.
// Validate checks if the API key (not the APP key) is valid.
func (client *Client) Validate() (bool, error) {
var out valid
var resp *http.Response
@@ -79,6 +82,8 @@ func (client *Client) Validate() (bool, error) {
if err != nil {
return false, err
}
req.Header.Set("DD-API-KEY", client.apiKey)
req.Header.Set("DD-APPLICATION-KEY", client.appKey)
resp, err = client.doRequestWithRetries(req, client.RetryTimeout)
if err != nil {
@@ -87,6 +92,10 @@ func (client *Client) Validate() (bool, error) {
defer resp.Body.Close()
if resp.StatusCode == http.StatusForbidden {
return false, nil
}
body, err := ioutil.ReadAll(resp.Body)
if err != nil {
return false, err

View File

@@ -0,0 +1,76 @@
/*
* Datadog API for Go
*
* Please see the included LICENSE file for licensing information.
*
* Copyright 2019 by authors and contributors.
*/
package datadog
import (
"fmt"
)
// DashboardListItemV2 represents a single dashboard in a dashboard list.
type DashboardListItemV2 struct {
	ID *string `json:"id,omitempty"`
	Type *string `json:"type,omitempty"`
}
// reqDashboardListItemsV2 is the request/response envelope keyed "dashboards".
type reqDashboardListItemsV2 struct {
	Dashboards []DashboardListItemV2 `json:"dashboards,omitempty"`
}
// reqAddedDashboardListItemsV2 is the response envelope for POST (items added).
type reqAddedDashboardListItemsV2 struct {
	Dashboards []DashboardListItemV2 `json:"added_dashboards_to_list,omitempty"`
}
// reqDeletedDashboardListItemsV2 is the response envelope for DELETE (items removed).
type reqDeletedDashboardListItemsV2 struct {
	Dashboards []DashboardListItemV2 `json:"deleted_dashboards_from_list,omitempty"`
}
// GetDashboardListItemsV2 fetches the dashboard definitions contained in
// the manual dashboard list identified by id.
func (client *Client) GetDashboardListItemsV2(id int) ([]DashboardListItemV2, error) {
	uri := fmt.Sprintf("/v2/dashboard/lists/manual/%d/dashboards", id)
	var resp reqDashboardListItemsV2
	if err := client.doJsonRequest("GET", uri, nil, &resp); err != nil {
		return nil, err
	}
	return resp.Dashboards, nil
}
// AddDashboardListItemsV2 adds dashboards to an existing dashboard list.
//
// Items already present in the list are ignored (never added twice).
func (client *Client) AddDashboardListItemsV2(dashboardListID int, items []DashboardListItemV2) ([]DashboardListItemV2, error) {
	payload := reqDashboardListItemsV2{items}
	uri := fmt.Sprintf("/v2/dashboard/lists/manual/%d/dashboards", dashboardListID)
	var resp reqAddedDashboardListItemsV2
	if err := client.doJsonRequest("POST", uri, payload, &resp); err != nil {
		return nil, err
	}
	return resp.Dashboards, nil
}
// UpdateDashboardListItemsV2 updates dashboards of an existing dashboard list.
//
// The list is replaced so that it contains exactly the given items.
func (client *Client) UpdateDashboardListItemsV2(dashboardListID int, items []DashboardListItemV2) ([]DashboardListItemV2, error) {
	payload := reqDashboardListItemsV2{items}
	uri := fmt.Sprintf("/v2/dashboard/lists/manual/%d/dashboards", dashboardListID)
	var resp reqDashboardListItemsV2
	if err := client.doJsonRequest("PUT", uri, payload, &resp); err != nil {
		return nil, err
	}
	return resp.Dashboards, nil
}
// DeleteDashboardListItemsV2 deletes dashboards from an existing dashboard
// list. Any of the given items found in the list are removed.
func (client *Client) DeleteDashboardListItemsV2(dashboardListID int, items []DashboardListItemV2) ([]DashboardListItemV2, error) {
	payload := reqDashboardListItemsV2{items}
	uri := fmt.Sprintf("/v2/dashboard/lists/manual/%d/dashboards", dashboardListID)
	var resp reqDeletedDashboardListItemsV2
	if err := client.doJsonRequest("DELETE", uri, payload, &resp); err != nil {
		return nil, err
	}
	return resp.Dashboards, nil
}

View File

@@ -22,7 +22,6 @@ type GraphDefinitionRequestStyle struct {
// GraphDefinitionRequest represents the requests passed into each graph.
type GraphDefinitionRequest struct {
Query *string `json:"q,omitempty"`
Stacked *bool `json:"stacked,omitempty"`
Aggregator *string `json:"aggregator,omitempty"`
ConditionalFormats []DashboardConditionalFormat `json:"conditional_formats,omitempty"`
@@ -37,6 +36,49 @@ type GraphDefinitionRequest struct {
OrderBy *string `json:"order_by,omitempty"`
ExtraCol *string `json:"extra_col,omitempty"`
Metadata map[string]GraphDefinitionMetadata `json:"metadata,omitempty"`
// A Graph can only have one of these types of query.
Query *string `json:"q,omitempty"`
LogQuery *GraphApmOrLogQuery `json:"log_query,omitempty"`
ApmQuery *GraphApmOrLogQuery `json:"apm_query,omitempty"`
ProcessQuery *GraphProcessQuery `json:"process_query,omitempty"`
}
// GraphApmOrLogQuery represents an APM or a Log query attached to a graph.
type GraphApmOrLogQuery struct {
	Index *string `json:"index"`
	Compute *GraphApmOrLogQueryCompute `json:"compute"`
	Search *GraphApmOrLogQuerySearch `json:"search,omitempty"`
	// NOTE(review): tag is camelCase "groupBy" here, unlike the snake_case
	// "group_by" used by WidgetApmOrLogQuery — confirm against the API.
	GroupBy []GraphApmOrLogQueryGroupBy `json:"groupBy,omitempty"`
}
// GraphApmOrLogQueryCompute describes the aggregation applied to the query.
type GraphApmOrLogQueryCompute struct {
	Aggregation *string `json:"aggregation"`
	Facet *string `json:"facet,omitempty"`
	Interval *int `json:"interval,omitempty"`
}
// GraphApmOrLogQuerySearch holds the free-text search query.
type GraphApmOrLogQuerySearch struct {
	Query *string `json:"query"`
}
// GraphApmOrLogQueryGroupBy describes one group-by facet with optional limit and sort.
type GraphApmOrLogQueryGroupBy struct {
	Facet *string `json:"facet"`
	Limit *int `json:"limit,omitempty"`
	Sort *GraphApmOrLogQueryGroupBySort `json:"sort,omitempty"`
}
// GraphApmOrLogQueryGroupBySort describes how a group-by is ordered.
type GraphApmOrLogQueryGroupBySort struct {
	Aggregation *string `json:"aggregation"`
	Order *string `json:"order"`
	Facet *string `json:"facet,omitempty"`
}
// GraphProcessQuery represents a process query attached to a graph.
type GraphProcessQuery struct {
	Metric *string `json:"metric"`
	SearchBy *string `json:"search_by,omitempty"`
	FilterBy []string `json:"filter_by,omitempty"`
	Limit *int `json:"limit,omitempty"`
}
// GraphDefinitionMetadata aliases TileDefMetadata for graph definitions.
type GraphDefinitionMetadata TileDefMetadata

File diff suppressed because it is too large Load Diff

View File

@@ -11,6 +11,7 @@ package datadog
import (
"encoding/json"
"errors"
"math"
"strconv"
)
@@ -32,6 +33,10 @@ func GetBool(v *bool) (bool, bool) {
// to store v and returns a pointer to it.
func Int(v int) *int { return &v }
// Int64 allocates a new int64 holding v and returns its address.
func Int64(v int64) *int64 {
	p := new(int64)
	*p = v
	return p
}
// GetIntOk is a helper routine that returns a boolean representing
// if a value was set, and if so, dereferences the pointer to it.
func GetIntOk(v *int) (int, bool) {
@@ -42,6 +47,25 @@ func GetIntOk(v *int) (int, bool) {
return 0, false
}
// Float64 allocates a new float64 holding v and returns its address.
func Float64(v float64) *float64 {
	p := new(float64)
	*p = v
	return p
}
// GetFloat64Ok reports whether v is set and, when it is, returns the
// dereferenced value.
func GetFloat64Ok(v *float64) (float64, bool) {
	if v == nil {
		return 0, false
	}
	return *v, true
}
// Float64AlmostEqual reports whether a and b are within tolerance of each
// other (absolute difference strictly less than tolerance).
func Float64AlmostEqual(a, b, tolerance float64) bool {
	diff := a - b
	if diff < 0 {
		diff = -diff
	}
	return diff < tolerance
}
// String is a helper routine that allocates a new string value
// to store v and returns a pointer to it.
func String(v string) *string { return &v }

View File

@@ -31,3 +31,20 @@ func (client *Client) UnmuteHost(host string) (*HostActionResp, error) {
}
return &out, nil
}
// HostTotalsResp defines the response to GET /v1/hosts/totals.
type HostTotalsResp struct {
	// TotalUp is the count of hosts that reported in the past two hours.
	TotalUp *int `json:"total_up"`
	// TotalActive is the count of hosts that reported in the past hour.
	TotalActive *int `json:"total_active"`
}
// GetHostTotals returns the number of total active hosts and total up
// hosts. Active means the host has reported in the past hour; up means it
// has reported in the past two hours.
func (client *Client) GetHostTotals() (*HostTotalsResp, error) {
	var totals HostTotalsResp
	if err := client.doJsonRequest("GET", "/v1/hosts/totals", nil, &totals); err != nil {
		return nil, err
	}
	return &totals, nil
}

View File

@@ -8,6 +8,8 @@
package datadog
import "net/url"
/*
PagerDuty Integration
*/
@@ -44,7 +46,7 @@ type IntegrationPDRequest struct {
// Use this if you want to setup the integration for the first time
// or to add more services/schedules.
func (client *Client) CreateIntegrationPD(pdIntegration *IntegrationPDRequest) error {
return client.doJsonRequest("POST", "/v1/integration/pagerduty", pdIntegration, nil)
return client.doJsonRequest("PUT", "/v1/integration/pagerduty", pdIntegration, nil)
}
// UpdateIntegrationPD updates the PagerDuty Integration.
@@ -68,6 +70,41 @@ func (client *Client) DeleteIntegrationPD() error {
return client.doJsonRequest("DELETE", "/v1/integration/pagerduty", nil, nil)
}
// CreateIntegrationPDService creates a single service object in the PagerDuty integration.
// Note that creating a service object requires the integration to be activated.
func (client *Client) CreateIntegrationPDService(serviceObject *ServicePDRequest) error {
return client.doJsonRequest("POST", "/v1/integration/pagerduty/configuration/services", serviceObject, nil)
}
// UpdateIntegrationPDService updates a single service object in the
// PagerDuty integration. The object is identified by its ServiceName;
// only the service key can be changed.
func (client *Client) UpdateIntegrationPDService(serviceObject *ServicePDRequest) error {
	// The API accepts only the service key in the payload, not the name.
	payload := struct {
		ServiceKey *string `json:"service_key,omitempty"`
	}{
		serviceObject.ServiceKey,
	}
	uri := "/v1/integration/pagerduty/configuration/services/" + *serviceObject.ServiceName
	return client.doJsonRequest("PUT", uri, payload, nil)
}
// GetIntegrationPDService gets a single service object in the PagerDuty
// integration by name.
// NOTE: the service key is never returned by the API, so it won't be set.
func (client *Client) GetIntegrationPDService(serviceName string) (*ServicePDRequest, error) {
	var service ServicePDRequest
	err := client.doJsonRequest("GET", "/v1/integration/pagerduty/configuration/services/"+serviceName, nil, &service)
	if err != nil {
		return nil, err
	}
	return &service, nil
}
// DeleteIntegrationPDService deletes a single service object, identified by
// name, from the PagerDuty integration.
func (client *Client) DeleteIntegrationPDService(serviceName string) error {
uri := "/v1/integration/pagerduty/configuration/services/" + serviceName
return client.doJsonRequest("DELETE", uri, nil, nil)
}
/*
Slack Integration
*/
@@ -150,6 +187,28 @@ type IntegrationAWSAccountDeleteRequest struct {
RoleName *string `json:"role_name"`
}
// IntegrationAWSLambdaARNRequest is the payload for attaching/detaching a
// Lambda ARN to an AWS account for log collection.
type IntegrationAWSLambdaARNRequest struct {
	AccountID *string `json:"account_id"`
	LambdaARN *string `json:"lambda_arn"`
}
// IntegrationAWSLambdaARN is only defined to properly parse the AWS logs GET response.
type IntegrationAWSLambdaARN struct {
	LambdaARN *string `json:"arn"`
}
// IntegrationAWSServicesLogCollection lists the AWS services to enable log
// collection for, on a given account.
type IntegrationAWSServicesLogCollection struct {
	AccountID *string `json:"account_id"`
	Services []string `json:"services"`
}
// IntegrationAWSLogCollection is only defined to properly parse the AWS logs GET response.
type IntegrationAWSLogCollection struct {
	AccountID *string `json:"account_id"`
	LambdaARNs []IntegrationAWSLambdaARN `json:"lambdas"`
	Services []string `json:"services"`
}
// CreateIntegrationAWS adds a new AWS Account in the AWS Integrations.
// Use this if you want to setup the integration for the first time
// or to add more accounts.
@@ -162,6 +221,14 @@ func (client *Client) CreateIntegrationAWS(awsAccount *IntegrationAWSAccount) (*
return &out, nil
}
// UpdateIntegrationAWS updates an already existing AWS account in the AWS
// integration. The account is identified by account_id and role_name query
// parameters.
// NOTE(review): AccountID and RoleName are dereferenced without nil checks;
// callers must set both.
func (client *Client) UpdateIntegrationAWS(awsAccount *IntegrationAWSAccount) error {
	params := url.Values{}
	params.Set("account_id", *awsAccount.AccountID)
	params.Set("role_name", *awsAccount.RoleName)
	return client.doJsonRequest("PUT", "/v1/integration/aws?"+params.Encode(), awsAccount, nil)
}
// GetIntegrationAWS gets all the AWS Accounts in the AWS Integrations from Datadog.
func (client *Client) GetIntegrationAWS() (*[]IntegrationAWSAccount, error) {
var response IntegrationAWSAccountGetResponse
@@ -177,6 +244,31 @@ func (client *Client) DeleteIntegrationAWS(awsAccount *IntegrationAWSAccountDele
return client.doJsonRequest("DELETE", "/v1/integration/aws", awsAccount, nil)
}
// AttachLambdaARNIntegrationAWS attaches a Lambda ARN to an AWS account ID
// to enable log collection.
func (client *Client) AttachLambdaARNIntegrationAWS(lambdaARN *IntegrationAWSLambdaARNRequest) error {
return client.doJsonRequest("POST", "/v1/integration/aws/logs", lambdaARN, nil)
}
// EnableLogCollectionAWSServices enables log collection for the given AWS
// services on the account named in the request.
func (client *Client) EnableLogCollectionAWSServices(services *IntegrationAWSServicesLogCollection) error {
return client.doJsonRequest("POST", "/v1/integration/aws/logs/services", services, nil)
}
// GetIntegrationAWSLogCollection gets the whole AWS log collection
// configuration (one entry per AWS account).
func (client *Client) GetIntegrationAWSLogCollection() (*[]IntegrationAWSLogCollection, error) {
	var configs []IntegrationAWSLogCollection
	err := client.doJsonRequest("GET", "/v1/integration/aws/logs", nil, &configs)
	if err != nil {
		return nil, err
	}
	return &configs, nil
}
// DeleteAWSLogCollection removes the log collection configuration for the
// given Lambda ARN and AWS account.
func (client *Client) DeleteAWSLogCollection(lambdaARN *IntegrationAWSLambdaARNRequest) error {
return client.doJsonRequest("DELETE", "/v1/integration/aws/logs", lambdaARN, nil)
}
/*
Google Cloud Platform Integration
*/

39
vendor/github.com/zorkian/go-datadog-api/ip_ranges.go generated vendored Normal file
View File

@@ -0,0 +1,39 @@
/*
* Datadog API for Go
*
* Please see the included LICENSE file for licensing information.
*
* Copyright 2019 by authors and contributors.
*/
package datadog
import (
"fmt"
)
// IPRangesResp maps the IP-ranges endpoints, which share one structure:
//   US: https://ip-ranges.datadoghq.com
//   EU: https://ip-ranges.datadoghq.eu
// Each section maps a key (presumably "prefixes_ipv4"/"prefixes_ipv6" —
// confirm against the endpoint) to a list of CIDR strings.
type IPRangesResp struct {
	Agents map[string][]string `json:"agents"`
	API map[string][]string `json:"api"`
	Apm map[string][]string `json:"apm"`
	Logs map[string][]string `json:"logs"`
	Process map[string][]string `json:"process"`
	Synthetics map[string][]string `json:"synthetics"`
	Webhooks map[string][]string `json:"webhooks"`
}
// GetIPRanges returns all IP addresses Datadog uses, grouped by section:
// agents, api, apm, logs, process, synthetics, webhooks.
func (client *Client) GetIPRanges() (*IPRangesResp, error) {
	var out IPRangesResp
	urlIPRanges, err := client.URLIPRanges()
	if err != nil {
		// Error string lowercased and "Error" prefix dropped per Go
		// convention (staticcheck ST1005).
		return nil, fmt.Errorf("getting IP ranges URL: %s", err)
	}
	if err := client.doJsonRequest("GET", urlIPRanges, nil, &out); err != nil {
		return nil, err
	}
	return &out, nil
}

View File

@@ -0,0 +1,26 @@
package datadog
// logsIndexListPath is the config API endpoint for the index order.
const logsIndexListPath = "/v1/logs/config/index-order"
// LogsIndexList represents the index list object from the config API: the
// ordered list of index names.
type LogsIndexList struct {
	IndexNames []string `json:"index_names"`
}
// GetLogsIndexList gets the full ordered list of available index names.
func (client *Client) GetLogsIndexList() (*LogsIndexList, error) {
	list := new(LogsIndexList)
	if err := client.doJsonRequest("GET", logsIndexListPath, nil, list); err != nil {
		return nil, err
	}
	return list, nil
}
// UpdateLogsIndexList updates the order of indexes and returns the
// resulting list as stored by the API.
func (client *Client) UpdateLogsIndexList(indexList *LogsIndexList) (*LogsIndexList, error) {
	updated := new(LogsIndexList)
	if err := client.doJsonRequest("PUT", logsIndexListPath, indexList, updated); err != nil {
		return nil, err
	}
	return updated, nil
}

View File

@@ -0,0 +1,48 @@
package datadog
import (
"fmt"
)
// logsIndexPath is the config API endpoint for individual logs indexes.
const logsIndexPath = "/v1/logs/config/indexes"
// LogsIndex represents the Logs index object from the config API.
type LogsIndex struct {
	Name *string `json:"name"`
	NumRetentionDays *int64 `json:"num_retention_days,omitempty"`
	DailyLimit *int64 `json:"daily_limit,omitempty"`
	IsRateLimited *bool `json:"is_rate_limited,omitempty"`
	Filter *FilterConfiguration `json:"filter"`
	ExclusionFilters []ExclusionFilter `json:"exclusion_filters"`
}
// ExclusionFilter represents the index exclusion filter object from the config API.
type ExclusionFilter struct {
	Name *string `json:"name"`
	IsEnabled *bool `json:"is_enabled,omitempty"`
	Filter *Filter `json:"filter"`
}
// Filter represents the index filter object from the config API.
type Filter struct {
	Query *string `json:"query,omitempty"`
	SampleRate *float64 `json:"sample_rate,omitempty"`
}
// GetLogsIndex gets the logs index with the given name.
func (client *Client) GetLogsIndex(name string) (*LogsIndex, error) {
	index := new(LogsIndex)
	uri := fmt.Sprintf("%s/%s", logsIndexPath, name)
	if err := client.doJsonRequest("GET", uri, nil, index); err != nil {
		return nil, err
	}
	return index, nil
}
// UpdateLogsIndex updates the index identified by its name and returns
// the updated object.
func (client *Client) UpdateLogsIndex(name string, index *LogsIndex) (*LogsIndex, error) {
	updated := new(LogsIndex)
	uri := fmt.Sprintf("%s/%s", logsIndexPath, name)
	if err := client.doJsonRequest("PUT", uri, index, updated); err != nil {
		return nil, err
	}
	return updated, nil
}

View File

@@ -0,0 +1,29 @@
package datadog
const (
	// logsPipelineListPath is the config API endpoint for pipeline order.
	logsPipelineListPath = "/v1/logs/config/pipeline-order"
)
// LogsPipelineList struct represents the pipeline order from the Logs
// Public Config API.
type LogsPipelineList struct {
	PipelineIds []string `json:"pipeline_ids"`
}
// GetLogsPipelineList gets the full ordered list of created pipelines.
func (client *Client) GetLogsPipelineList() (*LogsPipelineList, error) {
	list := new(LogsPipelineList)
	if err := client.doJsonRequest("GET", logsPipelineListPath, nil, list); err != nil {
		return nil, err
	}
	return list, nil
}
// UpdateLogsPipelineList updates the pipeline list order. The API returns
// an error (422 Unprocessable Entity) if the request tries to delete or
// add a pipeline rather than just reorder.
func (client *Client) UpdateLogsPipelineList(pipelineList *LogsPipelineList) (*LogsPipelineList, error) {
	updated := new(LogsPipelineList)
	if err := client.doJsonRequest("PUT", logsPipelineListPath, pipelineList, updated); err != nil {
		return nil, err
	}
	return updated, nil
}

View File

@@ -0,0 +1,65 @@
/*
* Datadog API for Go
*
* Please see the included LICENSE file for licensing information.
*
* Copyright 2019 by authors and contributors.
*/
package datadog
import (
"fmt"
)
const (
	// logsPipelinesPath is the config API endpoint for individual pipelines.
	logsPipelinesPath = "/v1/logs/config/pipelines"
)
// LogsPipeline struct to represent the json object received from the Logs
// Public Config API.
type LogsPipeline struct {
	Id *string `json:"id,omitempty"`
	Type *string `json:"type,omitempty"`
	Name *string `json:"name"`
	IsEnabled *bool `json:"is_enabled,omitempty"`
	IsReadOnly *bool `json:"is_read_only,omitempty"`
	Filter *FilterConfiguration `json:"filter"`
	Processors []LogsProcessor `json:"processors,omitempty"`
}
// FilterConfiguration struct to represent the json object of filter configuration.
type FilterConfiguration struct {
	Query *string `json:"query"`
}
// GetLogsPipeline queries the Logs Public Config API for the complete
// pipeline object with the given pipeline id.
func (client *Client) GetLogsPipeline(id string) (*LogsPipeline, error) {
	pipeline := new(LogsPipeline)
	uri := fmt.Sprintf("%s/%s", logsPipelinesPath, id)
	if err := client.doJsonRequest("GET", uri, nil, pipeline); err != nil {
		return nil, err
	}
	return pipeline, nil
}
// CreateLogsPipeline sends a pipeline creation request to the Config API
// and returns the created pipeline.
func (client *Client) CreateLogsPipeline(pipeline *LogsPipeline) (*LogsPipeline, error) {
	created := new(LogsPipeline)
	if err := client.doJsonRequest("POST", logsPipelinesPath, pipeline, created); err != nil {
		return nil, err
	}
	return created, nil
}
// UpdateLogsPipeline updates the pipeline object identified by the given
// pipeline id and returns the updated pipeline.
func (client *Client) UpdateLogsPipeline(id string, pipeline *LogsPipeline) (*LogsPipeline, error) {
	updated := new(LogsPipeline)
	uri := fmt.Sprintf("%s/%s", logsPipelinesPath, id)
	if err := client.doJsonRequest("PUT", uri, pipeline, updated); err != nil {
		return nil, err
	}
	return updated, nil
}
// DeleteLogsPipeline deletes the pipeline with the given id; the API
// returns 200 OK when the operation succeeds.
func (client *Client) DeleteLogsPipeline(id string) error {
return client.doJsonRequest("DELETE", fmt.Sprintf("%s/%s", logsPipelinesPath, id), nil, nil)
}

View File

@@ -0,0 +1,231 @@
package datadog
import (
"encoding/json"
"fmt"
)
// Logs processor type identifiers as used in the config API's
// LogsProcessor.Type field.
const (
	ArithmeticProcessorType = "arithmetic-processor"
	AttributeRemapperType = "attribute-remapper"
	CategoryProcessorType = "category-processor"
	DateRemapperType = "date-remapper"
	GeoIPParserType = "geo-ip-parser"
	GrokParserType = "grok-parser"
	MessageRemapperType = "message-remapper"
	NestedPipelineType = "pipeline"
	ServiceRemapperType = "service-remapper"
	StatusRemapperType = "status-remapper"
	StringBuilderProcessorType = "string-builder-processor"
	TraceIdRemapperType = "trace-id-remapper"
	UrlParserType = "url-parser"
	UserAgentParserType = "user-agent-parser"
)
// LogsProcessor struct represents the processor object from Config API.
// Name, IsEnabled and Type are common to every processor kind; Definition
// carries the type-specific fields and is flattened into/out of the same
// JSON object by the custom MarshalJSON/UnmarshalJSON implementations below.
type LogsProcessor struct {
	Name      *string `json:"name"`
	IsEnabled *bool   `json:"is_enabled"`
	Type      *string `json:"type"`
	// Definition holds one of the concrete processor structs declared in
	// this file (ArithmeticProcessor, GrokParser, NestedPipeline, ...).
	Definition interface{} `json:"definition"`
}
// ArithmeticProcessor struct represents unique part of arithmetic processor
// object from config API.
type ArithmeticProcessor struct {
	Expression       *string `json:"expression"`
	Target           *string `json:"target"`
	IsReplaceMissing *bool   `json:"is_replace_missing"`
}

// AttributeRemapper struct represents unique part of attribute remapper object
// from config API.
type AttributeRemapper struct {
	Sources            []string `json:"sources"`
	SourceType         *string  `json:"source_type"`
	Target             *string  `json:"target"`
	TargetType         *string  `json:"target_type"`
	PreserveSource     *bool    `json:"preserve_source"`
	OverrideOnConflict *bool    `json:"override_on_conflict"`
}

// CategoryProcessor struct represents unique part of category processor object
// from config API.
type CategoryProcessor struct {
	Target     *string    `json:"target"`
	Categories []Category `json:"categories"`
}

// Category represents category object from config API.
type Category struct {
	Name   *string              `json:"name"`
	Filter *FilterConfiguration `json:"filter"`
}

// SourceRemapper represents the object from config API that contains
// only a list of sources. It is shared by the date/message/service/status/
// trace-id remapper processor types.
type SourceRemapper struct {
	Sources []string `json:"sources"`
}

// GeoIPParser represents geoIpParser object from config API.
type GeoIPParser struct {
	Sources []string `json:"sources"`
	Target  *string  `json:"target"`
}

// StringBuilderProcessor represents the unique part of the string builder
// processor object from config API.
type StringBuilderProcessor struct {
	Template         *string `json:"template"`
	Target           *string `json:"target"`
	IsReplaceMissing *bool   `json:"is_replace_missing"`
}
// GrokParser represents the grok parser processor object from config API.
type GrokParser struct {
	Source   *string   `json:"source"`
	Samples  []string  `json:"samples"`
	GrokRule *GrokRule `json:"grok"`
}

// GrokRule represents the rules for grok parser from config API.
type GrokRule struct {
	SupportRules *string `json:"support_rules"`
	MatchRules   *string `json:"match_rules"`
}

// NestedPipeline represents the pipeline as processor from config API.
// A nested pipeline groups child processors behind its own filter.
type NestedPipeline struct {
	Filter     *FilterConfiguration `json:"filter"`
	Processors []LogsProcessor      `json:"processors,omitempty"`
}

// UrlParser represents the url parser from config API.
type UrlParser struct {
	Sources                []string `json:"sources"`
	Target                 *string  `json:"target"`
	NormalizeEndingSlashes *bool    `json:"normalize_ending_slashes"`
}

// UserAgentParser represents the user agent parser from config API.
type UserAgentParser struct {
	Sources   []string `json:"sources"`
	Target    *string  `json:"target"`
	IsEncoded *bool    `json:"is_encoded"`
}
// buildProcessor converts processor Definition of type interface{} to a map of string and interface{}.
// Simple cast from interface{} to map[string]interface{} will not work for our case here,
// since the underlying types of Definition are the processor structs.
// It therefore round-trips the definition through JSON: marshal the concrete
// struct, then unmarshal into a generic map.
func buildProcessor(definition interface{}) (map[string]interface{}, error) {
	raw, err := json.Marshal(definition)
	if err != nil {
		return nil, err
	}
	var processor map[string]interface{}
	if err := json.Unmarshal(raw, &processor); err != nil {
		return nil, err
	}
	// Return an explicit nil error: the original returned the (known-nil)
	// err variable, which obscured the success path.
	return processor, nil
}
// MarshalJSON serializes logsprocessor struct to config API compatible json object.
// The common fields (name, is_enabled, type) are flattened into the same
// JSON object as the type-specific Definition fields.
func (processor *LogsProcessor) MarshalJSON() ([]byte, error) {
	mapProcessor, err := buildProcessor(processor.Definition)
	if err != nil {
		return nil, err
	}
	mapProcessor["name"] = processor.Name
	mapProcessor["is_enabled"] = processor.IsEnabled
	mapProcessor["type"] = processor.Type
	// Return json.Marshal's result directly instead of re-checking an error
	// only to return it alongside the value anyway.
	return json.Marshal(mapProcessor)
}
// UnmarshalJSON deserializes the config API json object to LogsProcessor struct.
// The object's "type" field selects which concrete struct the remaining
// fields are decoded into; the result is stored in processor.Definition.
func (processor *LogsProcessor) UnmarshalJSON(data []byte) error {
	var processorHandler struct {
		Type      *string `json:"type"`
		Name      *string `json:"name"`
		IsEnabled *bool   `json:"is_enabled"`
	}
	if err := json.Unmarshal(data, &processorHandler); err != nil {
		return err
	}
	// Guard against a payload without a "type" field: the switch below
	// dereferences the pointer and would otherwise panic.
	if processorHandler.Type == nil {
		return fmt.Errorf("cannot unmarshal processor: missing type field")
	}
	processor.Name = processorHandler.Name
	processor.IsEnabled = processorHandler.IsEnabled
	processor.Type = processorHandler.Type
	switch *processorHandler.Type {
	case ArithmeticProcessorType:
		var arithmeticProcessor ArithmeticProcessor
		if err := json.Unmarshal(data, &arithmeticProcessor); err != nil {
			return err
		}
		processor.Definition = arithmeticProcessor
	case AttributeRemapperType:
		var attributeRemapper AttributeRemapper
		if err := json.Unmarshal(data, &attributeRemapper); err != nil {
			return err
		}
		processor.Definition = attributeRemapper
	case CategoryProcessorType:
		var categoryProcessor CategoryProcessor
		if err := json.Unmarshal(data, &categoryProcessor); err != nil {
			return err
		}
		processor.Definition = categoryProcessor
	case DateRemapperType,
		MessageRemapperType,
		ServiceRemapperType,
		StatusRemapperType,
		TraceIdRemapperType:
		// These five types share the same shape: a plain list of sources.
		var sourceRemapper SourceRemapper
		if err := json.Unmarshal(data, &sourceRemapper); err != nil {
			return err
		}
		processor.Definition = sourceRemapper
	case GeoIPParserType:
		var geoIPParser GeoIPParser
		if err := json.Unmarshal(data, &geoIPParser); err != nil {
			return err
		}
		processor.Definition = geoIPParser
	case GrokParserType:
		var grokParser GrokParser
		if err := json.Unmarshal(data, &grokParser); err != nil {
			return err
		}
		processor.Definition = grokParser
	case NestedPipelineType:
		var nestedPipeline NestedPipeline
		if err := json.Unmarshal(data, &nestedPipeline); err != nil {
			return err
		}
		processor.Definition = nestedPipeline
	case StringBuilderProcessorType:
		var stringBuilder StringBuilderProcessor
		if err := json.Unmarshal(data, &stringBuilder); err != nil {
			return err
		}
		processor.Definition = stringBuilder
	case UrlParserType:
		var urlParser UrlParser
		if err := json.Unmarshal(data, &urlParser); err != nil {
			return err
		}
		processor.Definition = urlParser
	case UserAgentParserType:
		var userAgentParser UserAgentParser
		if err := json.Unmarshal(data, &userAgentParser); err != nil {
			return err
		}
		processor.Definition = userAgentParser
	default:
		return fmt.Errorf("cannot unmarshal processor of type: %s", *processorHandler.Type)
	}
	return nil
}

View File

@@ -182,33 +182,18 @@ func (client *Client) GetMonitor(id int) (*Monitor, error) {
}
// GetMonitorsByName retrieves monitors by name
func (self *Client) GetMonitorsByName(name string) ([]Monitor, error) {
var out reqMonitors
query, err := url.ParseQuery(fmt.Sprintf("name=%v", name))
if err != nil {
return nil, err
}
err = self.doJsonRequest("GET", fmt.Sprintf("/v1/monitor?%v", query.Encode()), nil, &out.Monitors)
if err != nil {
return nil, err
}
return out.Monitors, nil
func (client *Client) GetMonitorsByName(name string) ([]Monitor, error) {
return client.GetMonitorsWithOptions(MonitorQueryOpts{Name: &name})
}
// GetMonitorsByTags retrieves monitors by a slice of tags
func (self *Client) GetMonitorsByTags(tags []string) ([]Monitor, error) {
var out reqMonitors
query, err := url.ParseQuery(fmt.Sprintf("monitor_tags=%v", strings.Join(tags, ",")))
if err != nil {
return nil, err
}
func (client *Client) GetMonitorsByTags(tags []string) ([]Monitor, error) {
return client.GetMonitorsWithOptions(MonitorQueryOpts{Tags: tags})
}
err = self.doJsonRequest("GET", fmt.Sprintf("/v1/monitor?%v", query.Encode()), nil, &out.Monitors)
if err != nil {
return nil, err
}
return out.Monitors, nil
// GetMonitorsByMonitorTags retrieves monitors by a slice of monitor tags
func (client *Client) GetMonitorsByMonitorTags(tags []string) ([]Monitor, error) {
return client.GetMonitorsWithOptions(MonitorQueryOpts{MonitorTags: tags})
}
// DeleteMonitor removes a monitor from the system
@@ -219,8 +204,53 @@ func (client *Client) DeleteMonitor(id int) error {
// GetMonitors returns a slice of all monitors
func (client *Client) GetMonitors() ([]Monitor, error) {
return client.GetMonitorsWithOptions(MonitorQueryOpts{})
}
// MonitorQueryOpts contains the options supported by
// https://docs.datadoghq.com/api/?lang=bash#get-all-monitor-details
type MonitorQueryOpts struct {
GroupStates []string
Name *string
Tags []string
MonitorTags []string
WithDowntimes *bool
}
// GetMonitorsWithOptions returns a slice of all monitors
// It supports all the options for querying
func (client *Client) GetMonitorsWithOptions(opts MonitorQueryOpts) ([]Monitor, error) {
var out reqMonitors
if err := client.doJsonRequest("GET", "/v1/monitor", nil, &out.Monitors); err != nil {
var query []string
if len(opts.Tags) > 0 {
value := fmt.Sprintf("tags=%v", strings.Join(opts.Tags, ","))
query = append(query, value)
}
if len(opts.GroupStates) > 0 {
value := fmt.Sprintf("group_states=%v", strings.Join(opts.GroupStates, ","))
query = append(query, value)
}
if len(opts.MonitorTags) > 0 {
value := fmt.Sprintf("monitor_tags=%v", strings.Join(opts.MonitorTags, ","))
query = append(query, value)
}
if v, ok := opts.GetWithDowntimesOk(); ok {
query = append(query, fmt.Sprintf("with_downtimes=%t", v))
}
if v, ok := opts.GetNameOk(); ok {
query = append(query, fmt.Sprintf("name=%s", v))
}
queryString, err := url.ParseQuery(strings.Join(query, "&"))
if err != nil {
return nil, err
}
err = client.doJsonRequest("GET", fmt.Sprintf("/v1/monitor?%v", queryString.Encode()), nil, &out.Monitors)
if err != nil {
return nil, err
}
return out.Monitors, nil

View File

@@ -28,20 +28,58 @@ type Response struct {
Error string `json:"error"`
}
// uriForAPI is to be called with something like "/v1/events" and it will give
// the proper request URI to be posted to.
// apiAcceptsKeysInHeaders reports whether the given API path accepts the
// api/application keys via request headers; the listed legacy endpoints
// still require them as query parameters.
func (client *Client) apiAcceptsKeysInHeaders(api string) bool {
	legacyPrefixes := []string{"/v1/series", "/v1/check_run", "/v1/events", "/v1/screen"}
	for _, prefix := range legacyPrefixes {
		if strings.HasPrefix(api, prefix) {
			return false
		}
	}
	return true
}
// uriForAPI is to be called with either an API resource like "/v1/events"
// or a full URL like the IP Ranges one
// and it will give the proper request URI to be posted to.
func (client *Client) uriForAPI(api string) (string, error) {
apiBase, err := url.Parse(client.baseUrl + "/api" + api)
var err error
// If api is a URI such as /v1/hosts/, /v2/dashboards... add credentials and return a properly formatted URL
if !(strings.HasPrefix(api, "https://") || strings.HasPrefix(api, "http://")) {
apiBase, err := url.Parse(client.baseUrl + "/api" + api)
if err != nil {
return "", err
}
q := apiBase.Query()
if !client.apiAcceptsKeysInHeaders(api) {
q.Add("api_key", client.apiKey)
q.Add("application_key", client.appKey)
}
apiBase.RawQuery = q.Encode()
return apiBase.String(), nil
}
// if api is a generic URL we simply return it
apiBase, err := url.Parse(api)
if err != nil {
return "", err
}
q := apiBase.Query()
q.Add("api_key", client.apiKey)
q.Add("application_key", client.appKey)
apiBase.RawQuery = q.Encode()
return apiBase.String(), nil
}
// URLIPRanges returns the IP Ranges URL used to whitelist IP addresses in use to send data to Datadog
// agents, api, apm, logs, process, synthetics, webhooks.
// The URL is derived from the TLD of the configured base URL (eu, com...).
func (client *Client) URLIPRanges() (string, error) {
	baseURL := client.GetBaseUrl()
	// Get the domain from the URL: eu, com...
	parts := strings.Split(baseURL, ".")
	// Guard against base URLs with fewer than three dot-separated parts,
	// which would previously panic with an index out of range.
	if len(parts) < 3 {
		return "", fmt.Errorf("unable to determine Datadog domain from base URL %q", baseURL)
	}
	switch domain := parts[2]; domain {
	case "eu":
		return "https://ip-ranges.datadoghq.eu", nil
	case "com":
		return "https://ip-ranges.datadoghq.com", nil
	default:
		// Previously an unknown domain silently returned an empty URL with
		// a nil error; report it explicitly instead.
		return "", fmt.Errorf("unsupported Datadog domain %q", domain)
	}
}
// redactError removes api and application keys from error strings
func (client *Client) redactError(err error) error {
if err == nil {
@@ -206,8 +244,16 @@ func (client *Client) createRequest(method, api string, reqbody interface{}) (*h
if err != nil {
return nil, err
}
if client.apiAcceptsKeysInHeaders(api) {
req.Header.Set("DD-API-KEY", client.apiKey)
req.Header.Set("DD-APPLICATION-KEY", client.appKey)
}
if bodyReader != nil {
req.Header.Add("Content-Type", "application/json")
}
for k, v := range client.ExtraHeader {
req.Header.Add(k, v)
}
return req, nil
}

View File

@@ -57,7 +57,6 @@ type TileDefMarker struct {
}
type TileDefRequest struct {
Query *string `json:"q,omitempty"`
// For Hostmap
Type *string `json:"type,omitempty"`
@@ -69,6 +68,12 @@ type TileDefRequest struct {
TagFilters []*string `json:"tag_filters"`
Limit *int `json:"limit,omitempty"`
// A Widget can only have one of these types of query.
Query *string `json:"q,omitempty"`
LogQuery *TileDefApmOrLogQuery `json:"log_query,omitempty"`
ApmQuery *TileDefApmOrLogQuery `json:"apm_query,omitempty"`
ProcessQuery *TileDefProcessQuery `json:"process_query,omitempty"`
ConditionalFormats []ConditionalFormat `json:"conditional_formats,omitempty"`
Style *TileDefRequestStyle `json:"style,omitempty"`
Aggregator *string `json:"aggregator,omitempty"`
@@ -81,12 +86,45 @@ type TileDefRequest struct {
Metadata map[string]TileDefMetadata `json:"metadata,omitempty"`
}
// TileDefApmOrLogQuery represents an APM or a Log query
type TileDefApmOrLogQuery struct {
	Index   *string                       `json:"index"`
	Compute *TileDefApmOrLogQueryCompute  `json:"compute"`
	Search  *TileDefApmOrLogQuerySearch   `json:"search,omitempty"`
	GroupBy []TileDefApmOrLogQueryGroupBy `json:"groupBy,omitempty"`
}

// TileDefApmOrLogQueryCompute describes the aggregation applied to the query
// (e.g. count, avg over a facet, optionally bucketed by interval).
type TileDefApmOrLogQueryCompute struct {
	Aggregation *string `json:"aggregation"`
	Facet       *string `json:"facet,omitempty"`
	Interval    *string `json:"interval,omitempty"`
}

// TileDefApmOrLogQuerySearch holds the free-text search query string.
type TileDefApmOrLogQuerySearch struct {
	Query *string `json:"query"`
}

// TileDefApmOrLogQueryGroupBy describes one group-by dimension of the query.
type TileDefApmOrLogQueryGroupBy struct {
	Facet *string                          `json:"facet"`
	Limit *int                             `json:"limit,omitempty"`
	Sort  *TileDefApmOrLogQueryGroupBySort `json:"sort,omitempty"`
}

// TileDefApmOrLogQueryGroupBySort describes how grouped results are ordered.
type TileDefApmOrLogQueryGroupBySort struct {
	Aggregation *string `json:"aggregation"`
	Order       *string `json:"order"`
	Facet       *string `json:"facet,omitempty"`
}

// TileDefProcessQuery represents a live-process query for a widget.
type TileDefProcessQuery struct {
	Metric   *string  `json:"metric"`
	SearchBy *string  `json:"search_by,omitempty"`
	FilterBy []string `json:"filter_by,omitempty"`
	Limit    *int     `json:"limit,omitempty"`
}

// TileDefMetadata holds per-expression display metadata (e.g. an alias).
type TileDefMetadata struct {
	Alias *string `json:"alias,omitempty"`
}
type ConditionalFormat struct {
Color *string `json:"color,omitempty"`
Color *string `json:"custom_fg_color,omitempty"`
Palette *string `json:"palette,omitempty"`
Comparator *string `json:"comparator,omitempty"`
Invert *bool `json:"invert,omitempty"`

View File

@@ -0,0 +1,596 @@
/*
* Datadog API for Go
*
* Please see the included LICENSE file for licensing information.
*
* Copyright 2017 by authors and contributors.
*/
package datadog
import (
"encoding/json"
"fmt"
"net/url"
"regexp"
"strings"
"time"
)
// Define the available machine-readable SLO types
const (
	ServiceLevelObjectiveTypeMonitorID int = 0
	ServiceLevelObjectiveTypeMetricID  int = 1
)

// Define the available human-readable SLO types
var (
	ServiceLevelObjectiveTypeMonitor = "monitor"
	ServiceLevelObjectiveTypeMetric  = "metric"
)

// ServiceLevelObjectiveTypeFromID maps machine-readable type to human-readable type
var ServiceLevelObjectiveTypeFromID = map[int]string{
	ServiceLevelObjectiveTypeMonitorID: ServiceLevelObjectiveTypeMonitor,
	ServiceLevelObjectiveTypeMetricID:  ServiceLevelObjectiveTypeMetric,
}

// ServiceLevelObjectiveTypeToID maps human-readable type to machine-readable type
// (the inverse of ServiceLevelObjectiveTypeFromID).
var ServiceLevelObjectiveTypeToID = map[string]int{
	ServiceLevelObjectiveTypeMonitor: ServiceLevelObjectiveTypeMonitorID,
	ServiceLevelObjectiveTypeMetric:  ServiceLevelObjectiveTypeMetricID,
}
// ServiceLevelObjectiveThreshold defines an SLO threshold and timeframe
// For example it's the `<SLO: ex 99.999%> of <SLI> within <TimeFrame: ex 7d>
type ServiceLevelObjectiveThreshold struct {
	// TimeFrame is the rolling window the threshold applies to, e.g. "7d".
	TimeFrame      *string  `json:"timeframe,omitempty"`
	Target         *float64 `json:"target,omitempty"`
	TargetDisplay  *string  `json:"target_display,omitempty"` // Read-Only for monitor type
	Warning        *float64 `json:"warning,omitempty"`
	WarningDisplay *string  `json:"warning_display,omitempty"` // Read-Only for monitor type
}

// thresholdTolerance is the epsilon used when comparing target/warning floats.
const thresholdTolerance float64 = 1e-8
// Equal check if one threshold is equal to another.
func (s *ServiceLevelObjectiveThreshold) Equal(o interface{}) bool {
	other, ok := o.(*ServiceLevelObjectiveThreshold)
	if !ok {
		return false
	}
	// Compare each component in turn; floats use an epsilon comparison.
	if s.GetTimeFrame() != other.GetTimeFrame() {
		return false
	}
	if !Float64AlmostEqual(s.GetTarget(), other.GetTarget(), thresholdTolerance) {
		return false
	}
	return Float64AlmostEqual(s.GetWarning(), other.GetWarning(), thresholdTolerance)
}
// String implements Stringer
func (s ServiceLevelObjectiveThreshold) String() string {
	// Fix: the format string previously never closed the "Threshold{" brace.
	return fmt.Sprintf("Threshold{timeframe=%s target=%f target_display=%s warning=%f warning_display=%s}",
		s.GetTimeFrame(), s.GetTarget(), s.GetTargetDisplay(), s.GetWarning(), s.GetWarningDisplay())
}
// ServiceLevelObjectiveMetricQuery represents a metric-based SLO definition query
// Numerator is the sum of the `good` events
// Denominator is the sum of the `total` events
type ServiceLevelObjectiveMetricQuery struct {
	Numerator   *string `json:"numerator,omitempty"`
	Denominator *string `json:"denominator,omitempty"`
}
// ServiceLevelObjectiveThresholds is a sortable array of ServiceLevelObjectiveThreshold(s)
type ServiceLevelObjectiveThresholds []*ServiceLevelObjectiveThreshold

// Len implements sort.Interface length
func (s ServiceLevelObjectiveThresholds) Len() int {
	return len(s)
}

// Less implements sort.Interface less comparator.
// Thresholds sort by the duration of their timeframe (shortest first).
// NOTE(review): conversion errors are deliberately ignored here, so a
// malformed timeframe sorts as a zero duration.
func (s ServiceLevelObjectiveThresholds) Less(i, j int) bool {
	iDur, _ := ServiceLevelObjectiveTimeFrameToDuration(s[i].GetTimeFrame())
	jDur, _ := ServiceLevelObjectiveTimeFrameToDuration(s[j].GetTimeFrame())
	return iDur < jDur
}

// Swap implements sort.Interface swap method
func (s ServiceLevelObjectiveThresholds) Swap(i, j int) {
	s[i], s[j] = s[j], s[i]
}
// Equal check if one set of thresholds is equal to another.
// Two sets are equal when they contain the same timeframes and each
// timeframe's threshold compares equal; order is irrelevant.
func (s ServiceLevelObjectiveThresholds) Equal(o interface{}) bool {
	other, ok := o.(ServiceLevelObjectiveThresholds)
	if !ok {
		return false
	}
	if len(s) != len(other) {
		// easy case
		return false
	}
	// Index both sides by timeframe, then match them up.
	ours := make(map[string]*ServiceLevelObjectiveThreshold, len(s))
	for _, t := range s {
		ours[t.GetTimeFrame()] = t
	}
	theirs := make(map[string]*ServiceLevelObjectiveThreshold, len(other))
	for _, t := range other {
		theirs[t.GetTimeFrame()] = t
	}
	for timeframe, t := range theirs {
		match, found := ours[timeframe]
		if !found || !match.Equal(t) {
			return false
		}
		// drop matched entries so any leftover signals a difference
		delete(ours, timeframe)
	}
	return len(ours) == 0
}
// ServiceLevelObjective defines the Service Level Objective entity
type ServiceLevelObjective struct {
	// Common
	ID          *string                         `json:"id,omitempty"`
	Name        *string                         `json:"name,omitempty"`
	Description *string                         `json:"description,omitempty"`
	Tags        []string                        `json:"tags,omitempty"`
	Thresholds  ServiceLevelObjectiveThresholds `json:"thresholds,omitempty"`
	Type        *string                         `json:"type,omitempty"`
	TypeID      *int                            `json:"type_id,omitempty"` // Read-Only

	// SLI definition
	// Query is used by metric-type SLOs; MonitorIDs/MonitorSearch/Groups
	// apply to monitor-type SLOs.
	Query         *ServiceLevelObjectiveMetricQuery `json:"query,omitempty"`
	MonitorIDs    []int                             `json:"monitor_ids,omitempty"`
	MonitorSearch *string                           `json:"monitor_search,omitempty"`
	Groups        []string                          `json:"groups,omitempty"`

	// Informational (all Read-Only; excluded from MarshalJSON below)
	MonitorTags []string `json:"monitor_tags,omitempty"` // Read-Only
	Creator     *Creator `json:"creator,omitempty"`      // Read-Only
	CreatedAt   *int     `json:"created_at,omitempty"`   // Read-Only
	ModifiedAt  *int     `json:"modified_at,omitempty"`  // Read-Only
}
// MarshalJSON implements custom marshaler to ignore some fields
// (the read-only ones: type_id, monitor_tags, creator and timestamps).
func (s *ServiceLevelObjective) MarshalJSON() ([]byte, error) {
	// A local payload type restricted to the writable fields.
	type sloPayload struct {
		ID          *string                         `json:"id,omitempty"`
		Name        *string                         `json:"name,omitempty"`
		Description *string                         `json:"description,omitempty"`
		Tags        []string                        `json:"tags,omitempty"`
		Thresholds  ServiceLevelObjectiveThresholds `json:"thresholds,omitempty"`
		Type        *string                         `json:"type,omitempty"`

		// SLI definition
		Query         *ServiceLevelObjectiveMetricQuery `json:"query,omitempty"`
		MonitorIDs    []int                             `json:"monitor_ids,omitempty"`
		MonitorSearch *string                           `json:"monitor_search,omitempty"`
		Groups        []string                          `json:"groups,omitempty"`
	}
	payload := sloPayload{
		ID:            s.ID,
		Name:          s.Name,
		Description:   s.Description,
		Tags:          s.Tags,
		Thresholds:    s.Thresholds,
		Type:          s.Type,
		Query:         s.Query,
		MonitorIDs:    s.MonitorIDs,
		MonitorSearch: s.MonitorSearch,
		Groups:        s.Groups,
	}
	return json.Marshal(&payload)
}
var sloTimeFrameToDurationRegex = regexp.MustCompile(`(?P<quantity>\d+)(?P<unit>(d))`)
// ServiceLevelObjectiveTimeFrameToDuration will convert a timeframe into a duration
func ServiceLevelObjectiveTimeFrameToDuration(timeframe string) (time.Duration, error) {
match := sloTimeFrameToDurationRegex.FindStringSubmatch(timeframe)
result := make(map[string]string)
for i, name := range sloTimeFrameToDurationRegex.SubexpNames() {
if i != 0 && name != "" {
result[name] = match[i]
}
}
if len(result) != 2 {
return 0, fmt.Errorf("invalid timeframe specified: '%s'", timeframe)
}
qty, err := json.Number(result["quantity"]).Int64()
if err != nil {
return 0, fmt.Errorf("invalid timeframe specified, could not convert quantity to number")
}
if qty <= 0 {
return 0, fmt.Errorf("invalid timeframe specified, quantity must be a positive number")
}
switch result["unit"] {
// FUTURE: will support more time frames, hence the switch here.
default:
// only matches on `d` currently, so this is simple
return time.Hour * 24 * time.Duration(qty), nil
}
}
// CreateServiceLevelObjective adds a new service level objective to the system. This returns a pointer
// to the service level objective so you can pass that to UpdateServiceLevelObjective or DeleteServiceLevelObjective
// later if needed.
func (client *Client) CreateServiceLevelObjective(slo *ServiceLevelObjective) (*ServiceLevelObjective, error) {
	if slo == nil {
		return nil, fmt.Errorf("no SLO specified")
	}
	var out reqServiceLevelObjectives
	if err := client.doJsonRequest("POST", "/v1/slo", slo, &out); err != nil {
		return nil, err
	}
	if out.Error != "" {
		// %s verb: the API message may contain % characters (go vet printf).
		return nil, fmt.Errorf("%s", out.Error)
	}
	// Guard against an empty data payload to avoid an index-out-of-range panic.
	if len(out.Data) == 0 {
		return nil, fmt.Errorf("no SLO returned by the API")
	}
	return out.Data[0], nil
}

// UpdateServiceLevelObjective takes a service level objective that was previously retrieved through some method
// and sends it back to the server.
func (client *Client) UpdateServiceLevelObjective(slo *ServiceLevelObjective) (*ServiceLevelObjective, error) {
	if slo == nil {
		return nil, fmt.Errorf("no SLO specified")
	}
	if _, ok := slo.GetIDOk(); !ok {
		return nil, fmt.Errorf("SLO must be created first")
	}
	var out reqServiceLevelObjectives
	if err := client.doJsonRequest("PUT", fmt.Sprintf("/v1/slo/%s", slo.GetID()), slo, &out); err != nil {
		return nil, err
	}
	if out.Error != "" {
		// %s verb: the API message may contain % characters (go vet printf).
		return nil, fmt.Errorf("%s", out.Error)
	}
	// Guard against an empty data payload to avoid an index-out-of-range panic.
	if len(out.Data) == 0 {
		return nil, fmt.Errorf("no SLO returned by the API")
	}
	return out.Data[0], nil
}
// reqServiceLevelObjectives is the response envelope for endpoints that
// return a list of SLOs.
type reqServiceLevelObjectives struct {
	Data  []*ServiceLevelObjective `json:"data"`
	Error string                   `json:"error"`
}

// SearchServiceLevelObjectives searches for service level objectives by search criteria.
// limit will limit the amount of SLO's returned, the API will enforce a maximum and default to a minimum if not specified
func (client *Client) SearchServiceLevelObjectives(limit int, offset int, query string, ids []string) ([]*ServiceLevelObjective, error) {
	var out reqServiceLevelObjectives
	uriValues := url.Values{}
	if limit > 0 {
		uriValues.Set("limit", fmt.Sprintf("%d", limit))
	}
	if offset >= 0 {
		uriValues.Set("offset", fmt.Sprintf("%d", offset))
	}

	// Either use `query` or use `ids`
	hasQuery := strings.TrimSpace(query) != ""
	hasIDs := len(ids) > 0
	if hasQuery && hasIDs {
		return nil, fmt.Errorf("invalid search: must specify either ids OR query, not both")
	}
	// specify by query
	if hasQuery {
		uriValues.Set("query", query)
	}
	// specify by `ids`
	if hasIDs {
		uriValues.Set("ids", strings.Join(ids, ","))
	}

	uri := "/v1/slo?" + uriValues.Encode()
	if err := client.doJsonRequest("GET", uri, nil, &out); err != nil {
		return nil, err
	}
	if out.Error != "" {
		// %s verb: the API message may contain % characters (go vet printf).
		return nil, fmt.Errorf("%s", out.Error)
	}
	return out.Data, nil
}
// reqSingleServiceLevelObjective is the response envelope for endpoints that
// return a single SLO.
type reqSingleServiceLevelObjective struct {
	Data  *ServiceLevelObjective `json:"data"`
	Error string                 `json:"error"`
}

// GetServiceLevelObjective retrieves an service level objective by identifier.
func (client *Client) GetServiceLevelObjective(id string) (*ServiceLevelObjective, error) {
	if id == "" {
		return nil, fmt.Errorf("no SLO specified")
	}
	var out reqSingleServiceLevelObjective
	if err := client.doJsonRequest("GET", fmt.Sprintf("/v1/slo/%s", id), nil, &out); err != nil {
		return nil, err
	}
	if out.Error != "" {
		// %s verb: the API message may contain % characters (go vet printf).
		return nil, fmt.Errorf("%s", out.Error)
	}
	return out.Data, nil
}
// reqDeleteResp is the response envelope for SLO delete endpoints; Data lists
// the identifiers of the deleted SLOs.
type reqDeleteResp struct {
	Data  []string `json:"data"`
	Error string   `json:"error"`
}

// DeleteServiceLevelObjective removes an service level objective from the system.
func (client *Client) DeleteServiceLevelObjective(id string) error {
	if id == "" {
		return fmt.Errorf("no SLO specified")
	}
	var out reqDeleteResp
	if err := client.doJsonRequest("DELETE", fmt.Sprintf("/v1/slo/%s", id), nil, &out); err != nil {
		return err
	}
	if out.Error != "" {
		// %s verb: the API message may contain % characters (go vet printf).
		return fmt.Errorf("%s", out.Error)
	}
	return nil
}

// DeleteServiceLevelObjectives removes multiple service level objective from the system by id.
func (client *Client) DeleteServiceLevelObjectives(ids []string) error {
	if len(ids) == 0 {
		return fmt.Errorf("no SLOs specified")
	}
	var out reqDeleteResp
	if err := client.doJsonRequest("DELETE", "/v1/slo", ids, &out); err != nil {
		return err
	}
	if out.Error != "" {
		// %s verb: the API message may contain % characters (go vet printf).
		return fmt.Errorf("%s", out.Error)
	}
	return nil
}
// ServiceLevelObjectiveDeleteTimeFramesResponse is the response unique to the delete individual time-frames request
// this is read-only
type ServiceLevelObjectiveDeleteTimeFramesResponse struct {
	// DeletedIDs lists SLOs removed entirely (all their timeframes deleted).
	DeletedIDs []string `json:"deleted"`
	// UpdatedIDs lists SLOs that still have at least one timeframe left.
	UpdatedIDs []string `json:"updated"`
}

// ServiceLevelObjectiveDeleteTimeFramesError is the error specific to deleting individual time frames.
// It contains more detailed information than the standard error.
type ServiceLevelObjectiveDeleteTimeFramesError struct {
	ID        *string `json:"id"`
	TimeFrame *string `json:"timeframe"`
	Message   *string `json:"message"`
}

// Error computes the human readable error
func (e ServiceLevelObjectiveDeleteTimeFramesError) Error() string {
	return fmt.Sprintf("error=%s id=%s for timeframe=%s", e.GetMessage(), e.GetID(), e.GetTimeFrame())
}

// timeframesDeleteResp is the envelope for the bulk timeframe delete endpoint.
type timeframesDeleteResp struct {
	Data   *ServiceLevelObjectiveDeleteTimeFramesResponse `json:"data"`
	Errors []*ServiceLevelObjectiveDeleteTimeFramesError  `json:"errors"`
}
// DeleteServiceLevelObjectiveTimeFrames will delete SLO timeframes individually.
// This is useful if you have a SLO with 3 time windows and only need to delete some of the time windows.
// It will do a full delete if all time windows are removed as a result.
//
// Example:
//	SLO `12345678901234567890123456789012` was defined with 2 time frames: "7d" and "30d"
//	SLO `abcdefabcdefabcdefabcdefabcdefab` was defined with 2 time frames: "30d" and "90d"
//
//	When we delete `7d` from `12345678901234567890123456789012` we still have `30d` timeframe remaining, hence this is "updated"
//	When we delete `30d` and `90d` from `abcdefabcdefabcdefabcdefabcdefab` we are left with 0 time frames, hence this is "deleted"
//	and the entire SLO config is deleted
func (client *Client) DeleteServiceLevelObjectiveTimeFrames(timeframeByID map[string][]string) (*ServiceLevelObjectiveDeleteTimeFramesResponse, error) {
	if len(timeframeByID) == 0 {
		return nil, fmt.Errorf("nothing specified")
	}
	var out timeframesDeleteResp
	if err := client.doJsonRequest("POST", "/v1/slo/bulk_delete", &timeframeByID, &out); err != nil {
		return nil, err
	}
	// len() of a nil slice is 0, so the previous `!= nil &&` check was redundant.
	if len(out.Errors) > 0 {
		errMsgs := make([]string, 0, len(out.Errors))
		for _, e := range out.Errors {
			errMsgs = append(errMsgs, e.Error())
		}
		return nil, fmt.Errorf("errors deleting timeframes: %s", strings.Join(errMsgs, ","))
	}
	return out.Data, nil
}
// ServiceLevelObjectivesCanDeleteResponse is the response for a check can delete SLO endpoint.
type ServiceLevelObjectivesCanDeleteResponse struct {
	Data struct {
		// OK lists the SLO ids that are safe to delete.
		OK []string `json:"ok"`
	} `json:"data"`
	// Errors maps SLO id to the reason it cannot be deleted.
	Errors map[string]string `json:"errors"`
}
// CheckCanDeleteServiceLevelObjectives checks if the SLO is referenced within Datadog.
// This is useful to prevent accidental deletion.
func (client *Client) CheckCanDeleteServiceLevelObjectives(ids []string) (*ServiceLevelObjectivesCanDeleteResponse, error) {
	if len(ids) == 0 {
		return nil, fmt.Errorf("nothing specified")
	}
	params := url.Values{}
	params.Set("ids", strings.Join(ids, ","))
	var out ServiceLevelObjectivesCanDeleteResponse
	if err := client.doJsonRequest("GET", "/v1/slo/can_delete?"+params.Encode(), nil, &out); err != nil {
		return nil, err
	}
	return &out, nil
}
// ServiceLevelObjectiveHistorySeriesPoint is a convenient wrapper for (timestamp, value) history data response.
// Index 0 is the timestamp, index 1 the value.
type ServiceLevelObjectiveHistorySeriesPoint [2]json.Number

// ServiceLevelObjectiveHistoryMetricSeriesData contains the `batch_query` like history data for `metric` based SLOs
type ServiceLevelObjectiveHistoryMetricSeriesData struct {
	Count    int64       `json:"count"`
	Sum      json.Number `json:"sum"`
	MetaData struct {
		QueryIndex int     `json:"query_index"`
		Aggregator string  `json:"aggr"`
		Scope      string  `json:"scope"`
		Metric     string  `json:"metric"`
		Expression string  `json:"expression"`
		Unit       *string `json:"unit"`
	} `json:"metadata"`
	// Values[i] corresponds to timestamp Times[i].
	Values []json.Number `json:"values"`
	Times  []int64       `json:"times"`
}
// ValuesAsFloats will transform all the values into a slice of float64
func (d *ServiceLevelObjectiveHistoryMetricSeriesData) ValuesAsFloats() ([]float64, error) {
	out := make([]float64, len(d.Values))
	for idx, raw := range d.Values {
		value, err := raw.Float64()
		if err != nil {
			return out, fmt.Errorf("could not deserialize value at index %d: %s", idx, err)
		}
		out[idx] = value
	}
	return out, nil
}

// ValuesAsInt64s will transform all the values into a slice of int64
func (d *ServiceLevelObjectiveHistoryMetricSeriesData) ValuesAsInt64s() ([]int64, error) {
	out := make([]int64, len(d.Values))
	for idx, raw := range d.Values {
		value, err := raw.Int64()
		if err != nil {
			return out, fmt.Errorf("could not deserialize value at index %d: %s", idx, err)
		}
		out[idx] = value
	}
	return out, nil
}
// ServiceLevelObjectiveHistoryMetricSeries defines the SLO history data response for `metric` type SLOs
type ServiceLevelObjectiveHistoryMetricSeries struct {
ResultType string `json:"res_type"`
Interval int `json:"interval"`
ResponseVersion json.Number `json:"resp_version"`
Query string `json:"query"` // a CSV of <numerator>, <denominator> queries
Message string `json:"message"` // optional message if there are specific query issues/warnings
Numerator *ServiceLevelObjectiveHistoryMetricSeriesData `json:"numerator"`
Denominator *ServiceLevelObjectiveHistoryMetricSeriesData `json:"denominator"`
}
// ServiceLevelObjectiveHistoryMonitorSeries defines the SLO history data response for `monitor` type SLOs
type ServiceLevelObjectiveHistoryMonitorSeries struct {
Uptime float32 `json:"uptime"`
SpanPrecision json.Number `json:"span_precision"`
Name string `json:"name"`
Precision map[string]json.Number `json:"precision"`
Preview bool `json:"preview"`
History []ServiceLevelObjectiveHistorySeriesPoint `json:"history"`
}
// ServiceLevelObjectiveHistoryOverall defines the overall SLO history data response
// for `monitor` type SLOs there is an additional `History` property that rolls up the overall state correctly.
type ServiceLevelObjectiveHistoryOverall struct {
Uptime float32 `json:"uptime"`
SpanPrecision json.Number `json:"span_precision"`
Name string `json:"name"`
Precision map[string]json.Number `json:"precision"`
Preview bool `json:"preview"`
// Monitor extension
History []ServiceLevelObjectiveHistorySeriesPoint `json:"history"`
}
// ServiceLevelObjectiveHistoryResponseData contains the SLO history data response.
// For `monitor` based SLOs use the `Groups` property for historical data along with the `Overall.History`.
// For `metric` based SLOs use the `Metrics` property for historical data. This contains `batch_query` like response
// data.
type ServiceLevelObjectiveHistoryResponseData struct {
	// Errors lists any per-query errors returned alongside the data.
	Errors []string `json:"errors"`
	// ToTs and FromTs bound the evaluated window (Unix epoch seconds — TODO confirm unit).
	ToTs int64 `json:"to_ts"`
	FromTs int64 `json:"from_ts"`
	// Thresholds maps timeframes to the SLO thresholds in effect.
	Thresholds map[string]ServiceLevelObjectiveThreshold `json:"thresholds"`
	// Overall is the rolled-up history across the whole SLO.
	Overall *ServiceLevelObjectiveHistoryOverall `json:"overall"`
	// metric based SLO
	Metrics *ServiceLevelObjectiveHistoryMetricSeries `json:"series"`
	// monitor based SLO
	Groups []*ServiceLevelObjectiveHistoryMonitorSeries `json:"groups"`
}
// ServiceLevelObjectiveHistoryResponse is the canonical response for SLO history data.
type ServiceLevelObjectiveHistoryResponse struct {
	// Data carries the history payload on success.
	Data *ServiceLevelObjectiveHistoryResponseData `json:"data"`
	// Error carries the API error message, if any.
	Error *string `json:"error"`
}
// GetServiceLevelObjectiveHistory will retrieve the history data for a given SLO
// over the provided [fromTs, toTs) window.
//
// id must be non-empty and toTs must be strictly after fromTs; otherwise an
// error is returned without issuing a request. Returns the decoded history
// response, or an error if the request or validation fails.
func (client *Client) GetServiceLevelObjectiveHistory(id string, fromTs time.Time, toTs time.Time) (*ServiceLevelObjectiveHistoryResponse, error) {
	// Validate inputs before doing any work.
	if id == "" {
		return nil, fmt.Errorf("nothing specified")
	}
	if !toTs.After(fromTs) {
		return nil, fmt.Errorf("toTs must be after fromTs")
	}

	uriValues := url.Values{}
	uriValues.Set("from_ts", fmt.Sprintf("%d", fromTs.Unix()))
	uriValues.Set("to_ts", fmt.Sprintf("%d", toTs.Unix()))

	// Path-escape the id so SLO ids containing reserved characters
	// (e.g. "/" or "?") cannot corrupt the request path.
	uri := fmt.Sprintf("/v1/slo/%s/history?%s", url.PathEscape(id), uriValues.Encode())

	var out ServiceLevelObjectiveHistoryResponse
	if err := client.doJsonRequest("GET", uri, nil, &out); err != nil {
		return nil, err
	}
	return &out, nil
}

View File

@@ -11,6 +11,7 @@ type SyntheticsTest struct {
MonitorId *int `json:"monitor_id,omitempty"`
Name *string `json:"name,omitempty"`
Type *string `json:"type,omitempty"`
Subtype *string `json:"subtype,omitempty"`
Tags []string `json:"tags"`
CreatedAt *string `json:"created_at,omitempty"`
ModifiedAt *string `json:"modified_at,omitempty"`
@@ -37,6 +38,8 @@ type SyntheticsRequest struct {
Timeout *int `json:"timeout,omitempty"`
Headers map[string]string `json:"headers,omitempty"`
Body *string `json:"body,omitempty"`
Host *string `json:"host,omitempty"`
Port *int `json:"port,omitempty"`
}
type SyntheticsAssertion struct {
@@ -54,6 +57,7 @@ type SyntheticsOptions struct {
MinFailureDuration *int `json:"min_failure_duration,omitempty"`
MinLocationFailed *int `json:"min_location_failed,omitempty"`
DeviceIds []string `json:"device_ids,omitempty"`
AcceptSelfSigned *bool `json:"accept_self_signed,omitempty"`
}
type SyntheticsUser struct {

View File

@@ -23,6 +23,28 @@ type User struct {
IsAdmin *bool `json:"is_admin,omitempty"`
}
type reqUpdateUser struct {
Email *string `json:"email,omitempty"`
Name *string `json:"name,omitempty"`
Role *string `json:"role,omitempty"`
AccessRole *string `json:"access_role,omitempty"`
Verified *bool `json:"verified,omitempty"`
Disabled *bool `json:"disabled,omitempty"`
IsAdmin *bool `json:"is_admin,omitempty"`
}
func reqUpdateUserFromUser(user User) reqUpdateUser {
return reqUpdateUser{
Email: user.Email,
Name: user.Name,
Role: user.Role,
AccessRole: user.AccessRole,
Verified: user.Verified,
Disabled: user.Disabled,
IsAdmin: user.IsAdmin,
}
}
// reqInviteUsers contains email addresses to send invitations to.
type reqInviteUsers struct {
Emails []string `json:"emails,omitempty"`
@@ -85,7 +107,8 @@ func (client *Client) GetUser(handle string) (user User, err error) {
// and returns an error if the update failed
func (client *Client) UpdateUser(user User) error {
uri := "/v1/user/" + *user.Handle
return client.doJsonRequest("PUT", uri, user, nil)
req := reqUpdateUserFromUser(user)
return client.doJsonRequest("PUT", uri, req, nil)
}
// DeleteUser deletes a user and returns an error if deletion failed