mirror of https://github.com/taigrr/yq synced 2025-01-18 04:53:17 -08:00

Compare commits: 1.4...master (840 commits)

Author SHA1 Message Date
Mike Farah
fae2b2643c Added gofmt to format command 2021-06-01 10:52:14 +10:00
Mike Farah
dd86b5e7f2 Fixing doc 2021-05-28 17:00:25 +10:00
Mike Farah
f1f75683c1 Fixed nil RHS bug in alternative operator #838 2021-05-28 16:59:02 +10:00
Mike Farah
38b9856f50 Increment version 2021-05-22 08:24:08 +10:00
Mike Farah
48eeb2a9df Fixes update-assign with collect object issue #830 2021-05-22 08:22:45 +10:00
Mike Farah
af283315f2 Increment version 2021-05-21 14:27:24 +10:00
Mike Farah
d18a6963f6 Fixes nested array indexing #824 2021-05-21 14:18:24 +10:00
Mike Farah
77edbb9f5c Fixing readonly ops not to modify context when paths dont exist 2021-05-16 15:02:31 +10:00
Mike Farah
179c44aacc Fixing readonly ops not to modify context when paths dont exist 2021-05-16 14:36:13 +10:00
Mike Farah
bc70c1fb16 Added blank alias example 2021-05-16 14:18:18 +10:00
Mike Farah
0b71a40797 Fixing readonly ops not to modify context when paths dont exist 2021-05-16 14:17:13 +10:00
Mike Farah
3f51a44596 Fixing readonly ops not to modify context when paths dont exist 2021-05-16 14:00:30 +10:00
Mike Farah
afebf0e621 Increment version 2021-05-16 12:43:26 +10:00
Mike Farah
dc464a5b10 Added ability to escape double quotes in double quotes 2021-05-16 12:38:17 +10:00
Mike Farah
5340ed0ad3 Fixed handling of null expressions in equals op 2021-05-16 12:38:17 +10:00
Mike Farah
7b52c5fe0e Increment version 2021-05-14 15:04:56 +10:00
Mike Farah
f4392f8658 Added any_c and all_c operators 2021-05-14 15:03:28 +10:00
Mike Farah
8e14b3b393 Added any and all operators 2021-05-14 14:29:55 +10:00
Mike Farah
8627441705 Added unique operator 2021-05-14 09:43:52 +10:00
Mike Farah
aa95ecd012 Update operator docs 2021-05-11 14:35:59 +10:00
Mike Farah
a2bd463a91 Fixed null issue with entry operators 2021-05-10 10:42:43 +10:00
Mike Farah
6c3965dca3 Increment version 2021-05-10 09:39:59 +10:00
Mike Farah
bb3ffd40b5 Added optional traverse flag 2021-05-09 15:36:33 +10:00
Mike Farah
cc08afc435 Added with_entries 2021-05-09 15:12:50 +10:00
Mike Farah
941a453163 Added from_entries op 2021-05-09 14:18:25 +10:00
Mike Farah
77630ca179 Added to_entries op 2021-05-09 13:59:23 +10:00
Mike Farah
ae4b606707 Fixed merge anchor bug #800 2021-05-09 13:26:02 +10:00
Mike Farah
37f3e21970 Fixed boolean op with empty context issue 2021-05-09 12:44:05 +10:00
Mike Farah
25d0787011 updating operator docs 2021-05-05 15:03:27 +10:00
Mike Farah
b5b8da0a1d Updating comment docs 2021-04-29 13:18:57 +10:00
Mike Farah
fa21510194 Moved multiply doc example lower 2021-04-29 12:03:56 +10:00
Mike Farah
f541194250 Added complex merge example 2021-04-28 20:35:10 +10:00
Mike Farah
38666f4db6 Added another style example for doc 2021-04-26 14:18:18 +10:00
Mike Farah
8b327d0414 Increment version 2021-04-25 12:07:22 +10:00
Mike Farah
c8630fe4f3 Fixes delete issue #793 2021-04-25 12:05:56 +10:00
Mike Farah
87df9b1ae6 Updating operator doc 2021-04-24 17:41:06 +10:00
Mike Farah
2483c38eeb Added command help about using stdin 2021-04-19 10:27:01 +10:00
Mike Farah
b2a538bdfc Better string sub documentation 2021-04-16 16:07:40 +10:00
Mike Farah
6c26344449 Incrementing version 2021-04-15 16:22:24 +10:00
Mike Farah
daf0bfe1b9 Added string substitute command 2021-04-15 16:09:47 +10:00
Mike Farah
750a00ec35 Added "expand" to explode docs for searchability 2021-04-13 13:59:26 +10:00
Mike Farah
25e0a824c5 Fixed alternative operator when LHS has empty matches 2021-04-13 10:53:46 +10:00
Mike Farah
095f921f62 4.6.3 release 2021-03-25 08:45:59 +11:00
Mike Farah
12d3425b4a Added subtract operator (numbers only) 2021-03-25 08:12:01 +11:00
Mike Farah
0249f00bd5 Added chocolate badges thanks @adriens 2021-03-23 20:40:39 +11:00
Mike Farah
42b1bf9678 updated release notes to run gosec manually 2021-03-19 13:41:56 +11:00
Mike Farah
c632fd3641 cannot run gosec on all docker platforms, removing from devtools 2021-03-19 13:40:24 +11:00
Mike Farah
0470e5cd69 cannot run gosec on all docker platforms, removing for now 2021-03-19 13:21:07 +11:00
Mike Farah
35c6d07248 Bump version 2021-03-19 12:59:00 +11:00
Mike Farah
3f0be107d4 Bump dependencies 2021-03-19 12:58:43 +11:00
Mike Farah
21a9e506cb Fixed merge comments 2021-03-19 12:54:03 +11:00
Mike Farah
3722367fbb Dont print doc separators for JSON (https://github.com/mikefarah/yq/issues/735) 2021-03-19 12:40:56 +11:00
Mike Farah
f7b50e9853 Fixed += operator (https://github.com/mikefarah/yq/issues/750) 2021-03-19 12:36:05 +11:00
Mike Farah
4f3fe256aa Fixed precedence of CREATE_MAP (https://github.com/mikefarah/yq/issues/753) 2021-03-19 12:09:32 +11:00
Mike Farah
2595a929c9 Fixing doc links 2021-03-19 11:52:13 +11:00
Mike Farah
b2186d5404 Added gosec 2021-03-03 19:44:34 +11:00
Mike Farah
e93c43f7a0 Improving docs 2021-02-26 11:31:43 +11:00
Mike Farah
b1a5387cdd Update readme 2021-02-25 17:04:08 +11:00
Mike Farah
5911ab2929 Bump version 2021-02-25 16:52:49 +11:00
Mike Farah
2ed5b2ff59 Improved lexer performance! 2021-02-25 16:47:55 +11:00
Mike Farah
111c6e0be1 Increment version 2021-02-18 11:19:16 +11:00
Mike Farah
81136ad57e Arrays no longer deeply merge by defauly, like jq 2021-02-18 11:16:54 +11:00
Mike Farah
a6cd250987 nicer reduce example 2021-02-15 18:23:50 +11:00
Mike Farah
ee1f55630f nicer reduce example 2021-02-15 17:33:41 +11:00
Mike Farah
9072e8d3b3 Added context variable for reduce 2021-02-15 17:31:12 +11:00
Mike Farah
99b08fd612 Added reduce examples and doc 2021-02-15 16:38:53 +11:00
Mike Farah
b2317a14ef infix reduce 2021-02-15 16:06:37 +11:00
Mike Farah
3e5f7b147f infix reduce 2021-02-15 15:31:55 +11:00
Mike Farah
c4faa70143 wip - reduce! 2021-02-15 14:27:00 +11:00
Mike Farah
4d6d07ec43 Force re-release 2021-02-11 10:58:40 +11:00
Mike Farah
bd0818c481 Fixed write-inplace with no expression 2021-02-11 10:58:40 +11:00
Ben Moss
26742b2597 Fix pretty-printing
Fixes #716 #715
2021-02-11 10:58:40 +11:00
Mike Farah
64c618c041 Version increment 2021-02-11 10:58:40 +11:00
Mike Farah
c4c8e5e7b0 Preserve comments on map keys 2021-02-11 10:58:40 +11:00
Mike Farah
e02ad4d7e8 Added space example to docs 2021-02-11 10:58:40 +11:00
Mike Farah
dd17f072cf update deps 2021-02-11 10:58:40 +11:00
Mike Farah
429c3ca65b Fixed merge dropping anchors 2021-02-11 10:58:40 +11:00
Mike Farah
cfcac6d1dc improving docs 2021-02-11 10:58:40 +11:00
Mike Farah
a5ddbca97f Fixing special character example 2021-02-11 10:58:40 +11:00
Mike Farah
30027a8cf4 Added not equals operator 2021-02-11 10:58:40 +11:00
Mike Farah
f92a42e4f8 Equals now only compares scalars 2021-02-11 10:58:40 +11:00
Mike Farah
3c466dc66e Fixed delete bug 2021-02-11 10:58:40 +11:00
Mike Farah
0816e16d30 fixed instructions 2021-02-11 10:58:40 +11:00
Mike Farah
802d54e14e v4.5.0 2021-02-11 10:58:40 +11:00
Mike Farah
10600dd29a Fixed delete bug 2021-02-11 10:58:40 +11:00
Mike Farah
3a464272d4 Added variable doc 2021-02-11 10:58:40 +11:00
Mike Farah
691efadfac Fixed variable precedence 2021-02-11 10:58:40 +11:00
Mike Farah
6efe4c4797 Fixing op precedences 2021-02-11 10:58:40 +11:00
Mike Farah
9e56b364c2 Fixing op precedences 2021-02-11 10:58:40 +11:00
Mike Farah
85ec32e3db Added variables 2021-02-11 10:58:40 +11:00
Mike Farah
5c73132c8e Dont create entries when selecting 2021-02-11 10:58:40 +11:00
Mike Farah
c6efd5519b Pass context through operators
Allows more sophisticated functionality
2021-02-11 10:58:40 +11:00
Mike Farah
3bae44be68 Added funding button 2021-02-11 10:58:40 +11:00
zy
48a7c59c4b change version from master to latest(v4) 2021-02-11 10:58:40 +11:00
zy
851fbd8cf5 fix: go install fails
Installation by go will result in an error:
module declares its path as: xxx,
but was required as: xxx.

Go install with master branch will fix it.

fix: #676
2021-02-11 10:58:40 +11:00
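
For context, the workaround this commit describes amounts to installing from the default branch rather than a tagged module path. A rough, era-specific sketch (the exact command is an assumption, not quoted from the repository):

    # Assumed workaround for #676: fetch yq from the master branch so the path
    # declared in go.mod matches the path being required. Under Go 1.16+,
    # `go install <module>@<branch>` behaves similarly.
    GO111MODULE=on go get github.com/mikefarah/yq/v4@master
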
Mike Farah
820a3320be Fixed length of null to be zero 2021-02-11 10:58:40 +11:00
Mike Farah
61f569aebb fixing docker pipeline for nextime 2021-02-11 10:58:40 +11:00
Mike Farah
9ff51cd066 attempt to fix dockre 2021-02-11 10:58:40 +11:00
Mike Farah
9dd6d11362 Fixed bad docker version 2021-02-11 10:58:40 +11:00
Mike Farah
83139e21d9 Bump version 2021-02-11 10:58:40 +11:00
Mike Farah
c77001f969 Can add and merge append to null 2021-02-11 10:58:40 +11:00
evnp
1be3b31bbc Don't escape HTML chars when converting to json
json.Encoder and json.Marshal implicitly use HTMLEscape to convert
>, <, &, with \u003c, \u003e, \u0026. This behavior carries over
to yq, where chars will be escaped when outputting json but not when
outputting yaml.

This changeset disables this behavior via encoder.SetEscapeHTML(false).
Unfortunately there is no equivalent option for json.Marshal, so its
single usage has been replaced with an encoder (with escaping disabled).
2021-02-11 10:58:40 +11:00
Mike Farah
6c14a80991 Fixed cross-function combinatorial bug 2021-02-11 10:58:40 +11:00
Mike Farah
76bd1896e9 wip 2021-02-11 10:58:40 +11:00
Mike Farah
c63801a8a5 thoughts 2021-02-11 10:58:40 +11:00
Mike Farah
f7cfdc29e1 cross function fix wip 2021-02-11 10:58:40 +11:00
Mike Farah
07c6549a58 Fixed doker instructions 2021-02-11 10:58:40 +11:00
Mike Farah
29f40dad59 Fixing multiply doc 2021-02-11 10:58:40 +11:00
Mike Farah
fe33e7fcfe Incrementing version 2021-02-11 10:58:40 +11:00
Mike Farah
0707525b29 Added keys operator 2021-02-11 10:58:40 +11:00
Mike Farah
62acee54c3 Added split string operator 2021-02-11 10:58:40 +11:00
Mike Farah
d21c94cf4f Added join strings operator 2021-02-11 10:58:40 +11:00
Mike Farah
626e9cacaf Split doc operator 2021-02-11 10:58:40 +11:00
Mike Farah
02ef99560d Fixing add,multiply,alternative operator precendences 2021-02-11 10:58:40 +11:00
Mike Farah
c59209f041 Fixed remove comments example 2021-02-11 10:58:40 +11:00
Mike Farah
947ffb6986 Dont use pointer for env prefs (avoid nil) 2021-02-11 10:58:40 +11:00
Mike Farah
1a03031297 Dont use pointer for recursive prefs (avoid nil) 2021-02-11 10:58:40 +11:00
Mike Farah
2c7db0071a Dont use pointer for multiply prefs (avoid nil) 2021-02-11 10:58:40 +11:00
Mike Farah
0484d0232b Dont use pointer for commment prefs (avoid nil) 2021-02-11 10:58:40 +11:00
Mike Farah
91c72d2d9e Added merge if empty 2021-02-11 10:58:40 +11:00
Mike Farah
09ec740d45 Added operator level doc 2021-02-11 10:58:40 +11:00
Mike Farah
532dbd81a5 Incrementing version 2021-02-11 10:58:40 +11:00
Mike Farah
e86f83fb69 Renaming pathtree to expression 2021-02-11 10:58:40 +11:00
Mike Farah
7d5b6b5442 Removed global vars 2021-02-11 10:58:40 +11:00
Mike Farah
b749973fe0 UnwrapDoc now private 2021-02-11 10:58:40 +11:00
Mike Farah
ba223df4ac Moved eval function to eval interface 2021-02-11 10:58:40 +11:00
Mikhail Katychev
e6336bcb85 added lib_test.go 2021-02-11 10:58:40 +11:00
Mikhail Katychev
9ae03e0a1c added EvaluateNodes and EvaluateCandidateNodes to yqlib 2021-02-11 10:58:40 +11:00
Mike Farah
55712afea6 Merge now copies anchor names 2021-02-11 10:58:40 +11:00
Mike Farah
7518dac99c Fixed creation of candidateNode in operators to include file metadata 2021-02-11 10:58:40 +11:00
Mike Farah
49ac2bac13 Cleaning up exposed public api 2021-02-11 10:58:40 +11:00
Mike Farah
e28df367eb Fixed tag operator for top level node 2021-02-11 10:58:40 +11:00
Mike Farah
90ec05be54 Fixed equals operator for top level node 2021-02-11 10:58:40 +11:00
Mike Farah
8f5270cc63 Fixed has operator for top level node 2021-02-11 10:58:40 +11:00
Mike Farah
286590b01e fixing exposed functions and interfaces 2021-02-11 10:58:40 +11:00
Mike Farah
c1cf8b4e34 fixing exposed functions and interfaces 2021-02-11 10:58:40 +11:00
Mike Farah
461661112c Better add documentation 2021-02-11 10:58:40 +11:00
Mike Farah
578f2c27f9 Added scalar addition 2021-02-11 10:58:40 +11:00
Mike Farah
6ed037a9f6 Fixed collect at document level 2021-02-11 10:58:40 +11:00
Mike Farah
69386316f3 Better error handling will empty env 2021-02-11 10:58:40 +11:00
Mike Farah
a0e1f65b20 Better recursive decent docs 2021-02-11 10:58:40 +11:00
Mike Farah
8027f4c568 Better docIndex docs 2021-02-11 10:58:40 +11:00
Mike Farah
b13eb7083e Better env docs 2021-02-11 10:58:40 +11:00
Mike Farah
b505240d09 Merged env commands in :eye-roll: 2021-02-11 10:58:40 +11:00
Mike Farah
7a184bef78 Env Ops! 2021-02-11 10:58:40 +11:00
Mike Farah
34bc33d5c5 strenv 2021-02-11 10:58:40 +11:00
Mike Farah
4d8b64d05c wip 2021-02-11 10:58:40 +11:00
Mike Farah
2d9cc3c107 wip 2021-02-11 10:58:40 +11:00
Mike Farah
018a6e616d Bump version 2021-02-11 10:58:40 +11:00
Mike Farah
7fa2b20b48 Error when passing files and using null-input flag 2021-02-11 10:58:40 +11:00
Mike Farah
316e0d4d5a Bumped go yaml for comment hanlding fixes 2021-02-11 10:58:40 +11:00
Mike Farah
a0d4c51e27 Added webi 2021-02-11 10:58:40 +11:00
Mike Farah
ceff2cc18d Added recurse examples 2021-02-11 10:58:40 +11:00
Mike Farah
db62a16007 Added another delete example 2021-02-11 10:58:40 +11:00
Mike Farah
2a6e423d2d Can assign-update tag 2021-02-11 10:58:40 +11:00
Mike Farah
5a1b81cbfc Can assign-update style 2021-02-11 10:58:40 +11:00
Mike Farah
8c1f7dfbd7 Can assign-update aliases and anchors 2021-02-11 10:58:40 +11:00
Mike Farah
2e81384eed Can assign-update comments 2021-02-11 10:58:40 +11:00
Mike Farah
fbf36037c9 updating readme 2021-02-11 10:58:40 +11:00
Mike Farah
2957210e65 brew v3! 2021-02-11 10:58:40 +11:00
Mike Farah
bde419aaee Updated collect objcet doc 2021-02-11 10:58:40 +11:00
Mike Farah
9b185a4409 Added shorthand document index selection 2021-02-11 10:58:40 +11:00
Mike Farah
0c777a4967 Unwrap node in get tag to return proper tag at root level 2021-02-11 10:58:40 +11:00
Mike Farah
e9591e0cd5 Added v3 snap instructions 2021-02-11 10:58:40 +11:00
Mike Farah
04491e13c3 Refactored doc generation, add fi fileIndex alias 2021-02-11 10:58:40 +11:00
Mike Farah
5aff50a345 Fixed updating yaml from other files 2021-02-11 10:58:40 +11:00
Mike Farah
90d55fb52a update issue template, instruct questions to be raised in disussion 2021-02-11 10:58:40 +11:00
Mike Farah
e6f97518f3 fixed heading 2021-02-11 10:58:40 +11:00
Mike Farah
f4a44e7313 added tar.gz instructions 2021-02-11 10:58:37 +11:00
Mike Farah
edb5f213d7 updating readme 2020-12-31 09:22:22 +11:00
Mike Farah
5cfd9b05ee updating readme 2020-12-31 09:22:18 +11:00
jonatasrenan
0e764c59ce wget version var missing a 'v' prefix. 2020-12-31 08:59:38 +11:00
Mike Farah
1b887e23b3 scripts/check works for local and docker build 2020-12-30 10:40:41 +11:00
Chris Warth
a76b72e691 find golangci_lint through PATH 2020-12-30 10:30:53 +11:00
Mike Farah
9509831cff Updated docs 2020-12-29 22:35:57 +11:00
Mike Farah
e92180e89d updated release instructions 2020-12-29 09:59:16 +11:00
Mike Farah
a6ae33c3f1 Cleaning up release process, fixed github action version 2020-12-29 09:50:21 +11:00
Mike Farah
94a563dfd8 Updated docs 2020-12-28 11:57:20 +11:00
Mike Farah
0328cfd619 Added prettyPrint flag 2020-12-28 11:40:41 +11:00
Mike Farah
88663a6ce3 Added recurse keys operator 2020-12-28 11:24:42 +11:00
Mike Farah
b10a9ccfc6 Removed TraversePrefs 2020-12-28 10:29:43 +11:00
Mike Farah
9e9e15df73 More scenarios 2020-12-27 23:00:46 +11:00
Mike Farah
6cc6fdf322 Cleaning code 2020-12-27 22:56:15 +11:00
Mike Farah
a88c2dc5d3 Traverse Array Operator 2020-12-27 22:48:20 +11:00
Mike Farah
ea231006ed Refactoring traverse 2020-12-27 09:55:08 +11:00
Mike Farah
80f187f1a4 Refactoring traverse 2020-12-27 09:51:34 +11:00
Mike Farah
98e8b3479f Fixed nested array splat path 2020-12-25 12:49:05 +11:00
Mike Farah
eb539ff326 Fixed doc links 2020-12-23 10:37:51 +11:00
Mike Farah
c09f7aa707 Cleaning up docs 2020-12-23 10:30:13 +11:00
Mike Farah
7f5c380d16 v4.1.0 2020-12-23 10:23:54 +11:00
Mike Farah
6a05e517f1 Updated release instructions, remove gate for release 2020-12-23 10:23:09 +11:00
Mike Farah
8ee6f7dc1a fixing xcompile for git action 2020-12-22 22:50:01 +11:00
Mike Farah
8bd54cd603 fixing xcompile for git action 2020-12-22 22:31:28 +11:00
Mike Farah
f2f7b6db0f only tar executable files 2020-12-22 20:50:52 +11:00
Mike Farah
e082fee5d4 Fixed rhash call 2020-12-22 20:37:35 +11:00
Mike Farah
412911561f Fixed xcompile.sh 2020-12-22 20:23:13 +11:00
Mike Farah
52ae633cb7 trialing github release actions 2020-12-22 20:05:23 +11:00
Mike Farah
9356ca59bb trialing github release actions 2020-12-22 20:03:01 +11:00
Mike Farah
6dec167f74 trialing github release actions 2020-12-22 20:01:21 +11:00
Mike Farah
00f6981314 trialing github release actions 2020-12-22 19:56:56 +11:00
Mike Farah
4c60a2a967 trialing github release actions 2020-12-22 19:52:44 +11:00
Mike Farah
f4529614c4 trialing github release actions 2020-12-22 19:19:48 +11:00
Mike Farah
f059e13f94 automated docker releases! 2020-12-22 16:16:31 +11:00
Mike Farah
4dbf158505 playing with release action 2020-12-22 16:01:00 +11:00
Mike Farah
39a93a2836 playing with release action 2020-12-22 16:00:15 +11:00
Mike Farah
7c158fce26 playing with release action 2020-12-22 15:58:56 +11:00
Mike Farah
0bf43d2a55 playing with release action 2020-12-22 15:47:59 +11:00
Mike Farah
1b0bce5da6 Added alias operator;
alias, anchor and explode ops are now all documented together
2020-12-22 12:23:13 +11:00
Mike Farah
f112bde5fe Added anchor operator 2020-12-22 11:57:41 +11:00
Mike Farah
e5aa4a87a4 fixed test name 2020-12-22 11:47:58 +11:00
Mike Farah
f305e8fa12 Fixed delete full path 2020-12-22 11:45:51 +11:00
Mike Farah
d2d0c2c111 Added missing flag 2020-12-22 10:40:20 +11:00
Mike Farah
2aab79431c moved string space test to op values test cases 2020-12-22 10:38:52 +11:00
djajcevic
540d4953f5 #607 Fix string value with spaces error 2020-12-22 10:29:21 +11:00
Mike Farah
7849232255 tar files to keep permissions of exectuable 2020-12-22 10:25:15 +11:00
Mike Farah
57cd67f055 Added compressed binaries for download managers and better file size 2020-12-21 21:40:08 +11:00
Mike Farah
6b17fd4fc1 Added trivy to docker build, bumped alpine image 2020-12-21 15:48:25 +11:00
Mike Farah
ca8cd78616 Add now uses crossFunction 2020-12-21 11:54:03 +11:00
Mike Farah
9876b0ce8f Boolean operators now use the crossFunction util func 2020-12-21 11:42:35 +11:00
Mike Farah
a23272727d Added Alternative op 2020-12-21 11:32:34 +11:00
Mike Farah
1fb37785d6 Better readme 2020-12-20 13:34:58 +11:00
Mike Farah
efb9027540 Merge branch 'v4' 2020-12-20 13:31:23 +11:00
Mike Farah
2577fe5425 Increment version 2020-12-20 13:29:12 +11:00
Mike Farah
f39c57fed9 Updating readme for imminent v4 release 2020-12-20 12:39:42 +11:00
Mike Farah
1e8f755e7c Updating readme for imminent v4 release 2020-12-20 12:37:15 +11:00
Mike Farah
bb088f6aa2 Added better error reporting 2020-12-17 14:19:46 +11:00
Mike Farah
a96b74e779 Added better error reporting 2020-12-17 14:02:54 +11:00
Mike Farah
09a9e1e7f0 handle multiple document streams 2020-12-15 14:33:50 +11:00
Mike Farah
db60746e4e Can now properly handle .a[] expressions 2020-12-09 12:15:14 +11:00
Shashank Veerapaneni
8846255d1c Update README.md
Fix the wget download command
2020-12-02 19:42:05 +11:00
Mike Farah
a3e422ff76 added another test 2020-12-01 18:10:10 +11:00
Mike Farah
2c3357702d clarified pipe parsing tests 2020-12-01 18:08:41 +11:00
Mike Farah
c9dbf04da3 Added pipe and length docs, fix pipe precedence 2020-12-01 17:58:07 +11:00
Mike Farah
fbe53885ae Update README.md 2020-12-01 17:24:14 +11:00
Mike Farah
da027f69d7 updated cobra package 2020-12-01 16:16:20 +11:00
Mike Farah
8cd290c00b incrementing version 2020-12-01 15:15:12 +11:00
Mike Farah
363fe5d283 Added sort keys operator 2020-12-01 15:06:54 +11:00
Mike Farah
773b1a3517 fixed create doc for eval-all 2020-12-01 14:23:27 +11:00
Mike Farah
cf4915d786 improved acceptance tests 2020-12-01 14:14:16 +11:00
Mike Farah
08f579f4e3 Fixed create yaml 2020-12-01 14:06:49 +11:00
Mike Farah
c9229439f7 added exit status 2020-11-30 16:35:21 +11:00
Mike Farah
9bc66c80b6 Added write-inlplace flag 2020-11-30 16:05:07 +11:00
Mike Farah
8de10e550d wip - write in place 2020-11-29 20:25:47 +11:00
Mike Farah
1258fa199e Updated lib todo list 2020-11-28 11:25:10 +11:00
Mike Farah
3a030651a3 Added append equals, merge append. Fixed creating numeric arrays 2020-11-28 11:24:16 +11:00
Mike Farah
3f48201a19 wip 2020-11-28 10:46:04 +11:00
Mike Farah
3cecb4e383 wip 2020-11-28 10:41:09 +11:00
Mike Farah
13679e51e2 Added get key examples 2020-11-26 11:20:53 +11:00
Mike Farah
f42728eef7 updated issue templates 2020-11-25 22:33:50 +11:00
Mike Farah
93136ba840 updated issue templates 2020-11-25 22:32:47 +11:00
Mike Farah
5665dc7b71 updated issue templates 2020-11-25 22:31:55 +11:00
Mike Farah
9302279dd2 updated issue templates 2020-11-25 22:22:16 +11:00
Mike Farah
5205f01248 Fixed recursive decent on empty objects/arrays 2020-11-25 15:01:12 +11:00
Mike Farah
0a66bb797d 4 alpha2 2020-11-25 13:32:32 +11:00
Mike Farah
5972bb2f23 attempt to fix pipeline 2020-11-25 11:17:28 +11:00
Mike Farah
ec43f5e7c3 go mod tidy 2020-11-25 11:06:43 +11:00
Mike Farah
1ce30b25dc Add operator! 2020-11-24 13:07:19 +11:00
Mike Farah
3d6a231722 Added has operator 2020-11-24 11:38:39 +11:00
Mike Farah
3f04a1b52e Fixed empty array op 2020-11-22 13:50:32 +11:00
Mike Farah
aed598c736 Fixing docs 2020-11-22 13:16:54 +11:00
Mike Farah
e9fa873af8 path operator singular 2020-11-22 12:22:15 +11:00
Mike Farah
064cff1341 added path operator! 2020-11-22 12:19:57 +11:00
Mike Farah
fc3af441e5 Extracted out evaluators 2020-11-22 11:56:28 +11:00
Mike Farah
e451119014 Added File operators 2020-11-20 23:08:12 +11:00
Mike Farah
d38caf6bc2 Added File operators 2020-11-20 22:57:32 +11:00
Mike Farah
4e385a1b93 get file wip 2020-11-20 15:50:15 +11:00
Mike Farah
356aac5a1f fixed boolean example 2020-11-20 15:33:21 +11:00
Mike Farah
663413cd7a Fixed typo 2020-11-20 15:31:49 +11:00
Mike Farah
f03005f86d Fixed boolean ops 2020-11-20 15:29:53 +11:00
Mike Farah
bc87aca8d7 wip 2020-11-20 14:35:34 +11:00
Mike Farah
62f262147c Attempt to fix git pipeline 2020-11-20 14:00:24 +11:00
Mike Farah
0259bb44c1 Updated readme 2020-11-20 13:55:18 +11:00
Mike Farah
c08980e70f Set entrypoint to yq 2020-11-20 13:53:44 +11:00
bahetiamit
e07a5b6065 Adding github action on release to publish multi-arch image 2020-11-20 13:48:35 +11:00
Mike Farah
f69a81b79b Updated readme re v4 2020-11-19 22:56:13 +11:00
Mike Farah
9674acf684 Fixed docker file, fixed doco 2020-11-19 22:53:05 +11:00
Mike Farah
8e1ce4ca70 Updated todo 2020-11-19 22:12:34 +11:00
Mike Farah
9bd9468526 Minor fixes 2020-11-19 22:11:26 +11:00
Mike Farah
75044e480c Added plain assignment 2020-11-19 17:08:13 +11:00
Mike Farah
36084a60a9 Added tag operator 2020-11-19 16:45:05 +11:00
Mike Farah
9b48cf80e0 updated todo 2020-11-18 20:43:36 +11:00
Mike Farah
bb3b08e648 wip style docs and test 2020-11-18 20:42:37 +11:00
Mike Farah
dcacad1e7e docs 2020-11-18 10:32:30 +11:00
Mike Farah
3356061e1e select doc 2020-11-18 09:52:03 +11:00
Mike Farah
2c062bc2a5 Added printer test 2020-11-18 09:52:03 +11:00
Mike Farah
088ec36acd include docs for tracking 2020-11-18 09:50:52 +11:00
Mike Farah
83cb6421df added test to ensure json keys remain in order 2020-11-17 16:17:38 +11:00
Mike Farah
a57944d123 Fixed printer 2020-11-16 12:09:57 +11:00
Mike Farah
79867473d5 updating release 2020-11-16 10:28:57 +11:00
Mike Farah
b3efcdc202 more docs 2020-11-15 10:58:47 +11:00
Mike Farah
af2aa9ad91 more docs 2020-11-15 10:50:30 +11:00
Mike Farah
db4762ef7c more docs 2020-11-14 13:38:44 +11:00
Mike Farah
860655b4cd Better documentation generation 2020-11-13 21:34:43 +11:00
Mike Farah
d91b25840a Better documentation generation 2020-11-13 21:22:05 +11:00
Mike Farah
019acfe456 Better documentation generation 2020-11-13 20:58:01 +11:00
Mike Farah
af39fc737d Fixed linting 2020-11-13 14:07:11 +11:00
Mike Farah
f8a700e400 Alpha1 of v4! 2020-11-13 13:35:59 +11:00
Mike Farah
708ff02e8d Fixed collect object for multi doc 2020-11-13 13:19:54 +11:00
Mike Farah
2edf64182b refining 2020-11-06 14:37:01 +11:00
Mike Farah
b290a65602 document index 2020-11-06 12:11:38 +11:00
Mike Farah
05520c2168 more tests 2020-11-06 11:45:18 +11:00
Mike Farah
5ab584afac comment ops! 2020-11-06 11:23:26 +11:00
Mike Farah
b1f139c965 refactored 2020-11-04 10:48:43 +11:00
Mike Farah
0cb2ff5b2e explode when outputting to json 2020-11-02 13:55:03 +11:00
Mike Farah
d6ff198d63 explode! 2020-11-02 13:43:45 +11:00
Mike Farah
e515b8c2db got style 2020-11-02 11:20:52 +11:00
Mike Farah
b63b9644aa multiply merge anchors 2020-10-30 12:40:44 +11:00
Mike Farah
461c3e719c merge anchors! 2020-10-30 12:00:48 +11:00
Mike Farah
643f2467ee simple anchors 2020-10-30 10:56:45 +11:00
Mike Farah
4edb3e9021 create object fixes 2020-10-28 13:00:26 +11:00
Mike Farah
41c08891d3 create object fixes 2020-10-28 11:34:01 +11:00
Mike Farah
85d059340b first cli 2020-10-27 16:45:16 +11:00
Mike Farah
badd476730 collect object operator! 2020-10-21 13:54:51 +11:00
Mike Farah
65e6e492cd wip 2020-10-21 12:54:58 +11:00
Mike Farah
6a698332dd more 2020-10-20 16:27:30 +11:00
Mike Farah
49615f5581 Added null 2020-10-20 15:40:11 +11:00
Mike Farah
4f574efdc4 simplified, refactored 2020-10-20 15:33:20 +11:00
Mike Farah
ccb718cd0f Moved macports to community, announced v4 2020-10-20 13:58:45 +11:00
Mike Farah
73cf6224f2 wip 2020-10-20 13:53:26 +11:00
Herby Gillot
815edef86a README: add instructions for installing with MacPorts 2020-10-20 13:50:10 +11:00
Mike Farah
4bc98776a6 wip 2020-10-19 20:05:38 +11:00
Mike Farah
1910563bfe merge 2020-10-19 16:36:46 +11:00
Mike Farah
2ddf8dd4ed autovivification, merge! 2020-10-19 16:14:29 +11:00
Mike Farah
cd83d94b6a updating release instructions 2020-10-19 09:01:52 +11:00
Mike Farah
6afc2e9189 3.4.1 2020-10-19 08:40:59 +11:00
Mike Farah
49b810cedd Multiply wip 2020-10-19 08:36:33 +11:00
Mike Farah
391ab8d70c removed docs, added recursive decent 2020-10-18 11:31:36 +11:00
Mike Farah
b026ebf2c3 more refinement 2020-10-17 22:58:18 +11:00
Mike Farah
5e544a5b7e value parse test 2020-10-17 22:39:01 +11:00
Mike Farah
60511f5f92 refactoring, fixing 2020-10-17 22:10:47 +11:00
Mike Farah
59296b7d12 can assign children! 2020-10-16 12:49:15 +11:00
Mike Farah
fccd03036f can assign values 2020-10-16 12:47:31 +11:00
Mike Farah
6829d8cb78 JQ like syntax wip 2020-10-16 12:29:26 +11:00
Mike Farah
449fb8952c adding pipe 2020-10-13 14:37:01 +11:00
Mike Farah
afffb2c3ba collect 2020-10-13 14:04:21 +11:00
Mike Farah
829ca3b424 read tests 2020-10-13 13:17:18 +11:00
Mike Farah
d19e9f6917 read command 2020-10-13 12:51:37 +11:00
Mike Farah
6a0a4efa7b added single count operator 2020-10-12 12:24:59 +11:00
Mike Farah
288aec942c ops first class 2020-10-12 10:46:54 +11:00
Mike Farah
7c4cf72468 wip 2020-10-12 10:09:13 +11:00
Mike Farah
b025000f20 cool, both work 2020-10-11 11:46:07 +11:00
Mike Farah
1ba1e90e58 dont splat scalars 2020-10-11 11:45:20 +11:00
Mike Farah
e0d1aed5b9 Refactoring 2020-10-11 11:24:22 +11:00
Mike Farah
e6fd6905eb wip 2020-10-10 23:04:10 +11:00
Mike Farah
0a2a3c4374 can delete 2020-10-10 22:42:09 +11:00
Mike Farah
ac076cd34a assign operator 2020-10-10 15:24:37 +11:00
Mike Farah
8170eec6d1 extracted out operators 2020-10-10 15:00:39 +11:00
Mike Farah
23083ed974 fixed equals number issue 2020-10-09 17:07:53 +11:00
Mike Farah
93aaa8ccee array equals! 2020-10-09 16:43:43 +11:00
Mike Farah
d7716551cf arrays 2020-10-09 16:38:07 +11:00
Mike Farah
c791e5c663 equal! 2020-10-09 15:06:01 +11:00
Mike Farah
a6d4dbb8b8 equal! 2020-10-09 15:05:45 +11:00
Mike Farah
c7ebdda530 added AND op 2020-10-09 12:10:46 +11:00
Mike Farah
a0d940638c use orderermap 2020-10-09 12:04:19 +11:00
Mike Farah
c09513803a wip 2020-10-09 11:37:47 +11:00
Mike Farah
f95226e267 ops work in theory! 2020-10-09 11:10:37 +11:00
Mike Farah
f479a7e8e3 wip 2020-10-09 10:59:03 +11:00
Mike Farah
f7d4695837 binary tree ftw 2020-10-09 08:51:14 +11:00
Mike Farah
5ee52f9506 wip 2020-10-09 08:51:14 +11:00
Mike Farah
c2159d9861 postfix with traverse op 2020-10-09 08:51:14 +11:00
Mike Farah
95bc1e1599 include traverse as a operator token 2020-10-09 08:51:14 +11:00
Mike Farah
e037c57725 postfix wip! 2020-10-09 08:51:14 +11:00
Mike Farah
e32bc43c4e postfix wip! 2020-10-09 08:51:14 +11:00
Mike Farah
a8bdc12d83 to postfix wip 2020-10-09 08:51:14 +11:00
Mike Farah
ae59ad57f4 tree wip 2020-10-09 08:51:14 +11:00
Mike Farah
c321600afa fixed wrapping! 2020-10-09 08:51:14 +11:00
Mike Farah
4c95efa469 wip 2020-10-09 08:51:14 +11:00
Morgan Bazalgette
996ee0b433 add test for key order 2020-10-09 08:38:42 +11:00
Morgan Bazalgette
bb9cb0c60e fix tests 2020-10-09 08:38:42 +11:00
Morgan Bazalgette
a125495eec keep order of keys when json marshalling 2020-10-09 08:38:42 +11:00
Peter Benjamin
7fa2835e13 fix(image): bump alpine image minor version
This is to patch a high security vulnerability.
Closes #550

Signed-off-by: Peter Benjamin <petermbenjamin@gmail.com>
2020-10-08 12:29:04 +11:00
Mike Farah
e0f5cb3c59 Update README.md
Fixing alpine instructions
2020-10-01 09:20:32 +10:00
Mike Farah
87550b7fe5 Show errors on validate 2020-09-21 20:34:29 +10:00
Mike Farah
5554301c29 Updated install instructions, added wget 2020-09-21 11:06:57 +10:00
Mike Farah
3b0aaac626 Added checksum hashes order to release 2020-09-18 16:37:45 +10:00
Mike Farah
65cb472604 Update README.md 2020-09-16 15:43:08 +10:00
Mike Farah
fbba38c9b7 Update README.md 2020-09-16 15:42:09 +10:00
Mike Farah
e5948c4f16 Fixed potential npe 2020-09-13 12:14:20 +10:00
Mike Farah
4eaadf98d0 Set STDOUT to default out when printing via cobra 2020-09-13 11:49:55 +10:00
Mike Farah
eedbb0a99f Explode anchors now applies to map keys too 2020-09-13 11:26:07 +10:00
Mike Farah
7dabc57b65 Added array update merge example 2020-09-13 11:12:52 +10:00
Mike Farah
fcd3a90f67 Bumping minor version to indicate new merge features and slight backwards incompatibility 2020-09-13 11:06:56 +10:00
Mike Farah
88e99e5336 New line in docs for better readability 2020-09-13 11:04:48 +10:00
Mike Farah
a8cfccd3af Merge master fix 2020-09-13 10:59:40 +10:00
Mike Farah
3355e80d85 Merge branch 'master' into new-merge2 2020-09-13 10:52:31 +10:00
Mike Farah
f528b28938 Convert to JSON now handles non string keys 2020-09-13 10:44:22 +10:00
Mike Farah
5b7b390a33 Force static linking 2020-09-13 10:32:45 +10:00
Mike Farah
4f12e09e78 More info on other installation methods 2020-09-13 10:16:24 +10:00
Mike Farah
ee732fbf0b Added Alpine Linux instructions 2020-09-13 10:09:02 +10:00
Mike Farah
1507f929a2 Fixing version stuffup - now 3.3.4 2020-09-11 11:11:49 +10:00
Mike Farah
c11c3df84f Version 3.2.3 2020-09-09 12:02:30 +10:00
Mike Farah
06bb3ac826 Can create new yaml files using scripts again 2020-09-09 11:02:00 +10:00
Mike Farah
778f8c6916 Removed doctools 2020-09-08 13:03:18 +10:00
Mike Farah
9f43a4a265 Keep comments when using the write commandt o update values 2020-09-08 09:46:04 +10:00
Mike Farah
bb6f07d147 Use latest go-lang 2020-09-08 09:43:11 +10:00
Mike Farah
759456e375 Lib version bumps 2020-09-08 09:13:57 +10:00
Mike Farah
5e59803037 Update README.md 2020-09-03 10:58:00 +10:00
Roberto Mier Escandon
a0cb691601 Bump version to 3.3.2 2020-08-16 13:02:07 +10:00
Mike Farah
fea8510061 Added comments merge strategy 2020-07-17 15:51:03 +10:00
Mike Farah
b380ea2892 better array merge strategy name 2020-07-17 13:27:27 +10:00
Mike Farah
d66a709213 refactored array merge flags into a strategy 2020-07-17 13:26:20 +10:00
Mike Farah
2fc39b3865 Can overwrite arrays when merging 2020-07-17 13:07:32 +10:00
Mike Farah
ee07edbd88 Added merge alias test 2020-06-18 09:56:36 +10:00
Mike Farah
b11661a1be Refactored merge - will allow more sophisticated mergin 2020-06-18 09:44:36 +10:00
Mike Farah
eac218980e Visit document node 2020-06-18 09:03:40 +10:00
Mike Farah
80e7f46538 Dont log mergePathStackToString - end up with duplicate logs 2020-06-18 09:03:40 +10:00
Mike Farah
086f0ec6b9 Update bug_report.md 2020-06-15 21:42:26 +10:00
Mike Farah
89cbe63343 Fixed deep read at root level 2020-06-15 12:31:13 +10:00
RyderXia
07cd3d4b8b return error 2020-06-13 16:45:52 +10:00
RyderXia
b7b6988e76 mk TempDir 2020-06-13 16:45:52 +10:00
Mike Farah
9de2039c31 Version bump 2020-06-12 12:23:18 +10:00
Mike Farah
767709fef5 Fixed error flag 2020-06-12 12:21:46 +10:00
Mike Farah
d9ae8e1e5a Updated readme 2020-06-12 09:30:05 +10:00
Mike Farah
b0fa0e5b86 added shell completion instructions 2020-06-11 18:50:38 +10:00
Mike Farah
6777d639c0 Fixed error handling 2020-06-11 18:30:45 +10:00
Mike Farah
de8dcff803 Added shell completions 2020-06-11 18:27:01 +10:00
Mike Farah
765ada4dc6 Bumping version 2020-06-11 15:01:18 +10:00
Mike Farah
1405584892 New,Update now support anchors and aliases 2020-06-11 13:57:13 +10:00
Mike Farah
8c9c326342 Usage error messages now go to StdErr 2020-06-11 10:14:33 +10:00
Mike Farah
e90b00957b Added missing flow style 2020-06-11 09:58:10 +10:00
Mike Farah
71f5f76213 Delete now works with deep splat 2020-06-11 09:53:36 +10:00
Mike Farah
b9e304e7a4 Can stripComments and explodeAnchors for compare 2020-06-11 09:13:55 +10:00
Mike Farah
d473c39a44 Added exit flag 2020-06-10 16:55:20 +10:00
Mike Farah
9624410add Significantly improved performance of exploding anchors (improves to json speed) 2020-06-10 16:36:33 +10:00
Mike Farah
b55fe48bd8 Update to latest go-yaml library, updated test w.r.t. formatting and comment handling fixes 2020-06-10 16:02:12 +10:00
adripo
721dd57ed4 Fixed typo
removed repeated word
2020-06-02 09:02:20 +10:00
Roberto Mier Escandon
6fc3566acd Changelog updated for version 3.3-0 2020-05-01 10:31:53 +10:00
Mike Farah
4b63d92a3c Added choco install instructions 2020-04-22 23:29:44 +10:00
M. Minot
3ccd32a47e docs(readme): protect parameter expansions
Avoid unwanted word-splitting, including in the working directory path.
2020-04-19 00:18:08 +10:00
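
The word-splitting this commit guards against is ordinary shell behaviour around unquoted expansions. A minimal sketch, with the docker invocation modelled loosely on the README's yq wrapper of that era (an assumption, not a quote):

    # Illustrative invocation only. Unquoted, a working directory such as
    # /home/user/my project splits into two words, breaking the volume mount:
    docker run --rm -i -v ${PWD}:/workdir mikefarah/yq yq read sample.yaml

    # Quoting the expansion keeps the path as a single word regardless of spaces:
    docker run --rm -i -v "${PWD}":/workdir mikefarah/yq yq read sample.yaml
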
Mike Farah
3f913afbb9 Fixed cli doco 2020-04-18 08:23:38 +10:00
Mike Farah
ff598e1933 Increment version 2020-04-17 17:28:41 +10:00
Mike Farah
23de61a8d7 Can now update tag/style of nodes without affecting the value 2020-04-17 17:09:33 +10:00
Mike Farah
64135a16e1 Use single/double instead of singleQuoted/doubleQuoted 2020-04-17 11:24:45 +10:00
Mike Farah
06d8715cbe Added customStyle flag, split command tests 2020-04-17 11:03:43 +10:00
Mike Farah
e15633023f Increment version 2020-04-16 15:29:58 +10:00
Mike Farah
6f0a329331 Fixed inplace errors clearing out original file 2020-04-15 11:48:42 +10:00
apenav
55511de9af Upgrade yq_dev container to golang:1.14 base container and deb packages to python3 2020-04-15 09:07:40 +10:00
Mike Farah
2db69c91c9 Added strip comments functionality 2020-04-14 14:48:45 +10:00
Mike Farah
33e35d10dd Dont unwrap json output by default 2020-04-14 14:11:44 +10:00
Mike Farah
dfdbbbb24a Added unwrap flag 2020-04-14 13:48:25 +10:00
Mike Farah
55c4d01a91 Update deps 2020-04-14 11:39:08 +10:00
Mike Farah
7dc3d62bb6 Attempt to fix github flow 2020-04-14 11:33:25 +10:00
Mike Farah
11116804c5 Upgrade to git 1.14 2020-04-14 11:19:00 +10:00
Mike Farah
f68b24323e Attempt to fix git workflow action 2020-04-14 11:18:45 +10:00
Mike Farah
8f166a9848 Fixed negative index bug 2020-04-14 11:17:29 +10:00
Mike Farah
3c36db9285 Split github actions pipeline 2020-04-14 09:22:38 +10:00
Mike Farah
605d6fab9b Try newest golang version 2020-04-13 11:05:36 +10:00
Mike Farah
1f9a3f5f6c Added negative index capability 2020-04-13 10:36:46 +10:00
Mike Farah
a06320f13c Attempt to fix snapcraft 2020-03-04 15:32:26 +11:00
Mike Farah
279996533d Adding sourcetype to snapcraft yml 2020-03-04 15:21:09 +11:00
Mike Farah
efe942727d Bump snapcraft go version 2020-03-02 09:36:03 +11:00
Mike Farah
e4dc70cc84 Fixing github action description 2020-03-02 08:47:19 +11:00
Mike Farah
8ade1275e2 Fixing github action description 2020-03-02 08:43:47 +11:00
Mike Farah
e1e05d85e3 Added another multistring test 2020-03-01 17:21:04 +11:00
Mike Farah
b99467432e Fixed readme links 2020-03-01 17:15:32 +11:00
Mike Farah
6b07143af7 Fixed printing of scalars 2020-03-01 17:13:00 +11:00
Mike Farah
ed234e37ce Better action description 2020-02-29 18:22:52 +11:00
Mike Farah
c0e4917d52 Better action description 2020-02-28 19:43:32 +11:00
Mike Farah
2713893f87 Added icon and color to github action 2020-02-28 16:42:18 +11:00
Mike Farah
6bb221e973 3.2.0 2020-02-28 16:35:45 +11:00
Mike Farah
7eb01a81da Shorter colors flag 2020-02-28 15:57:44 +11:00
Mike Farah
5c117204fa Shorter colors flag 2020-02-28 15:57:05 +11:00
Mike Farah
a4fa8f1341 Compare returns exit code 1 when not matching 2020-02-28 15:49:34 +11:00
Mike Farah
69caccd2d3 Added another scenario for find by value 2020-02-28 15:28:37 +11:00
Mike Farah
67fb924e0e Can find array elements bu value 2020-02-28 15:24:16 +11:00
Mike Farah
b64187fe32 Dont recurse into scalar nodes
Fixes https://github.com/mikefarah/yq/issues/375
2020-02-28 15:03:56 +11:00
Mike Farah
8e6ceba2ac Array length and collect 2020-02-28 14:03:40 +11:00
Mike Farah
6ef04e1e77 wip 2020-02-28 14:03:40 +11:00
Mike Farah
10029420a5 wip 2020-02-28 14:03:40 +11:00
Mike Farah
f91093d5fe Colors work for all commands 2020-02-28 10:42:19 +11:00
Risent Veber
090432d241 add colorization 2020-02-28 10:42:19 +11:00
Mike Farah
22d5bd3615 Show github build status 2020-02-26 21:14:15 +11:00
Mike Farah
0d477841da Show github build status 2020-02-26 21:13:39 +11:00
Mike Farah
1f72817d74 Removing travis integration - use github 2020-02-26 21:11:27 +11:00
Mike Farah
125d04a75b Attempt to fix git workflow 2020-02-26 11:03:05 +11:00
chocolatey030@gmail.com
08f6a90603 [GH-371] cleaned up go modules using 'go mod tidy' 2020-02-26 10:44:03 +11:00
Pascal Sochacki
da398765b8 added files for github action 2020-02-26 09:09:37 +11:00
Roberto Mier Escandon
d356fa0d0b Bump debian package to version 3.1-2
Updated all files to be more Debian compliant
Update release instructions for get mod vendor before releasing
2020-02-25 08:57:41 +11:00
Roberto Mier Escandon
d22bfc241b Add changelog 2020-02-25 08:57:41 +11:00
Mike Farah
954affea23 Create go.yml 2020-02-21 21:19:09 +11:00
Mike Farah
b0d1afb601 Update CONTRIBUTING.md 2020-02-21 21:16:47 +11:00
Mike Farah
b286636909 Create CODE_OF_CONDUCT.md 2020-02-21 21:13:14 +11:00
Mike Farah
bdf47c9797 Separated contribution notes 2020-02-21 21:12:09 +11:00
Mike Farah
1cc20d52bb Create CONTRIBUTING.md 2020-02-21 21:11:34 +11:00
Mike Farah
651d9edf88 Updating readme 2020-02-21 21:08:28 +11:00
Mike Farah
903605df39 Updating readme 2020-02-21 21:07:59 +11:00
Mike Farah
0f9facc84b Update issue templates 2020-02-21 21:04:54 +11:00
Mike Farah
5af86b1333 Update issue templates 2020-02-21 21:00:36 +11:00
Mike Farah
2bd2a85a4c Fixed trailing empty docs 2020-02-21 11:37:59 +11:00
Mike Farah
ceb76e5c17 Fixed trailing empty docs 2020-02-21 11:34:26 +11:00
Mike Farah
44322f0248 Fixed writing to null document 2020-02-21 11:02:10 +11:00
Mike Farah
0347516d82 Always print new line so wc works properly 2020-02-21 10:29:37 +11:00
Mike Farah
a46386e093 Fixed special characters in path for merging 2020-02-18 20:18:49 +11:00
Mike Farah
f5c3beb159 Added test for https://github.com/mikefarah/yq/issues/361 2020-02-18 20:02:09 +11:00
Mike Farah
9864afc4e7 Fixed empty merge problem 2020-02-18 09:15:46 +11:00
Roberto Mier Escandon
69fae2d9cb Inc Deb version 2020-02-17 09:29:02 +11:00
Mike Farah
83c13ce392 Fixed empty merge problem - need to visit empty arrays and objects 2020-02-13 14:56:58 +11:00
Mike Farah
d83c46eec2 Uncomment line in publish script 2020-02-13 10:22:52 +11:00
Mike Farah
65802f9e0e updated readme 2020-02-12 16:28:24 +11:00
Mike Farah
07309e1685 Inc snapcraft version 2020-02-12 16:27:25 +11:00
Mike Farah
24e906bae6 Fixed numeric map key issue 2020-02-12 15:40:21 +11:00
Mike Farah
f084f2bb23 Inc version for next release 2020-02-12 12:04:41 +11:00
Mike Farah
96a4161a92 Fixed explode anchors for array roots 2020-02-12 11:03:40 +11:00
Mike Farah
5cc01e43bc Can supply value for write from file 2020-02-08 14:04:54 +11:00
Mike Farah
9de2573009 Fixed merge append arrays 2020-02-07 16:32:39 +11:00
Mike Farah
29521f2e3e Simplified when to visit a node 2020-02-07 14:52:37 +11:00
Mike Farah
af5724ba29 Updated readme 2020-02-07 11:28:56 +11:00
Mike Farah
0a39d29c53 Updated readme 2020-02-07 11:26:29 +11:00
Mike Farah
72cd3e4a2a Fixed explode for aliases to scalars 2020-02-07 10:42:07 +11:00
Mike Farah
d40ad9649d Fixed explode for aliases to scalars 2020-02-07 10:09:20 +11:00
Mike Farah
63313ebb02 Merge branch 'coryrc-fix-merge-with-dots' into compare 2020-02-07 09:10:25 +11:00
Mike Farah
de3bfaef60 Merge branch 'fix-merge-with-dots' of git://github.com/coryrc/yq into coryrc-fix-merge-with-dots 2020-02-07 09:09:52 +11:00
Mike Farah
108b5cb093 Fixed explode for simple anchors 2020-02-07 09:08:52 +11:00
Mike Farah
b116f40348 Major version bump for new features 2020-02-06 12:20:51 +11:00
Cory Cross
ea9df0eede Fix path generation when merging file has period in key
The program generates a path for every leaf node in the
file-to-be-merged. It does not escape them if they contain a dot, as
the path-expressions document mentions is necessary.

Add in a test for this condition. Verified it fails without the fix.
2020-02-04 22:37:00 -08:00
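
The escaping referred to above is the path-expression convention of quoting map keys that themselves contain dots, so the dot is not read as a level separator. A hedged sketch (v3-era read syntax assumed; file and key names are invented):

    # Illustrative: "foo.bar" here is a single key named foo.bar, not a nested path.
    yq r config.yaml '"foo.bar".value'
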
Mike Farah
b7dd3e8e0a Added explode test 2020-02-05 15:08:13 +11:00
Mike Farah
dc13fa99f7 wip: explode anchors 2020-02-05 14:10:59 +11:00
Mike Farah
179049a535 Always allow empty 2020-02-04 14:42:08 +11:00
Mike Farah
2fa8b24272 Can merge one file 2020-02-04 14:33:35 +11:00
Mike Farah
f1dbe13f21 Can write and merge into empty files :) 2020-02-04 14:21:54 +11:00
Mike Farah
02258fbaae Fixed deep splatting merge anchors - dont visit twice 2020-02-04 14:10:12 +11:00
Mike Farah
6f0538173b Fix delete adding entries 2020-02-04 09:58:20 +11:00
Mike Farah
6840ea8c78 can set indent levels 2020-02-03 16:56:01 +11:00
Mike Farah
70b88fa778 Pretty print everything test 2020-02-03 16:40:17 +11:00
Mike Farah
bfc1a621c4 Pretty print everything 2020-02-03 16:37:53 +11:00
Mike Farah
166f866f28 Pretty print json 2020-02-03 16:31:03 +11:00
Mike Farah
b3598aaa43 Updated readme 2020-02-03 16:21:00 +11:00
Mike Farah
14ac791eaf Fixed compare output, added tests 2020-02-03 15:35:00 +11:00
Mike Farah
25293a6894 Check write errors 2020-02-03 14:28:38 +11:00
Mike Farah
d828b214cc More powerful compare 2020-02-03 14:15:12 +11:00
Mike Farah
9e47685271 Compare first cut 2020-02-03 13:59:16 +11:00
Mike Farah
699fce9da4 Added default value flag - for printing out a value when reading and there are no matches 2020-02-03 10:13:48 +11:00
Mike Farah
f52de57652 Don't fail when reading an empty file 2020-02-03 09:15:16 +11:00
Mike Farah
b7554e6e76 Pretty print disclaimer 2020-01-31 16:37:24 +11:00
Mike Farah
ec25511f1b Pretty print 2020-01-31 16:35:01 +11:00
Mike Farah
c6a52012ab Prefer download binary 2020-01-31 10:37:27 +11:00
Mike Farah
63ded205e8 Added docs for validate 2020-01-31 10:32:44 +11:00
Mike Farah
d1c1ab0a75 Added validate command 2020-01-30 16:34:43 +11:00
Mike Farah
6ec8386f9e Fixed bad yaml handling 2020-01-30 16:32:28 +11:00
Mike Farah
4dbe3636c2 Splat array is now the fallback instead of parsing int 2020-01-30 15:11:47 +11:00
Mike Farah
4a5bd0ff5b No need to log error 2020-01-30 15:00:27 +11:00
Mike Farah
44f36833cf Fixed delete array pattern matching 2020-01-30 14:55:58 +11:00
Mike Farah
789ea02096 Merge branch 'v3' 2020-01-30 13:01:16 +11:00
Mike Farah
7c27491dd6 Removed user docs 2020-01-30 10:32:34 +11:00
Mike Farah
720cc8f798 Update cli docs 2020-01-30 09:58:21 +11:00
Mike Farah
abda0e38af Removed unused dep 2020-01-22 16:39:41 +11:00
Mike Farah
5cd4c347b0 Updated user docs link 2020-01-22 16:25:52 +11:00
Mike Farah
1a4d8158ba Removed custom value parsing logic 2020-01-20 08:42:08 +11:00
Mike Farah
8a65822b0b Updated docs to include value parsing 2020-01-20 08:35:03 +11:00
Ryan SIU
b7148adf20 #323 Fix the unit test 2020-01-15 14:02:48 +11:00
Ryan SIU
64d38e9f03 #323 Refactor the cobra command with standard structure 2020-01-15 14:02:48 +11:00
Mike Farah
3a387e65be Updated travis ci build 2020-01-15 09:02:06 +11:00
Mike Farah
56ba7c9a43 Updated travis ci build 2020-01-15 09:01:08 +11:00
Mike Farah
d203ec7b56 Accidently changed sample.yaml 2020-01-15 08:55:01 +11:00
Mike Farah
e2f79a3dae bolden contribute disclaimer 2020-01-13 19:45:24 +11:00
Mike Farah
2d7be26ad5 wip update docs 2020-01-13 16:58:11 +11:00
Mike Farah
350a8343e9 adv search with prefix! 2020-01-11 19:52:33 +11:00
Mike Farah
a3f8f9df10 more bits 2020-01-11 19:38:16 +11:00
Mike Farah
35fd5b7ae4 Extracted out is path expression checking logic 2020-01-11 19:30:27 +11:00
Mike Farah
2d237e7e8e it works! wip 2020-01-11 19:13:52 +11:00
Mike Farah
74c7a4e027 it works! wip 2020-01-11 18:52:15 +11:00
Mike Farah
f18c5161e0 Fixed sponge instructions 2020-01-11 16:01:32 +11:00
Mike Farah
eeeeeffd7b Added note about v3 2020-01-11 11:44:02 +11:00
Mike Farah
96955ffa9c release notes 2020-01-11 09:55:24 +11:00
Mike Farah
9361b8b3e9 Beta 2020-01-11 09:14:32 +11:00
Mike Farah
24dcb56466 Inc version - fix help text 2020-01-11 09:13:42 +11:00
Mike Farah
728cbe991a Print path is more accurate than keys (i think) 2020-01-11 09:07:39 +11:00
Mike Farah
854f5f0fc9 wip json encoding 2020-01-10 22:01:59 +11:00
Mike Farah
feba7b04fa Added path stack to string test 2020-01-09 21:36:05 +11:00
Mike Farah
0621307391 Fixed linting errors 2020-01-09 21:27:52 +11:00
Mike Farah
924eb6c462 Added missing functions to interface 2020-01-09 21:18:24 +11:00
Mike Farah
52eef67e37 more tests, some refactoring 2020-01-09 08:17:56 +11:00
Mike Farah
38d35185bc Can overwrite and append with merge 2020-01-06 16:27:00 +13:00
Mike Farah
d8c29b26c1 Merge can allow empty merges! 2020-01-06 16:22:24 +13:00
Mike Farah
e3f4eedd51 Fixed merge new array 2020-01-06 10:12:38 +13:00
Mike Farah
690da9ee74 Fixed merge new array 2020-01-06 10:12:30 +13:00
Mike Farah
1f7f1b0def Merge arrays! 2020-01-05 17:28:24 +13:00
Mike Farah
1aa5ec1d40 Merge! wip 2020-01-05 17:14:14 +13:00
Mike Farah
a065a47b37 Fixed tests 2020-01-05 16:22:18 +13:00
Mike Farah
625cfdac75 wip; 2019-12-31 15:21:39 +13:00
Mike Farah
4dbdd4a805 Deep splat! 2019-12-30 16:51:07 +13:00
Mike Farah
8a6af1720d Fixed modify array issue! 2019-12-30 11:21:21 +13:00
Mike Farah
0652f67a91 Refactored! 2019-12-28 20:19:37 +13:00
Mike Farah
df52383ffb Delete works! needs refactor 2019-12-28 10:51:54 +13:00
Mike Farah
707ad09ba5 Refactor wip 2019-12-27 19:06:58 +11:00
Mike Farah
cf389bed4a Refactor wip 2019-12-27 19:06:08 +11:00
Mike Farah
ff5b23251b Refactor wip 2019-12-25 12:11:04 +11:00
Mike Farah
9925b26b9d Added Key and Value printing tests 2019-12-24 10:46:21 +11:00
Mike Farah
93dbe80a77 wip 2019-12-24 10:35:57 +11:00
Mike Farah
27604289f4 Log out original error if removing temporary file failed 2019-12-23 12:09:38 +11:00
Mike Farah
3f36a18791 Add readme notice re v3 2019-12-23 10:01:55 +11:00
Mike Farah
1e541cd65f wip handle aliases when printing keys 2019-12-23 09:25:44 +11:00
Mike Farah
5204a13685 Show paths 2019-12-23 09:08:00 +11:00
Mike Farah
3d3eaf3034 Return path, smart print 2019-12-22 17:16:03 +11:00
Mike Farah
4fb44dbc47 Return path, smart print 2019-12-22 17:13:11 +11:00
Mike Farah
784513dd18 Merge anchors - refactored 2019-12-22 15:35:16 +11:00
Mike Farah
865a55645c Merge anchors - refactored 2019-12-22 15:33:54 +11:00
Mike Farah
949bf1c1d7 Merge anchors - wip 2019-12-22 15:15:15 +11:00
Mike Farah
19fe718cfb Aliases! 2019-12-16 21:09:23 +11:00
Mike Farah
290579ac7f Handle simple aliases 2019-12-16 20:38:55 +11:00
Mike Farah
d7392f7b58 Refactoring 2019-12-16 16:46:20 +11:00
Mike Farah
a3cebec2fd Added prefix command 2019-12-16 16:17:01 +11:00
Mike Farah
b81fd638d7 wip - new node 2019-12-15 19:34:05 +11:00
Mike Farah
2344638da4 Fixed delete splat 2019-12-15 18:53:49 +11:00
Mike Farah
8be006fba4 Fixed delete splat 2019-12-15 18:52:37 +11:00
Mike Farah
53a4a47ce3 wip - prefix splat 2019-12-15 18:38:40 +11:00
Mike Farah
5988d0cffa Simplified 2019-12-15 18:24:23 +11:00
Mike Farah
b7640946ac Delete! 2019-12-15 17:31:26 +11:00
Mike Farah
d061b2f9f9 Can delete arrays 2019-12-12 20:47:22 +11:00
Mike Farah
8c0046a622 Refactoring 2019-12-09 13:57:38 +11:00
Mike Farah
586ffb833b Refactoring 2019-12-09 13:57:10 +11:00
Mike Farah
9771e7001c splatting 2019-12-09 13:44:53 +11:00
Mike Farah
8da9a81702 visitor! 2019-12-08 15:59:24 +11:00
Mike Farah
d97f1d8be2 recurse 2019-12-08 15:37:30 +11:00
Mike Farah
dad61ec615 remove json conversion for now 2019-12-06 16:52:00 +11:00
Mike Farah
676fc63219 remove json conversion for now 2019-12-06 16:41:21 +11:00
Mike Farah
972e2b9575 wip 2019-12-06 16:36:42 +11:00
Mike Farah
aad15ccc6e better v3 2019-12-06 15:57:46 +11:00
Conor Nosal
5fc13bdccd update imports for v2 module path 2019-12-06 13:58:56 +11:00
Conor Nosal
95fec2984e Move parseValue to yqlib/value_parser.go 2019-12-06 13:58:56 +11:00
Conor Nosal
64d1e58f97 test coverage and linting 2019-12-06 13:58:56 +11:00
Conor Nosal
4b3fbb878f Split marshal package from yqlib, implement interfaces 2019-12-06 13:58:56 +11:00
Conor Nosal
26a09e6ec0 Move implementation files to yqlib and test packages to allow for imports:
- Move data_navigator, json_converter, merge, and path_parser to pkg/yqlib
- Extract yamlToString from yq to pkg/yqlib/yaml_converter
- Move utils_test to test/utils
2019-12-06 13:58:56 +11:00
Mike Farah
ceafed30f9 attempt to fix go get 2019-12-02 10:38:44 +11:00
Mike Farah
b8b2c9de61 attempt to fix go get 2019-12-02 10:37:06 +11:00
Mike Farah
f5fdf98c38 Updating release instructions 2019-11-13 12:26:33 +11:00
Mike Farah
29986db8f8 Update doc for go get 2019-11-13 12:25:24 +11:00
Mike Farah
1f4e3a9cde Ignore vendor folder 2019-11-06 11:45:31 +11:00
Mike Farah
b6da773dde Bumbed snapcraft version 2019-11-01 12:40:35 +11:00
Mike Farah
8020d4253b Updated README 2019-11-01 12:40:21 +11:00
Mike Farah
3c701fe98e Incremented version 2019-11-01 10:47:38 +11:00
Mike Farah
97d1aa2b26 Added formatter, fixed docker build 2019-10-31 08:21:19 +11:00
Elliot
d05391e244 update ci to use go 1.13, switch to golangci-lint 2019-10-31 08:21:19 +11:00
Elliot
d1cec1ad18 fix to make it work with modules 2019-10-31 08:21:19 +11:00
Mike Farah
fe5842e5f9 Fix: Only remove original file if copying was successful 2019-08-27 09:21:39 +10:00
Aleksandr Sergin
e0d8cd6bf6 yq small fix 2019-08-27 09:18:38 +10:00
Aleksandr Sergin
8f5ffe47ff return to old behavior, small fix. 2019-08-27 09:18:38 +10:00
Aleksandr Sergin
5acc1e661e fix linter error 2019-08-27 09:18:38 +10:00
Aleksandr Sergin
a9c0ef571c fix linter error 2019-08-27 09:18:38 +10:00
Aleksandr Sergin
7a28531f2f fix linter error 2019-08-27 09:18:38 +10:00
Aleksandr Sergin
5de2bea1b4 fix linter error 2019-08-27 09:18:38 +10:00
Aleksandr Sergin
7320b8d3c9 fix linter error 2019-08-27 09:18:38 +10:00
Aleksandr Sergin
2f70e6f27a fix linter error 2019-08-27 09:18:38 +10:00
Aleksandr Sergin
a9e871ee00 add Move func to avoid 'invalid cross-device link' 2019-08-27 09:18:38 +10:00
Azimjon Ilkhomov
bc4bab9380 Fix typo 2019-08-05 11:30:20 +10:00
Mike Farah
665d9079fa Added goreport to readme 2019-07-23 10:07:59 +10:00
Mike Farah
7b54a44fcf Fixed readme for bash docker alias to bash function, thanks @dead10ck 2019-07-23 09:57:58 +10:00
Kenny Jones
94148e4398 Merge pull request #256 from arnaud-deprez/patch-1
doc: use '--rm' instead of '-rm' for docker command
2019-07-02 09:32:51 -04:00
Arnaud Deprez
f8553162ca doc: use '--rm' instead of '-rm' for docker command 2019-07-02 12:17:35 +00:00
Eduardo Minguez Perez
be532bf2fe Added '-rm' docker flag to remove the containers
Also, added a handy alias just in case.
2019-06-10 21:59:40 +10:00
Mike Farah
e9b8265ca3 Updated docs re creating a new array 2019-05-16 14:57:34 +10:00
Mike Farah
0f8b864321 Updated instructions 2019-05-16 14:56:00 +10:00
Mike Farah
e5bcfedbe9 updating docs 2019-05-16 14:55:32 +10:00
Mike Farah
a5f5fb2562 Publish moves files after uploading for speedy retries 2019-05-16 14:54:58 +10:00
Mike Farah
84de9c078d Improved handling of numeric keys
When there is no match at a given path, numeric keys are assumed to be strings.
To create an array '+' must be used.

e.g: yq n thing[+].cat fred
will create an array under thing, whereas
yq n thing[0].cat fred
will create a map under thing, with a key '0'
2019-05-16 10:03:54 +10:00
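
Sketching the two cases described above with the v2/v3-era new command (the YAML shown is an approximation of the resulting document, not captured output):

    # '+' appends, so an array is created under 'thing', producing roughly:
    yq n 'thing[+].cat' fred
    # thing:
    # - cat: fred

    # With no existing match, a numeric index is treated as a string key,
    # so a map with the key "0" is created instead, roughly:
    yq n 'thing[0].cat' fred
    # thing:
    #   "0":
    #     cat: fred
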
Mike Farah
c7f5261036 Bump version 2019-05-16 10:03:54 +10:00
Georgi Knox
6e35356a84 add help description 2019-05-16 09:35:16 +10:00
Mike Farah
b2fe3e6738 fixing delete splat 2019-05-14 11:20:41 +10:00
Mike Farah
774badfef4 Can delete splat!
Fixes https://github.com/mikefarah/yq/issues/175
2019-05-13 09:48:05 +10:00
Mike Farah
c4e9516aa6 Prefix matching splat
Fixes https://github.com/mikefarah/yq/issues/218
2019-05-13 09:32:08 +10:00
Mike Farah
323089eb64 fixed tests for write array splat 2019-05-13 09:13:45 +10:00
Mike Farah
53289366a5 Write array splat 2019-05-01 08:55:44 +10:00
Mike Farah
cda9a82906 Refactoring write command to allow splat 2019-05-01 08:40:35 +10:00
Mike Farah
2dbde6b9fb Can process numeric keys
Fixes: https://github.com/mikefarah/yq/issues/215
2019-04-30 09:43:09 +10:00
Mike Farah
f8c1c3c1b4 Updated instructions w.r.t keys and values starting with dashes 2019-04-29 16:14:33 +10:00
Mike Farah
238a1241d2 Added snap specific notes
https://forum.snapcraft.io/t/requesting-classic-confinement-for-yq/10559
2019-03-27 09:27:07 +11:00
Mike Farah
8a61ef072a Revert "Snapcraft classic confinment to allow access to system resources"
Snap doesn't let me use classic

This reverts commit 4f178d23174c7b730c8142e555fa2f2b706f7ea9.
2019-03-25 09:28:06 +11:00
Mike Farah
4f178d2317 Snapcraft classic confinment to allow access to system resources
https://docs.snapcraft.io/snap-confinement/6233
2019-03-25 09:19:29 +11:00
Mike Farah
133e55105c Bump snapcraft go version 2019-03-22 16:32:30 +11:00
Mike Farah
5e5468af3b Release instructions update 2019-03-22 16:03:11 +11:00
Mike Farah
9b4972e46e Increment version for new functionality 2019-03-22 09:15:26 +11:00
Renzo Crisóstomo
23543ee031 Add test for --allow-empty flag in merge command 2019-03-22 09:13:39 +11:00
Renzo Crisóstomo
75c7d40c44 Add --allow-empty flag to merge command 2019-03-22 09:13:39 +11:00
Mikhail Novosyolov
44b8a5e80f Fix Debian 'Architecture'
When it's 'all', architecture-independent packages are built, and so now an x86_64 executable is packaged to noarch packaged.
2019-03-21 12:39:47 +11:00
Mike Farah
77c8f22a79 Bump golang version to 1.11 2019-01-21 09:37:22 +11:00
Roberto Mier Escandon
386a0ca3c3 Bump debian pkg to 2.2-1 version 2019-01-21 09:06:43 +11:00
Mike Farah
53a20d4421 Bumb version 2019-01-07 10:23:37 +11:00
Kyle Titus
478208b7c4 Added Windows support for the "--inplace" command flag 2019-01-07 10:07:08 +11:00
Mike Farah
1159d0a212 Fixing snapcraft yml 2018-11-21 13:39:03 +11:00
Mike Farah
8861b392a8 Bump version 2018-11-20 09:49:44 +11:00
Mike Farah
16bde80334 Prefix now supports arrays 2018-11-20 09:47:17 +11:00
matfax
8a3fb32f36 Update documentation 2018-11-20 08:53:25 +11:00
matfax
48dcc15281 feat: add prefix command 2018-11-20 08:53:25 +11:00
Matthias Fax
d7040e3933 Bump Alpine version to 3.8
There has been a security issue with older Alpine versions and their package manager.
https://justi.cz/security/2018/09/13/alpine-apk-rce.html

Not sure about 3.7, but the latest 3.8 image has it fixed.
2018-11-19 08:39:20 +11:00
Mike Farah
ef579d4ccf Updated release instructions 2018-10-30 13:18:52 +11:00
Mike Farah
785ee68a76 Improved docker build process 2018-10-25 17:49:46 +11:00
Mike Farah
90fe9c6512 Version bump 2018-10-25 16:05:43 +11:00
Thad Craft
a9ade5a832 fixing test lint 2018-10-25 15:46:57 +11:00
Thad Craft
8d6e3a6a75 copying file permissions from original file when inline merging - Closes #180 2018-10-25 15:46:57 +11:00
Mike Farah
7a6689eb40 Fixed latest linting issues 2018-08-06 16:24:06 +10:00
Mike Farah
e6660e2460 Update README.md 2018-07-31 19:04:34 +10:00
Mike Farah
28169b04f7 Added build support for all linux architectures supported by gox 2018-07-23 09:16:52 +10:00
Mike Farah
742cf748ac Added support for PPC architectures 2018-07-18 13:45:15 +10:00
Mike Farah
54c06cdb4c Update release instructions re version tag 2018-07-15 08:18:11 +10:00
Roberto Mier Escandon
ef5f745da3 Fix version typo 2018-07-11 08:49:37 +10:00
Roberto Mier Escandon
25025f2771 Bump debian package version 2018-07-11 08:49:37 +10:00
Mike Farah
ebb8b34b31 Updating readme badges 2018-07-10 22:00:50 +10:00
Mike Farah
9a5e4ee828 Fixed whitespace in badge 2018-07-10 21:51:47 +10:00
Benedikt Franke
0a2dd29940 Add badge for docker build 2018-07-10 21:49:59 +10:00
Mike Farah
50dfce86c9 Updated mkdocs library 2018-07-10 21:29:41 +10:00
Mike Farah
e55006e935 Document multi document read feature 2018-07-10 21:24:13 +10:00
Mike Farah
d8fed62f03 merge documentation 2018-07-10 21:24:13 +10:00
Mike Farah
52a39bf31e Incrementing snapcraft version 2018-07-10 21:12:32 +10:00
Mike Farah
d84254c30f moar tests 2018-07-08 22:04:31 +10:00
Mike Farah
18bb4eee96 moar tests 2018-07-08 21:57:56 +10:00
Mike Farah
86b9fe3ef9 Can read from all documents 2018-07-08 21:47:01 +10:00
Mike Farah
2c15048ddb Added merge with append 2018-07-07 15:26:56 +10:00
Mike Farah
ce2ee42f71 Release instructions for gopkg 2018-07-01 14:45:19 +10:00
Mike Farah
c2c49dcb17 Fixed help length to prevent horizontal scroll in README 2018-06-27 19:37:18 +10:00
Mike Farah
60de18391c Incrementing version for next release 2018-06-27 12:06:46 +10:00
Mike Farah
c86f8b426b Fixed writing inplace from docker 2018-06-27 12:06:31 +10:00
Mike Farah
b3532e0a61 Cache devtools and vendor libs in docker 2018-06-27 12:05:52 +10:00
Mike Farah
b3b60665e4 Fixed toJson command line option, should only apply to read command 2018-06-26 14:09:56 +10:00
Mike Farah
df08b055cf Shorten instructions in readme to avoid horizontal scroll 2018-06-20 19:50:19 +10:00
Mike Farah
e822313e82 Merge branch 'multi' 2018-06-20 19:48:44 +10:00
Mike Farah
a9f25c9d76 Added more snapcraft instructions 2018-06-20 19:46:47 +10:00
Roberto Mier Escandon
0a8c268dcc Bumped deb version 2018-06-20 19:28:44 +10:00
Mike Farah
a0e70279a8 Updating snapcraft version 2018-06-20 17:51:07 +10:00
Mike Farah
25c9c22d8d Updating docs 2018-06-20 14:29:17 +10:00
Mike Farah
d46d555b07 Incrementing version 2018-06-20 14:29:10 +10:00
Mike Farah
fb87f638f2 Multi doc supports updating all docs 2018-06-20 11:45:51 +10:00
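A hedged illustration of updating every document at once, assuming the `-d` document-index flag; file, path and value are placeholders:

```bash
# Set .version in each document of a multi-document file and print the result.
yq write -d'*' multi.yml version 2.0
```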
Mike Farah
facc81d1f4 github version of mousetrap required for xcompile 2018-06-20 08:14:14 +10:00
Mike Farah
c1f9065c68 pflag has to be github :eye_roll: 2018-06-18 20:12:09 +10:00
Mike Farah
be84cc3082 Add pflag back in 2018-06-18 20:08:41 +10:00
Mike Farah
a2571da1a1 Updated docs to refer to gopkg.in when using go get 2018-06-18 11:45:46 +10:00
Mike Farah
6d6e476ac8 Use gopkg managed versions of dependencies, for better go get support 2018-06-18 11:37:42 +10:00
Mike Farah
ae0c042ae6 Use gopkg managed version of yaml to properly support go get 2018-06-18 11:12:52 +10:00
Mike Farah
867ec92d3a Version bump 2018-06-15 20:50:20 +10:00
Mike Farah
113586b5e0 Updating help for multi doc 2018-06-15 20:31:29 +10:00
Mike Farah
c38f19e0a9 Enabled multi document support for merge (first document only) 2018-06-15 16:48:36 +10:00
Mike Farah
8ca85b1c64 Simplified merge command 2018-06-15 16:40:52 +10:00
Mike Farah
08870f8ec9 Simplified 'new' command 2018-06-15 16:21:18 +10:00
Mike Farah
94b217984c Better error handling 2018-06-15 16:11:13 +10:00
Mike Farah
2f5a481cc3 Detect when there is no document X to update 2018-06-15 09:54:11 +10:00
Mike Farah
1a4064429d Delete now supports multi docs! 2018-06-15 09:43:20 +10:00
Mike Farah
1b22e1d812 Fixed delete command for arrays 2018-06-15 09:03:42 +10:00
Mike Farah
297522cbdd Write supports multidoc yaml, better use of yaml library streaming 2018-06-15 08:39:59 +10:00
Mike Farah
be991fdacd Read test 2018-06-15 08:39:29 +10:00
Mike Farah
be08214773 fixed version test 2018-06-13 10:00:01 +10:00
Mike Farah
9e971ebeae Beta version of multiple document support for read, write coming soon 2018-06-13 09:26:52 +10:00
Mike Farah
f340db5795 Extract out reading of write commands 2018-06-13 09:24:37 +10:00
Mike Farah
ab852ceafa Separate reading stream from processing 2018-06-13 09:11:54 +10:00
Mike Farah
06a843e9b2 Read now handles multiple documents 2018-06-12 15:41:09 +10:00
Mike Farah
ebdc092688 Updating user docs 2018-06-12 10:17:09 +10:00
Roberto Mier Escandon
27089d1ca1 Updated some documentation pointing to deb install option 2018-06-12 10:12:55 +10:00
Roberto Mier Escandon
1853585d22 Add debian packaging 2018-06-12 10:12:55 +10:00
Mike Farah
0124d26086 Added mkdocs instructions to Contribute 2018-06-12 10:11:15 +10:00
Mike Farah
0d68bea3dc Release instructions 2018-05-08 10:55:16 +10:00
Mike Farah
54603b3607 Updated version in snapcraft.yml 2018-05-08 10:06:41 +10:00
Mike Farah
c86aeca325 Fixed script for publishing from a mac 2018-05-07 20:39:18 +10:00
Mike Farah
a4f124f24f Updated repo name 2018-05-07 16:40:23 +10:00
Mike Farah
090b377fa5 Incrementing version to reflect new delete feature! 2018-05-07 16:38:26 +10:00
Mike Farah
3a4d62d820 Adding delete command documentation 2018-05-07 16:34:29 +10:00
Mike Farah
6053c8c136 Use dev scripts for making docker image 2018-05-07 16:31:26 +10:00
Mike Farah
c5a45ba7d5 Removed dud instructions 2018-05-07 16:26:38 +10:00
Matthew Huxtable
56a0771cd1 Mark test helpers as such 2018-05-07 15:52:29 +10:00
Matthew Huxtable
8072e66d46 Add delete command
The delete (short option "d") will delete the YAML subtree at the
provided path in the specified file (or STDIN), if the node exists.

More complex support is currently omitted, for example:

  - specify nodes to delete using an external script
  - deleting common elements from all elements of an array
2018-05-07 15:52:29 +10:00
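Based on the commit message above, a minimal usage sketch; the file name and path are placeholders, and the `-` for STDIN is an assumption about the CLI conventions of that era:

```bash
# Delete the subtree at b.c from sample.yml ("d" is the short form of "delete").
yq d sample.yml b.c

# The same, reading the document from STDIN instead of a file.
cat sample.yml | yq d - b.c
```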
Tim Hobbs
26153b3eb5 Rename to yq 2018-04-09 15:58:12 +10:00
Tim Hobbs
f2e10f21c7 Rename to yq 2018-04-09 15:58:12 +10:00
spawnia
d3ecf7aa88 Add Dockerfile for building the official CLI container 2018-04-03 09:25:00 +10:00
Mike Farah
ee8ffd458a Multiline value fix - multi line strings no longer printed as a yaml block
Although printing the string as a yaml block can be argued to be technically correct, in practical terms it's more useful to just print out the multiline string as is.
2018-03-27 16:22:24 +11:00
Mike Farah
8f15dba812 Updated docs to explicitly put array paths in quotes 2018-02-28 08:07:34 +11:00
Mike Farah
92ce195424 Fixed version test 2018-02-19 14:50:46 +11:00
Mike Farah
e725a2cec5 Fixed docs 2018-02-19 14:21:38 +11:00
Mike Farah
37de7a3505 Don't default to dev version 2018-02-19 14:07:21 +11:00
Mike Farah
e96794a420 Remove version pre release
this is appearing in the snap release :(
2018-02-19 13:27:22 +11:00
Mike Farah
120ef8e4dc drop dev from version 2018-02-19 09:45:10 +11:00
David Bliss
db1954da25 Hard-code version and change grade to 'stable' 2018-02-19 08:23:30 +11:00
David Bliss
e552dbb6ab Add install instructions to documentation 2018-02-19 08:23:30 +11:00
David Bliss
298126864f Remove comment 2018-02-19 08:23:30 +11:00
David Bliss
d30cd8cc75 Add snap target to Makefile 2018-02-19 08:23:30 +11:00
David Bliss
e6ee86930b Support building snap packages 2018-02-19 08:23:30 +11:00
Mike Farah
0ea3c71df7 No longer using circle ci 2017-12-27 14:22:17 +11:00
Mike Farah
ef99bedf45 Added circle ci dashtic config 2017-12-27 14:16:57 +11:00
Mike Farah
6e1a5aef2c Add dev tools to circle ci 2017-12-27 14:10:47 +11:00
Mike Farah
70bd860287 Added circle ci config 2017-12-27 14:08:28 +11:00
Mike Farah
0abda0098c Now on brew! 2017-12-25 06:30:49 +11:00
Mike Farah
a409a7e82b Missed one replace in docs 2017-12-19 12:01:23 +11:00
Mike Farah
0e4f9e8579 Update docs 2017-12-19 11:59:27 +11:00
Mike Farah
0c932ba7dc Updated travis ci image 2017-12-19 11:39:54 +11:00
Mike Farah
cb48ba7173 Rename to yq 2017-12-18 15:29:41 +11:00
Mike Farah
dc4f8a6adb Fixed vendor scripts 2017-12-18 15:29:41 +11:00
Mike Farah
1f6d7a50b2 Clearer README 2017-12-18 15:29:41 +11:00
Kenny Jones
6fc3bdf58f Merge pull request #103 from mikefarah/task/upgrade-to-golang1.9
Task: Upgrade to Golang 1.9
2017-09-27 07:07:16 -04:00
kenjones
87b33e8603 Task: Upgrade to Golang 1.9 2017-09-27 07:05:18 -04:00
kenjones
cc7eb84388 Task: Add docs on keys with dots
Adds documentation on how to use a key lookup indicator for dealing
with keys that have dots when specifying a path as part of create,
read, and write commands.

Resolves: #22
2017-09-25 22:36:49 -04:00
kenjones
9e3f8ebd0a Task: Prepare for development 2017-09-25 09:41:27 -04:00
kenjones
28bcbd321f Release v1.13.1 2017-09-25 09:35:23 -04:00
Kenny Jones
9dd4503b23 Merge pull request #42 from mikefarah/bugfix/append-empty-array
Bugfix: Append when array empty results in null value
2017-09-25 09:34:00 -04:00
kenjones
185d1faadb Bugfix: Append when array empty results in null value
Initialize new array as empty instead of having a single value.

Resolves: #41
2017-09-25 09:31:06 -04:00
kenjones
7b004cb456 Task: Prepare for development 2017-09-24 21:28:28 -04:00
kenjones
54c73eb34f Release v1.13.0 2017-09-24 21:20:02 -04:00
Kenny Jones
3b03a0852a Merge pull request #40 from kenjones-cisco/task/releasing
Task: Add release process
2017-09-24 21:13:06 -04:00
kenjones
a00e6c5316 Task: Add release process 2017-09-24 21:10:57 -04:00
Kenny Jones
9d1a5b17b8 Merge pull request #39 from kenjones-cisco/feature/append-array
Feature: Add append to array
2017-09-23 20:24:09 -04:00
kenjones
c5f80a105d Feature: Add append to array
Adds the ability to append a value to an array instead of having to
know the index of the values within the array.

Resolves: #17
2017-09-23 20:21:16 -04:00
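A small sketch of the append feature described above, assuming the `[+]` "next element" index it introduced; the file name is a placeholder:

```bash
# Append a value to the array at b.things without knowing its current length.
yq write sample.yml 'b.things[+]' newThing
```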
Kenny Jones
c17f8df8f8 Merge pull request #38 from kenjones-cisco/task/add-merge-docs
Task: Add `merge` command docs
2017-09-23 15:36:31 -04:00
kenjones
66c8390d54 Task: Add merge command docs
Resolves: #35
2017-09-23 15:33:23 -04:00
Kenny Jones
dda9b1f087 Merge pull request #37 from kenjones-cisco/bugfix/read-yaml-array
Bugfix: Read yaml array
2017-09-23 14:04:33 -04:00
Kenny Jones
683de28c64 Merge pull request #34 from kenjones-cisco/feature/add-version
Feature: Adds support for --version
2017-09-23 14:04:18 -04:00
kenjones
51fa1a87b7 Bugfix: Read yaml array
Parsing an array into MapSlice results in `[{nil, nil}]`.
Parse into `[]map[interface{}]interface{}` allows for the processing of
top level arrays in the document.

Resolves: #23
2017-09-23 13:53:04 -04:00
Kenny Jones
b9ac6a3d9d Merge pull request #36 from kenjones-cisco/bugfix/write-empty-array
Bugfix: Write empty array value
2017-09-23 09:39:01 -04:00
kenjones
499974c27e Bugfix: Write empty array value
When the value is `[]` set the value to empty array instead of `'[]'`.

Resolves: #21
2017-09-23 09:36:17 -04:00
kenjones
72bd88cfa5 Feature: Adds support for --version
Resolves: #24
2017-09-23 01:08:39 -04:00
kenjones
6980be3800 Feature: Adds merge command
Adds merge command for merging multiple yaml files together.

Resolves: #31
2017-09-23 14:45:59 +10:00
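The resulting command line, in the same form later used in the issue templates:

```bash
# Merge data2.yml into data1.yml and print the combined document to stdout.
yq merge data1.yml data2.yml
```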
Kenny Jones
2933ea1684 Merge pull request #33 from kenjones-cisco/bugfix/acceptance
Bugfix: Resolve failing acceptance test
2017-09-23 00:00:48 -04:00
kenjones
cf2f23d747 Bugfix: Resolve failing acceptance test 2017-09-22 23:58:50 -04:00
Kenny Jones
45e9ad870f Merge pull request #30 from kenjones-cisco/task/refactor-add-tests
Task: Increase test coverage, includes refactor
2017-09-22 23:09:04 -04:00
kenjones
53b2c64747 Task: Increase test coverage, includes refactor
Adds test cases to increase test coverage.
Refactors code to enable adding tests by reducing the number of
locations where `os.Exit()` is called from.
2017-09-22 23:01:46 -04:00
Mike Farah
359ca5a117 Merge branch 'kenjones-cisco-bugfix/nonstring-keys' 2017-09-23 08:56:55 +10:00
Mike Farah
79baa49eaa Merge branch 'bugfix/nonstring-keys' of git://github.com/kenjones-cisco/yaml into kenjones-cisco-bugfix/nonstring-keys 2017-09-23 08:54:08 +10:00
Mike Farah
2cda78ce0b Vendor shas 2017-09-23 08:47:18 +10:00
Mike Farah
6d1e61c410 Merge branch 'master' into bugfix/nonstring-keys 2017-09-23 08:38:37 +10:00
kenjones
86639acf70 Task: Simplify development
The base directory has all shell scripts in scripts/
and all example/test files in examples/.
A Makefile provides all the commands with helpful information.
If a developer simply types `make` then vendor is properly updated,
the code is formatted, linted, tested, built, acceptance test run,
and installed.

Linting errors resolved.
Ignored test case (`TestParsePath`) updated to work as expected.
2017-09-23 08:37:34 +10:00
kenjones
3beee3f804 Bugfix: Panic on non-string keys
Adds check if the key is an `int` or `bool`, and converts to a string
as part of the `toJSON` function.
Test cases added.

Resolves: #28
2017-09-21 12:01:03 -04:00
Mike Farah
1ed8e7017e Can create arrays 2017-08-08 17:04:30 +10:00
Mike Farah
5514d2300b Can update arrays 2017-08-08 16:55:57 +10:00
Mike Farah
5bb0934710 Handle arrays and strings when reading 2017-08-08 16:42:11 +10:00
Mike Farah
cdd78af4bc Fixed bug #11: Inability to set field to empty string 2017-06-06 10:19:32 +10:00
Mike Farah
d07e436065 Update docs re using values that look like flags 2017-05-03 10:42:53 +10:00
Mike Farah
b2a0964de2 switched from godeps to govendor, now supports new 'vendor' folder 2017-05-03 09:19:55 +10:00
Mike Farah
69e6bb354d Added doco regarding values starting with hyphens 2017-05-03 08:11:26 +10:00
Mike Farah
ec25886528 Removed redundant code, updated doc w.r.t reading json files 2017-04-19 15:45:45 +10:00
Mike Farah
c2000a446b Fixed writeCommands to maintain the same execution as was read in 2017-04-18 15:55:43 +10:00
Mike Farah
08ba0083be formatting 2017-04-18 09:14:06 +10:00
Mike Farah
909b62be54 Fixed ability to grow arrays 2017-04-18 08:53:27 +10:00
Mike Farah
0facf35d60 Fixed test flakiness when using printf 2017-04-14 12:56:50 +10:00
Mike Farah
a7c4813703 Updated doco 2017-04-14 12:40:03 +10:00
Mike Farah
373e0a5eb7 More tests 2017-04-14 12:39:55 +10:00
Mike Farah
124c57f9d9 README now links to doco 2017-04-13 15:44:31 +10:00
Mike Farah
fb73a793bd Added doco 2017-04-13 15:36:59 +10:00
Mike Farah
c22394b540 Updated readme 2017-04-12 21:30:29 +10:00
Mike Farah
b1ff47022b Updated readme 2017-04-12 21:20:43 +10:00
Mike Farah
82f1230db8 Added "new" command for creating new yaml 2017-04-12 21:10:00 +10:00
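A brief sketch of the new command, assuming the `yq new <path> <value>` form:

```bash
# Create a YAML document from scratch, without an input file.
yq new b.c cat
# roughly produces:
#   b:
#     c: cat
```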
Mike Farah
a3190f1504 Refactor write to allow new entries 2017-04-12 20:25:13 +10:00
Mike Farah
44ee869e47 Maintain order 2017-02-27 09:09:29 +11:00
Mike Farah
5d2d37a5f8 Fixed travis script, added Travis banner to readme 2017-02-13 07:55:06 +11:00
Mike Farah
e3fc944709 Added travis config 2017-02-13 07:28:48 +11:00
Mike Farah
6ee9db9a70 Added License (MIT) 2017-02-10 16:00:25 +11:00
Mike Farah
0d75edfb67 Fixed updating array bug (Issue 3 in git) 2017-02-09 13:58:47 +11:00
328 changed files with 16264 additions and 22850 deletions

1
.dockerignore Normal file

@ -0,0 +1 @@
bin/*

1
.github/FUNDING.yml vendored Normal file

@ -0,0 +1 @@
github: mikefarah

51
.github/ISSUE_TEMPLATE/bug_report_v3.md vendored Normal file

@ -0,0 +1,51 @@
---
name: Bug report - V3
about: Create a report to help us improve
title: ''
labels: bug, v3
assignees: ''
---
**Describe the bug**
A clear and concise description of what the bug is.
Note that any how to questions should be posted in the discussion board and not raised as an issue.
Version of yq: 3.X.X
Operating system: mac/linux/windows/....
Installed via: docker/binary release/homebrew/snap/...
**Input Yaml**
Concise yaml document(s) (as simple as possible to show the bug, please keep it to 10 lines or less)
data1.yml:
```yaml
this: should really work
```
data2.yml:
```yaml
but: it strangely didn't
```
**Command**
The command you ran:
```
yq merge data1.yml data2.yml
```
**Actual behavior**
```yaml
cat: meow
```
**Expected behavior**
```yaml
this: should really work
but: it strangely didn't
```
**Additional context**
Add any other context about the problem here.

51
.github/ISSUE_TEMPLATE/bug_report_v4.md vendored Normal file

@ -0,0 +1,51 @@
---
name: Bug report - V4
about: Create a report to help us improve
title: ''
labels: bug, v4
assignees: ''
---
**Describe the bug**
A clear and concise description of what the bug is.
Note that any how to questions should be posted in the discussion board and not raised as an issue.
Version of yq: 4.X.X
Operating system: mac/linux/windows/....
Installed via: docker/binary release/homebrew/snap/...
**Input Yaml**
Concise yaml document(s) (as simple as possible to show the bug, please keep it to 10 lines or less)
data1.yml:
```yaml
this: should really work
```
data2.yml:
```yaml
but: it strangely didn't
```
**Command**
The command you ran:
```
yq eval-all 'select(fileIndex==0) | .a.b.c' data1.yml data2.yml
```
**Actual behavior**
```yaml
cat: meow
```
**Expected behavior**
```yaml
this: should really work
but: it strangely didn't
```
**Additional context**
Add any other context about the problem here.


@ -0,0 +1,42 @@
---
name: Feature request - V4
about: Suggest an idea for this project
title: ''
labels: enhancement, v4
assignees: ''
---
**Please describe your feature request.**
A clear and concise description of what the request is and what it would solve.
Eg. I wish I could use yq to [...]
Note:
- how to questions should be posted in the discussion board and not raised as an issue.
- V3 will no longer have any enhancements.
**Describe the solution you'd like**
If we have data1.yml like:
(please keep to around 10 lines )
```yaml
country: Australia
```
And we run a command:
```bash
yq eval 'predictWeatherOf(.country)'
```
it could output
```yaml
temp: 32
```
**Describe alternatives you've considered**
A clear and concise description of any alternative solutions or features you've considered.
**Additional context**
Add any other context or screenshots about the feature request here.

31
.github/workflows/go.yml vendored Normal file

@ -0,0 +1,31 @@
name: Build

on: [push]

jobs:
  build:
    name: Build
    runs-on: ubuntu-latest
    steps:
      - name: Set up Go 1.15
        uses: actions/setup-go@v1
        with:
          go-version: 1.15
        id: go

      - name: Check out code into the Go module directory
        uses: actions/checkout@v2

      - name: Get dependencies
        run: |
          go get -v -t -d ./...
          if [ -f Gopkg.toml ]; then
            curl https://raw.githubusercontent.com/golang/dep/master/install.sh | sh
            dep ensure
          fi

      - name: Download deps
        run: |
          export PATH=${PATH}:`go env GOPATH`/bin
          scripts/devtools.sh
          make local build

70
.github/workflows/release.yml vendored Normal file

@ -0,0 +1,70 @@
name: Release YQ

on:
  push:
    tags:
      - 'v*'

jobs:
  publishGitRelease:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v2

      - uses: actions/setup-go@v2
        with:
          go-version: '^1.15'

      - name: Cross compile
        run: |
          sudo apt-get install rhash -y
          go get github.com/mitchellh/gox
          ./scripts/xcompile.sh

      - name: Create Release
        id: create_release
        uses: actions/create-release@v1.0.0
        env:
          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
        with:
          tag_name: ${{ github.ref }}
          release_name: ${{ github.ref }}
          draft: true
          prerelease: false

      - uses: shogo82148/actions-upload-release-asset@v1
        with:
          upload_url: ${{ steps.create_release.outputs.upload_url }}
          asset_path: build/*

  publishDocker:
    environment: dockerhub
    env:
      IMAGE_NAME: mikefarah/yq
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v2

      - name: Set up QEMU
        uses: docker/setup-qemu-action@v1
        with:
          platforms: all

      - name: Set up Docker Buildx
        id: buildx
        uses: docker/setup-buildx-action@v1
        with:
          version: latest

      - name: Available platforms
        run: echo ${{ steps.buildx.outputs.platforms }} && docker version

      - name: Build and push image
        run: |
          IMAGE_V_VERSION="$(git describe --tags --abbrev=0)"
          IMAGE_VERSION=${IMAGE_V_VERSION:1}
          SHORT_SHA1=$(git rev-parse --short HEAD)
          PLATFORMS="linux/amd64,linux/ppc64le,linux/arm64"
          echo "Building and pushing version ${IMAGE_VERSION} of image ${IMAGE_NAME}"
          echo ${{ secrets.DOCKER_PASSWORD }} | docker login -u ${{ secrets.DOCKER_USERNAME }} --password-stdin
          docker buildx build --platform "${PLATFORMS}" -t "${IMAGE_NAME}:${IMAGE_VERSION}" -t "${IMAGE_NAME}:latest" -t "${IMAGE_NAME}:4" \
            --push .

15
.gitignore vendored

@ -6,7 +6,9 @@
 # Folders
 _obj
 _test
+bin
 build
+build-done
 .DS_Store

@ -20,8 +22,19 @@ _cgo_gotypes.go
 _cgo_export.*
 _testmain.go

+coverage.out
+coverage.html
 *.exe
 *.test
 *.prof
 yaml
+vendor/
+tmp/
+cover/
+yq
+
+# snapcraft
+parts/
+prime/
+.snapcraft/
+yq*.snap

76
CODE_OF_CONDUCT.md Normal file

@ -0,0 +1,76 @@
# Contributor Covenant Code of Conduct
## Our Pledge
In the interest of fostering an open and welcoming environment, we as
contributors and maintainers pledge to making participation in our project and
our community a harassment-free experience for everyone, regardless of age, body
size, disability, ethnicity, sex characteristics, gender identity and expression,
level of experience, education, socio-economic status, nationality, personal
appearance, race, religion, or sexual identity and orientation.
## Our Standards
Examples of behavior that contributes to creating a positive environment
include:
* Using welcoming and inclusive language
* Being respectful of differing viewpoints and experiences
* Gracefully accepting constructive criticism
* Focusing on what is best for the community
* Showing empathy towards other community members
Examples of unacceptable behavior by participants include:
* The use of sexualized language or imagery and unwelcome sexual attention or
advances
* Trolling, insulting/derogatory comments, and personal or political attacks
* Public or private harassment
* Publishing others' private information, such as a physical or electronic
address, without explicit permission
* Other conduct which could reasonably be considered inappropriate in a
professional setting
## Our Responsibilities
Project maintainers are responsible for clarifying the standards of acceptable
behavior and are expected to take appropriate and fair corrective action in
response to any instances of unacceptable behavior.
Project maintainers have the right and responsibility to remove, edit, or
reject comments, commits, code, wiki edits, issues, and other contributions
that are not aligned to this Code of Conduct, or to ban temporarily or
permanently any contributor for other behaviors that they deem inappropriate,
threatening, offensive, or harmful.
## Scope
This Code of Conduct applies both within project spaces and in public spaces
when an individual is representing the project or its community. Examples of
representing a project or community include using an official project e-mail
address, posting via an official social media account, or acting as an appointed
representative at an online or offline event. Representation of a project may be
further defined and clarified by project maintainers.
## Enforcement
Instances of abusive, harassing, or otherwise unacceptable behavior may be
reported by contacting the project team at mikefarah@gmail.com. All
complaints will be reviewed and investigated and will result in a response that
is deemed necessary and appropriate to the circumstances. The project team is
obligated to maintain confidentiality with regard to the reporter of an incident.
Further details of specific enforcement policies may be posted separately.
Project maintainers who do not follow or enforce the Code of Conduct in good
faith may face temporary or permanent repercussions as determined by other
members of the project's leadership.
## Attribution
This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4,
available at https://www.contributor-covenant.org/version/1/4/code-of-conduct.html
[homepage]: https://www.contributor-covenant.org
For answers to common questions about this code of conduct, see
https://www.contributor-covenant.org/faq

8
CONTRIBUTING.md Normal file

@ -0,0 +1,8 @@
1. Install [golang](https://golang.org/)
2. Run `scripts/devtools.sh` to install the required devtools
3. Run `make [local] vendor` to install the vendor dependencies
4. Run `make [local] test` to ensure you can run the existing tests
5. Write unit tests - (see existing examples). Changes will not be accepted without corresponding unit tests.
6. Make the code changes.
7. Run `make [local] test` to lint code and run tests
8. Profit! ok no profit, but raise a PR and get kudos :)

25
Dockerfile Normal file

@ -0,0 +1,25 @@
FROM golang:1.15 as builder
WORKDIR /go/src/mikefarah/yq
# cache devtools
COPY ./scripts/devtools.sh /go/src/mikefarah/yq/scripts/devtools.sh
RUN ./scripts/devtools.sh
COPY . /go/src/mikefarah/yq
RUN CGO_ENABLED=0 make local build
# Choose alpine as a base image to make this useful for CI, as many
# CI tools expect an interactive shell inside the container
FROM alpine:3.12.3 as production
COPY --from=builder /go/src/mikefarah/yq/yq /usr/bin/yq
RUN chmod +x /usr/bin/yq
ARG VERSION=none
LABEL version=${VERSION}
WORKDIR /workdir
ENTRYPOINT ["/usr/bin/yq"]
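
Given the working directory and entrypoint above, a typical (illustrative) invocation mounts the current directory into /workdir and passes the remaining arguments straight to yq; the file name and expression are placeholders:

```bash
# Run the containerised yq against a local file.
docker run --rm -v "${PWD}":/workdir mikefarah/yq eval '.a.b' file.yml
```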

10
Dockerfile.dev Normal file

@ -0,0 +1,10 @@
FROM golang:1.15
COPY scripts/devtools.sh /opt/devtools.sh
RUN set -e -x \
&& /opt/devtools.sh
ENV PATH=/go/bin:$PATH
ENV CGO_ENABLED 0
ENV GOPATH /go:/yq

36
Godeps/Godeps.json generated

@ -1,36 +0,0 @@
{
"ImportPath": "github.com/mikefarah/yaml",
"GoVersion": "go1.4.3",
"Deps": [
{
"ImportPath": "github.com/cpuguy83/go-md2man/md2man",
"Comment": "v1.0.4",
"Rev": "71acacd42f85e5e82f70a55327789582a5200a90"
},
{
"ImportPath": "github.com/inconshreveable/mousetrap",
"Rev": "76626ae9c91c4f2a10f34cad8ce83ea42c93bb75"
},
{
"ImportPath": "github.com/russross/blackfriday",
"Comment": "v1.3",
"Rev": "8cec3a854e68dba10faabbe31c089abf4a3e57a6"
},
{
"ImportPath": "github.com/shurcooL/sanitized_anchor_name",
"Rev": "244f5ac324cb97e1987ef901a0081a77bfd8e845"
},
{
"ImportPath": "github.com/spf13/cobra",
"Rev": "d732ab3a34e6e9e6b5bdac80707c2b6bad852936"
},
{
"ImportPath": "github.com/spf13/pflag",
"Rev": "b084184666e02084b8ccb9b704bf0d79c466eb1d"
},
{
"ImportPath": "gopkg.in/yaml.v2",
"Rev": "53feefa2559fb8dfa8d81baad31be332c97d6c77"
}
]
}

5
Godeps/Readme generated

@ -1,5 +0,0 @@
This directory tree is generated automatically by godep.
Please do not edit.
See https://github.com/tools/godep for more information.

2
Godeps/_workspace/.gitignore generated vendored

@ -1,2 +0,0 @@
/pkg
/bin


@ -1,19 +0,0 @@
package md2man
import (
"github.com/mikefarah/yaml/Godeps/_workspace/src/github.com/russross/blackfriday"
)
func Render(doc []byte) []byte {
renderer := RoffRenderer(0)
extensions := 0
extensions |= blackfriday.EXTENSION_NO_INTRA_EMPHASIS
extensions |= blackfriday.EXTENSION_TABLES
extensions |= blackfriday.EXTENSION_FENCED_CODE
extensions |= blackfriday.EXTENSION_AUTOLINK
extensions |= blackfriday.EXTENSION_SPACE_HEADERS
extensions |= blackfriday.EXTENSION_FOOTNOTES
extensions |= blackfriday.EXTENSION_TITLEBLOCK
return blackfriday.Markdown(doc, renderer, extensions)
}


@ -1,269 +0,0 @@
package md2man
import (
"bytes"
"fmt"
"html"
"strings"
"github.com/mikefarah/yaml/Godeps/_workspace/src/github.com/russross/blackfriday"
)
type roffRenderer struct{}
func RoffRenderer(flags int) blackfriday.Renderer {
return &roffRenderer{}
}
func (r *roffRenderer) GetFlags() int {
return 0
}
func (r *roffRenderer) TitleBlock(out *bytes.Buffer, text []byte) {
out.WriteString(".TH ")
splitText := bytes.Split(text, []byte("\n"))
for i, line := range splitText {
line = bytes.TrimPrefix(line, []byte("% "))
if i == 0 {
line = bytes.Replace(line, []byte("("), []byte("\" \""), 1)
line = bytes.Replace(line, []byte(")"), []byte("\" \""), 1)
}
line = append([]byte("\""), line...)
line = append(line, []byte("\" ")...)
out.Write(line)
}
out.WriteString(" \"\"\n")
}
func (r *roffRenderer) BlockCode(out *bytes.Buffer, text []byte, lang string) {
out.WriteString("\n.PP\n.RS\n\n.nf\n")
escapeSpecialChars(out, text)
out.WriteString("\n.fi\n.RE\n")
}
func (r *roffRenderer) BlockQuote(out *bytes.Buffer, text []byte) {
out.WriteString("\n.PP\n.RS\n")
out.Write(text)
out.WriteString("\n.RE\n")
}
func (r *roffRenderer) BlockHtml(out *bytes.Buffer, text []byte) {
out.Write(text)
}
func (r *roffRenderer) Header(out *bytes.Buffer, text func() bool, level int, id string) {
marker := out.Len()
switch {
case marker == 0:
// This is the doc header
out.WriteString(".TH ")
case level == 1:
out.WriteString("\n\n.SH ")
case level == 2:
out.WriteString("\n.SH ")
default:
out.WriteString("\n.SS ")
}
if !text() {
out.Truncate(marker)
return
}
}
func (r *roffRenderer) HRule(out *bytes.Buffer) {
out.WriteString("\n.ti 0\n\\l'\\n(.lu'\n")
}
func (r *roffRenderer) List(out *bytes.Buffer, text func() bool, flags int) {
marker := out.Len()
out.WriteString(".IP ")
if flags&blackfriday.LIST_TYPE_ORDERED != 0 {
out.WriteString("\\(bu 2")
} else {
out.WriteString("\\n+[step" + string(flags) + "]")
}
out.WriteString("\n")
if !text() {
out.Truncate(marker)
return
}
}
func (r *roffRenderer) ListItem(out *bytes.Buffer, text []byte, flags int) {
out.WriteString("\n\\item ")
out.Write(text)
}
func (r *roffRenderer) Paragraph(out *bytes.Buffer, text func() bool) {
marker := out.Len()
out.WriteString("\n.PP\n")
if !text() {
out.Truncate(marker)
return
}
if marker != 0 {
out.WriteString("\n")
}
}
// TODO: This might now work
func (r *roffRenderer) Table(out *bytes.Buffer, header []byte, body []byte, columnData []int) {
out.WriteString(".TS\nallbox;\n")
out.Write(header)
out.Write(body)
out.WriteString("\n.TE\n")
}
func (r *roffRenderer) TableRow(out *bytes.Buffer, text []byte) {
if out.Len() > 0 {
out.WriteString("\n")
}
out.Write(text)
out.WriteString("\n")
}
func (r *roffRenderer) TableHeaderCell(out *bytes.Buffer, text []byte, align int) {
if out.Len() > 0 {
out.WriteString(" ")
}
out.Write(text)
out.WriteString(" ")
}
// TODO: This is probably broken
func (r *roffRenderer) TableCell(out *bytes.Buffer, text []byte, align int) {
if out.Len() > 0 {
out.WriteString("\t")
}
out.Write(text)
out.WriteString("\t")
}
func (r *roffRenderer) Footnotes(out *bytes.Buffer, text func() bool) {
}
func (r *roffRenderer) FootnoteItem(out *bytes.Buffer, name, text []byte, flags int) {
}
func (r *roffRenderer) AutoLink(out *bytes.Buffer, link []byte, kind int) {
out.WriteString("\n\\[la]")
out.Write(link)
out.WriteString("\\[ra]")
}
func (r *roffRenderer) CodeSpan(out *bytes.Buffer, text []byte) {
out.WriteString("\\fB\\fC")
escapeSpecialChars(out, text)
out.WriteString("\\fR")
}
func (r *roffRenderer) DoubleEmphasis(out *bytes.Buffer, text []byte) {
out.WriteString("\\fB")
out.Write(text)
out.WriteString("\\fP")
}
func (r *roffRenderer) Emphasis(out *bytes.Buffer, text []byte) {
out.WriteString("\\fI")
out.Write(text)
out.WriteString("\\fP")
}
func (r *roffRenderer) Image(out *bytes.Buffer, link []byte, title []byte, alt []byte) {
}
func (r *roffRenderer) LineBreak(out *bytes.Buffer) {
out.WriteString("\n.br\n")
}
func (r *roffRenderer) Link(out *bytes.Buffer, link []byte, title []byte, content []byte) {
r.AutoLink(out, link, 0)
}
func (r *roffRenderer) RawHtmlTag(out *bytes.Buffer, tag []byte) {
out.Write(tag)
}
func (r *roffRenderer) TripleEmphasis(out *bytes.Buffer, text []byte) {
out.WriteString("\\s+2")
out.Write(text)
out.WriteString("\\s-2")
}
func (r *roffRenderer) StrikeThrough(out *bytes.Buffer, text []byte) {
}
func (r *roffRenderer) FootnoteRef(out *bytes.Buffer, ref []byte, id int) {
}
func (r *roffRenderer) Entity(out *bytes.Buffer, entity []byte) {
out.WriteString(html.UnescapeString(string(entity)))
}
func processFooterText(text []byte) []byte {
text = bytes.TrimPrefix(text, []byte("% "))
newText := []byte{}
textArr := strings.Split(string(text), ") ")
for i, w := range textArr {
if i == 0 {
w = strings.Replace(w, "(", "\" \"", 1)
w = fmt.Sprintf("\"%s\"", w)
} else {
w = fmt.Sprintf(" \"%s\"", w)
}
newText = append(newText, []byte(w)...)
}
newText = append(newText, []byte(" \"\"")...)
return newText
}
func (r *roffRenderer) NormalText(out *bytes.Buffer, text []byte) {
escapeSpecialChars(out, text)
}
func (r *roffRenderer) DocumentHeader(out *bytes.Buffer) {
}
func (r *roffRenderer) DocumentFooter(out *bytes.Buffer) {
}
func needsBackslash(c byte) bool {
for _, r := range []byte("-_&\\~") {
if c == r {
return true
}
}
return false
}
func escapeSpecialChars(out *bytes.Buffer, text []byte) {
for i := 0; i < len(text); i++ {
// directly copy normal characters
org := i
for i < len(text) && !needsBackslash(text[i]) {
i++
}
if i > org {
out.Write(text[org:i])
}
// escape a character
if i >= len(text) {
break
}
out.WriteByte('\\')
out.WriteByte(text[i])
}
}


@ -1,13 +0,0 @@
Copyright 2014 Alan Shreve
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.


@ -1,23 +0,0 @@
# mousetrap
mousetrap is a tiny library that answers a single question.
On a Windows machine, was the process invoked by someone double clicking on
the executable file while browsing in explorer?
### Motivation
Windows developers unfamiliar with command line tools will often "double-click"
the executable for a tool. Because most CLI tools print the help and then exit
when invoked without arguments, this is often very frustrating for those users.
mousetrap provides a way to detect these invocations so that you can provide
more helpful behavior and instructions on how to run the CLI tool. To see what
this looks like, both from an organizational and a technical perspective, see
https://inconshreveable.com/09-09-2014/sweat-the-small-stuff/
### The interface
The library exposes a single interface:
func StartedByExplorer() (bool)


@ -1,15 +0,0 @@
// +build !windows
package mousetrap
// StartedByExplorer returns true if the program was invoked by the user
// double-clicking on the executable from explorer.exe
//
// It is conservative and returns false if any of the internal calls fail.
// It does not guarantee that the program was run from a terminal. It only can tell you
// whether it was launched from explorer.exe
//
// On non-Windows platforms, it always returns false.
func StartedByExplorer() bool {
return false
}


@ -1,98 +0,0 @@
// +build windows
// +build !go1.4
package mousetrap
import (
"fmt"
"os"
"syscall"
"unsafe"
)
const (
// defined by the Win32 API
th32cs_snapprocess uintptr = 0x2
)
var (
kernel = syscall.MustLoadDLL("kernel32.dll")
CreateToolhelp32Snapshot = kernel.MustFindProc("CreateToolhelp32Snapshot")
Process32First = kernel.MustFindProc("Process32FirstW")
Process32Next = kernel.MustFindProc("Process32NextW")
)
// ProcessEntry32 structure defined by the Win32 API
type processEntry32 struct {
dwSize uint32
cntUsage uint32
th32ProcessID uint32
th32DefaultHeapID int
th32ModuleID uint32
cntThreads uint32
th32ParentProcessID uint32
pcPriClassBase int32
dwFlags uint32
szExeFile [syscall.MAX_PATH]uint16
}
func getProcessEntry(pid int) (pe *processEntry32, err error) {
snapshot, _, e1 := CreateToolhelp32Snapshot.Call(th32cs_snapprocess, uintptr(0))
if snapshot == uintptr(syscall.InvalidHandle) {
err = fmt.Errorf("CreateToolhelp32Snapshot: %v", e1)
return
}
defer syscall.CloseHandle(syscall.Handle(snapshot))
var processEntry processEntry32
processEntry.dwSize = uint32(unsafe.Sizeof(processEntry))
ok, _, e1 := Process32First.Call(snapshot, uintptr(unsafe.Pointer(&processEntry)))
if ok == 0 {
err = fmt.Errorf("Process32First: %v", e1)
return
}
for {
if processEntry.th32ProcessID == uint32(pid) {
pe = &processEntry
return
}
ok, _, e1 = Process32Next.Call(snapshot, uintptr(unsafe.Pointer(&processEntry)))
if ok == 0 {
err = fmt.Errorf("Process32Next: %v", e1)
return
}
}
}
func getppid() (pid int, err error) {
pe, err := getProcessEntry(os.Getpid())
if err != nil {
return
}
pid = int(pe.th32ParentProcessID)
return
}
// StartedByExplorer returns true if the program was invoked by the user double-clicking
// on the executable from explorer.exe
//
// It is conservative and returns false if any of the internal calls fail.
// It does not guarantee that the program was run from a terminal. It only can tell you
// whether it was launched from explorer.exe
func StartedByExplorer() bool {
ppid, err := getppid()
if err != nil {
return false
}
pe, err := getProcessEntry(ppid)
if err != nil {
return false
}
name := syscall.UTF16ToString(pe.szExeFile[:])
return name == "explorer.exe"
}


@ -1,46 +0,0 @@
// +build windows
// +build go1.4
package mousetrap
import (
"os"
"syscall"
"unsafe"
)
func getProcessEntry(pid int) (*syscall.ProcessEntry32, error) {
snapshot, err := syscall.CreateToolhelp32Snapshot(syscall.TH32CS_SNAPPROCESS, 0)
if err != nil {
return nil, err
}
defer syscall.CloseHandle(snapshot)
var procEntry syscall.ProcessEntry32
procEntry.Size = uint32(unsafe.Sizeof(procEntry))
if err = syscall.Process32First(snapshot, &procEntry); err != nil {
return nil, err
}
for {
if procEntry.ProcessID == uint32(pid) {
return &procEntry, nil
}
err = syscall.Process32Next(snapshot, &procEntry)
if err != nil {
return nil, err
}
}
}
// StartedByExplorer returns true if the program was invoked by the user double-clicking
// on the executable from explorer.exe
//
// It is conservative and returns false if any of the internal calls fail.
// It does not guarantee that the program was run from a terminal. It only can tell you
// whether it was launched from explorer.exe
func StartedByExplorer() bool {
pe, err := getProcessEntry(os.Getppid())
if err != nil {
return false
}
return "explorer.exe" == syscall.UTF16ToString(pe.ExeFile[:])
}


@ -1,8 +0,0 @@
*.out
*.swp
*.8
*.6
_obj
_test*
markdown
tags


@ -1,17 +0,0 @@
# Travis CI (http://travis-ci.org/) is a continuous integration service for
# open source projects. This file configures it to run unit tests for
# blackfriday.
language: go
go:
- 1.2
- 1.3
- 1.4
install:
- go get -d -t -v ./...
- go build -v ./...
script:
- go test -v ./...


@ -1,29 +0,0 @@
Blackfriday is distributed under the Simplified BSD License:
> Copyright © 2011 Russ Ross
> All rights reserved.
>
> Redistribution and use in source and binary forms, with or without
> modification, are permitted provided that the following conditions
> are met:
>
> 1. Redistributions of source code must retain the above copyright
> notice, this list of conditions and the following disclaimer.
>
> 2. Redistributions in binary form must reproduce the above
> copyright notice, this list of conditions and the following
> disclaimer in the documentation and/or other materials provided with
> the distribution.
>
> THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
> "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
> LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
> FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
> COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
> INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
> BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
> LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
> CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
> LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
> ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
> POSSIBILITY OF SUCH DAMAGE.


@ -1,246 +0,0 @@
Blackfriday [![Build Status](https://travis-ci.org/russross/blackfriday.svg?branch=master)](https://travis-ci.org/russross/blackfriday)
===========
Blackfriday is a [Markdown][1] processor implemented in [Go][2]. It
is paranoid about its input (so you can safely feed it user-supplied
data), it is fast, it supports common extensions (tables, smart
punctuation substitutions, etc.), and it is safe for all utf-8
(unicode) input.
HTML output is currently supported, along with Smartypants
extensions. An experimental LaTeX output engine is also included.
It started as a translation from C of [Sundown][3].
Installation
------------
Blackfriday is compatible with Go 1. If you are using an older
release of Go, consider using v1.1 of blackfriday, which was based
on the last stable release of Go prior to Go 1. You can find it as a
tagged commit on github.
With Go 1 and git installed:
go get github.com/russross/blackfriday
will download, compile, and install the package into your `$GOPATH`
directory hierarchy. Alternatively, you can achieve the same if you
import it into a project:
import "github.com/russross/blackfriday"
and `go get` without parameters.
Usage
-----
For basic usage, it is as simple as getting your input into a byte
slice and calling:
output := blackfriday.MarkdownBasic(input)
This renders it with no extensions enabled. To get a more useful
feature set, use this instead:
output := blackfriday.MarkdownCommon(input)
### Sanitize untrusted content
Blackfriday itself does nothing to protect against malicious content. If you are
dealing with user-supplied markdown, we recommend running blackfriday's output
through HTML sanitizer such as
[Bluemonday](https://github.com/microcosm-cc/bluemonday).
Here's an example of simple usage of blackfriday together with bluemonday:
``` go
import (
"github.com/microcosm-cc/bluemonday"
"github.com/russross/blackfriday"
)
// ...
unsafe := blackfriday.MarkdownCommon(input)
html := bluemonday.UGCPolicy().SanitizeBytes(unsafe)
```
### Custom options
If you want to customize the set of options, first get a renderer
(currently either the HTML or LaTeX output engines), then use it to
call the more general `Markdown` function. For examples, see the
implementations of `MarkdownBasic` and `MarkdownCommon` in
`markdown.go`.
You can also check out `blackfriday-tool` for a more complete example
of how to use it. Download and install it using:
go get github.com/russross/blackfriday-tool
This is a simple command-line tool that allows you to process a
markdown file using a standalone program. You can also browse the
source directly on github if you are just looking for some example
code:
* <http://github.com/russross/blackfriday-tool>
Note that if you have not already done so, installing
`blackfriday-tool` will be sufficient to download and install
blackfriday in addition to the tool itself. The tool binary will be
installed in `$GOPATH/bin`. This is a statically-linked binary that
can be copied to wherever you need it without worrying about
dependencies and library versions.
Features
--------
All features of Sundown are supported, including:
* **Compatibility**. The Markdown v1.0.3 test suite passes with
the `--tidy` option. Without `--tidy`, the differences are
mostly in whitespace and entity escaping, where blackfriday is
more consistent and cleaner.
* **Common extensions**, including table support, fenced code
blocks, autolinks, strikethroughs, non-strict emphasis, etc.
* **Safety**. Blackfriday is paranoid when parsing, making it safe
to feed untrusted user input without fear of bad things
happening. The test suite stress tests this and there are no
known inputs that make it crash. If you find one, please let me
know and send me the input that does it.
NOTE: "safety" in this context means *runtime safety only*. In order to
protect yourself against JavaScript injection in untrusted content, see
[this example](https://github.com/russross/blackfriday#sanitize-untrusted-content).
* **Fast processing**. It is fast enough to render on-demand in
most web applications without having to cache the output.
* **Thread safety**. You can run multiple parsers in different
goroutines without ill effect. There is no dependence on global
shared state.
* **Minimal dependencies**. Blackfriday only depends on standard
library packages in Go. The source code is pretty
self-contained, so it is easy to add to any project, including
Google App Engine projects.
* **Standards compliant**. Output successfully validates using the
W3C validation tool for HTML 4.01 and XHTML 1.0 Transitional.
Extensions
----------
In addition to the standard markdown syntax, this package
implements the following extensions:
* **Intra-word emphasis suppression**. The `_` character is
commonly used inside words when discussing code, so having
markdown interpret it as an emphasis command is usually the
wrong thing. Blackfriday lets you treat all emphasis markers as
normal characters when they occur inside a word.
* **Tables**. Tables can be created by drawing them in the input
using a simple syntax:
```
Name | Age
--------|------
Bob | 27
Alice | 23
```
* **Fenced code blocks**. In addition to the normal 4-space
indentation to mark code blocks, you can explicitly mark them
and supply a language (to make syntax highlighting simple). Just
mark it like this:
``` go
func getTrue() bool {
return true
}
```
You can use 3 or more backticks to mark the beginning of the
block, and the same number to mark the end of the block.
* **Autolinking**. Blackfriday can find URLs that have not been
explicitly marked as links and turn them into links.
* **Strikethrough**. Use two tildes (`~~`) to mark text that
should be crossed out.
* **Hard line breaks**. With this extension enabled (it is off by
default in the `MarkdownBasic` and `MarkdownCommon` convenience
functions), newlines in the input translate into line breaks in
the output.
* **Smart quotes**. Smartypants-style punctuation substitution is
supported, turning normal double- and single-quote marks into
curly quotes, etc.
* **LaTeX-style dash parsing** is an additional option, where `--`
is translated into `&ndash;`, and `---` is translated into
`&mdash;`. This differs from most smartypants processors, which
turn a single hyphen into an ndash and a double hyphen into an
mdash.
* **Smart fractions**, where anything that looks like a fraction
is translated into suitable HTML (instead of just a few special
cases like most smartypant processors). For example, `4/5`
becomes `<sup>4</sup>&frasl;<sub>5</sub>`, which renders as
<sup>4</sup>&frasl;<sub>5</sub>.
Other renderers
---------------
Blackfriday is structured to allow alternative rendering engines. Here
are a few of note:
* [github_flavored_markdown](https://godoc.org/github.com/shurcooL/github_flavored_markdown):
provides a GitHub Flavored Markdown renderer with fenced code block
highlighting, clickable header anchor links.
It's not customizable, and its goal is to produce HTML output
equivalent to the [GitHub Markdown API endpoint](https://developer.github.com/v3/markdown/#render-a-markdown-document-in-raw-mode),
except the rendering is performed locally.
* [markdownfmt](https://github.com/shurcooL/markdownfmt): like gofmt,
but for markdown.
* LaTeX output: renders output as LaTeX. This is currently part of the
main Blackfriday repository, but may be split into its own project
in the future. If you are interested in owning and maintaining the
LaTeX output component, please be in touch.
It renders some basic documents, but is only experimental at this
point. In particular, it does not do any inline escaping, so input
that happens to look like LaTeX code will be passed through without
modification.
Todo
----
* More unit testing
* Improve unicode support. It does not understand all unicode
rules (about what constitutes a letter, a punctuation symbol,
etc.), so it may fail to detect word boundaries correctly in
some instances. It is safe on all utf-8 input.
License
-------
[Blackfriday is distributed under the Simplified BSD License](LICENSE.txt)
[1]: http://daringfireball.net/projects/markdown/ "Markdown"
[2]: http://golang.org/ "Go Language"
[3]: https://github.com/vmg/sundown "Sundown"

File diff suppressed because it is too large


@ -1,948 +0,0 @@
//
// Blackfriday Markdown Processor
// Available at http://github.com/russross/blackfriday
//
// Copyright © 2011 Russ Ross <russ@russross.com>.
// Distributed under the Simplified BSD License.
// See README.md for details.
//
//
//
// HTML rendering backend
//
//
package blackfriday
import (
"bytes"
"fmt"
"regexp"
"strconv"
"strings"
)
// Html renderer configuration options.
const (
HTML_SKIP_HTML = 1 << iota // skip preformatted HTML blocks
HTML_SKIP_STYLE // skip embedded <style> elements
HTML_SKIP_IMAGES // skip embedded images
HTML_SKIP_LINKS // skip all links
HTML_SAFELINK // only link to trusted protocols
HTML_NOFOLLOW_LINKS // only link with rel="nofollow"
HTML_NOREFERRER_LINKS // only link with rel="noreferrer"
HTML_HREF_TARGET_BLANK // add a blank target
HTML_TOC // generate a table of contents
HTML_OMIT_CONTENTS // skip the main contents (for a standalone table of contents)
HTML_COMPLETE_PAGE // generate a complete HTML page
HTML_USE_XHTML // generate XHTML output instead of HTML
HTML_USE_SMARTYPANTS // enable smart punctuation substitutions
HTML_SMARTYPANTS_FRACTIONS // enable smart fractions (with HTML_USE_SMARTYPANTS)
HTML_SMARTYPANTS_LATEX_DASHES // enable LaTeX-style dashes (with HTML_USE_SMARTYPANTS)
HTML_SMARTYPANTS_ANGLED_QUOTES // enable angled double quotes (with HTML_USE_SMARTYPANTS) for double quotes rendering
HTML_FOOTNOTE_RETURN_LINKS // generate a link at the end of a footnote to return to the source
)
var (
alignments = []string{
"left",
"right",
"center",
}
// TODO: improve this regexp to catch all possible entities:
htmlEntity = regexp.MustCompile(`&[a-z]{2,5};`)
)
type HtmlRendererParameters struct {
// Prepend this text to each relative URL.
AbsolutePrefix string
// Add this text to each footnote anchor, to ensure uniqueness.
FootnoteAnchorPrefix string
// Show this text inside the <a> tag for a footnote return link, if the
// HTML_FOOTNOTE_RETURN_LINKS flag is enabled. If blank, the string
// <sup>[return]</sup> is used.
FootnoteReturnLinkContents string
// If set, add this text to the front of each Header ID, to ensure
// uniqueness.
HeaderIDPrefix string
// If set, add this text to the back of each Header ID, to ensure uniqueness.
HeaderIDSuffix string
}
// Html is a type that implements the Renderer interface for HTML output.
//
// Do not create this directly, instead use the HtmlRenderer function.
type Html struct {
flags int // HTML_* options
closeTag string // how to end singleton tags: either " />" or ">"
title string // document title
css string // optional css file url (used with HTML_COMPLETE_PAGE)
parameters HtmlRendererParameters
// table of contents data
tocMarker int
headerCount int
currentLevel int
toc *bytes.Buffer
// Track header IDs to prevent ID collision in a single generation.
headerIDs map[string]int
smartypants *smartypantsRenderer
}
const (
xhtmlClose = " />"
htmlClose = ">"
)
// HtmlRenderer creates and configures an Html object, which
// satisfies the Renderer interface.
//
// flags is a set of HTML_* options ORed together.
// title is the title of the document, and css is a URL for the document's
// stylesheet.
// title and css are only used when HTML_COMPLETE_PAGE is selected.
func HtmlRenderer(flags int, title string, css string) Renderer {
return HtmlRendererWithParameters(flags, title, css, HtmlRendererParameters{})
}
func HtmlRendererWithParameters(flags int, title string,
css string, renderParameters HtmlRendererParameters) Renderer {
// configure the rendering engine
closeTag := htmlClose
if flags&HTML_USE_XHTML != 0 {
closeTag = xhtmlClose
}
if renderParameters.FootnoteReturnLinkContents == "" {
renderParameters.FootnoteReturnLinkContents = `<sup>[return]</sup>`
}
return &Html{
flags: flags,
closeTag: closeTag,
title: title,
css: css,
parameters: renderParameters,
headerCount: 0,
currentLevel: 0,
toc: new(bytes.Buffer),
headerIDs: make(map[string]int),
smartypants: smartypants(flags),
}
}
// Using if statements is a bit faster than a switch statement. As the compiler
// improves, this should be unnecessary; this is only worthwhile because
// attrEscape is the single largest CPU user in normal use.
// Also tried using map, but that gave a ~3x slowdown.
func escapeSingleChar(char byte) (string, bool) {
if char == '"' {
return "&quot;", true
}
if char == '&' {
return "&amp;", true
}
if char == '<' {
return "&lt;", true
}
if char == '>' {
return "&gt;", true
}
return "", false
}
func attrEscape(out *bytes.Buffer, src []byte) {
org := 0
for i, ch := range src {
if entity, ok := escapeSingleChar(ch); ok {
if i > org {
// copy all the normal characters since the last escape
out.Write(src[org:i])
}
org = i + 1
out.WriteString(entity)
}
}
if org < len(src) {
out.Write(src[org:])
}
}
func entityEscapeWithSkip(out *bytes.Buffer, src []byte, skipRanges [][]int) {
end := 0
for _, rang := range skipRanges {
attrEscape(out, src[end:rang[0]])
out.Write(src[rang[0]:rang[1]])
end = rang[1]
}
attrEscape(out, src[end:])
}
func (options *Html) GetFlags() int {
return options.flags
}
func (options *Html) TitleBlock(out *bytes.Buffer, text []byte) {
text = bytes.TrimPrefix(text, []byte("% "))
text = bytes.Replace(text, []byte("\n% "), []byte("\n"), -1)
out.WriteString("<h1 class=\"title\">")
out.Write(text)
out.WriteString("\n</h1>")
}
func (options *Html) Header(out *bytes.Buffer, text func() bool, level int, id string) {
marker := out.Len()
doubleSpace(out)
if id == "" && options.flags&HTML_TOC != 0 {
id = fmt.Sprintf("toc_%d", options.headerCount)
}
if id != "" {
id = options.ensureUniqueHeaderID(id)
if options.parameters.HeaderIDPrefix != "" {
id = options.parameters.HeaderIDPrefix + id
}
if options.parameters.HeaderIDSuffix != "" {
id = id + options.parameters.HeaderIDSuffix
}
out.WriteString(fmt.Sprintf("<h%d id=\"%s\">", level, id))
} else {
out.WriteString(fmt.Sprintf("<h%d>", level))
}
tocMarker := out.Len()
if !text() {
out.Truncate(marker)
return
}
// are we building a table of contents?
if options.flags&HTML_TOC != 0 {
options.TocHeaderWithAnchor(out.Bytes()[tocMarker:], level, id)
}
out.WriteString(fmt.Sprintf("</h%d>\n", level))
}
func (options *Html) BlockHtml(out *bytes.Buffer, text []byte) {
if options.flags&HTML_SKIP_HTML != 0 {
return
}
doubleSpace(out)
out.Write(text)
out.WriteByte('\n')
}
func (options *Html) HRule(out *bytes.Buffer) {
doubleSpace(out)
out.WriteString("<hr")
out.WriteString(options.closeTag)
out.WriteByte('\n')
}
func (options *Html) BlockCode(out *bytes.Buffer, text []byte, lang string) {
doubleSpace(out)
// parse out the language names/classes
count := 0
for _, elt := range strings.Fields(lang) {
if elt[0] == '.' {
elt = elt[1:]
}
if len(elt) == 0 {
continue
}
if count == 0 {
out.WriteString("<pre><code class=\"language-")
} else {
out.WriteByte(' ')
}
attrEscape(out, []byte(elt))
count++
}
if count == 0 {
out.WriteString("<pre><code>")
} else {
out.WriteString("\">")
}
attrEscape(out, text)
out.WriteString("</code></pre>\n")
}
func (options *Html) BlockQuote(out *bytes.Buffer, text []byte) {
doubleSpace(out)
out.WriteString("<blockquote>\n")
out.Write(text)
out.WriteString("</blockquote>\n")
}
func (options *Html) Table(out *bytes.Buffer, header []byte, body []byte, columnData []int) {
doubleSpace(out)
out.WriteString("<table>\n<thead>\n")
out.Write(header)
out.WriteString("</thead>\n\n<tbody>\n")
out.Write(body)
out.WriteString("</tbody>\n</table>\n")
}
func (options *Html) TableRow(out *bytes.Buffer, text []byte) {
doubleSpace(out)
out.WriteString("<tr>\n")
out.Write(text)
out.WriteString("\n</tr>\n")
}
func (options *Html) TableHeaderCell(out *bytes.Buffer, text []byte, align int) {
doubleSpace(out)
switch align {
case TABLE_ALIGNMENT_LEFT:
out.WriteString("<th align=\"left\">")
case TABLE_ALIGNMENT_RIGHT:
out.WriteString("<th align=\"right\">")
case TABLE_ALIGNMENT_CENTER:
out.WriteString("<th align=\"center\">")
default:
out.WriteString("<th>")
}
out.Write(text)
out.WriteString("</th>")
}
func (options *Html) TableCell(out *bytes.Buffer, text []byte, align int) {
doubleSpace(out)
switch align {
case TABLE_ALIGNMENT_LEFT:
out.WriteString("<td align=\"left\">")
case TABLE_ALIGNMENT_RIGHT:
out.WriteString("<td align=\"right\">")
case TABLE_ALIGNMENT_CENTER:
out.WriteString("<td align=\"center\">")
default:
out.WriteString("<td>")
}
out.Write(text)
out.WriteString("</td>")
}
func (options *Html) Footnotes(out *bytes.Buffer, text func() bool) {
out.WriteString("<div class=\"footnotes\">\n")
options.HRule(out)
options.List(out, text, LIST_TYPE_ORDERED)
out.WriteString("</div>\n")
}
func (options *Html) FootnoteItem(out *bytes.Buffer, name, text []byte, flags int) {
if flags&LIST_ITEM_CONTAINS_BLOCK != 0 || flags&LIST_ITEM_BEGINNING_OF_LIST != 0 {
doubleSpace(out)
}
slug := slugify(name)
out.WriteString(`<li id="`)
out.WriteString(`fn:`)
out.WriteString(options.parameters.FootnoteAnchorPrefix)
out.Write(slug)
out.WriteString(`">`)
out.Write(text)
if options.flags&HTML_FOOTNOTE_RETURN_LINKS != 0 {
out.WriteString(` <a class="footnote-return" href="#`)
out.WriteString(`fnref:`)
out.WriteString(options.parameters.FootnoteAnchorPrefix)
out.Write(slug)
out.WriteString(`">`)
out.WriteString(options.parameters.FootnoteReturnLinkContents)
out.WriteString(`</a>`)
}
out.WriteString("</li>\n")
}
func (options *Html) List(out *bytes.Buffer, text func() bool, flags int) {
marker := out.Len()
doubleSpace(out)
if flags&LIST_TYPE_DEFINITION != 0 {
out.WriteString("<dl>")
} else if flags&LIST_TYPE_ORDERED != 0 {
out.WriteString("<ol>")
} else {
out.WriteString("<ul>")
}
if !text() {
out.Truncate(marker)
return
}
if flags&LIST_TYPE_DEFINITION != 0 {
out.WriteString("</dl>\n")
} else if flags&LIST_TYPE_ORDERED != 0 {
out.WriteString("</ol>\n")
} else {
out.WriteString("</ul>\n")
}
}
func (options *Html) ListItem(out *bytes.Buffer, text []byte, flags int) {
if (flags&LIST_ITEM_CONTAINS_BLOCK != 0 && flags&LIST_TYPE_DEFINITION == 0) ||
flags&LIST_ITEM_BEGINNING_OF_LIST != 0 {
doubleSpace(out)
}
if flags&LIST_TYPE_TERM != 0 {
out.WriteString("<dt>")
} else if flags&LIST_TYPE_DEFINITION != 0 {
out.WriteString("<dd>")
} else {
out.WriteString("<li>")
}
out.Write(text)
if flags&LIST_TYPE_TERM != 0 {
out.WriteString("</dt>\n")
} else if flags&LIST_TYPE_DEFINITION != 0 {
out.WriteString("</dd>\n")
} else {
out.WriteString("</li>\n")
}
}
func (options *Html) Paragraph(out *bytes.Buffer, text func() bool) {
marker := out.Len()
doubleSpace(out)
out.WriteString("<p>")
if !text() {
out.Truncate(marker)
return
}
out.WriteString("</p>\n")
}
func (options *Html) AutoLink(out *bytes.Buffer, link []byte, kind int) {
skipRanges := htmlEntity.FindAllIndex(link, -1)
if options.flags&HTML_SAFELINK != 0 && !isSafeLink(link) && kind != LINK_TYPE_EMAIL {
// mark it but don't link it if it is not a safe link: no smartypants
out.WriteString("<tt>")
entityEscapeWithSkip(out, link, skipRanges)
out.WriteString("</tt>")
return
}
out.WriteString("<a href=\"")
if kind == LINK_TYPE_EMAIL {
out.WriteString("mailto:")
} else {
options.maybeWriteAbsolutePrefix(out, link)
}
entityEscapeWithSkip(out, link, skipRanges)
var relAttrs []string
if options.flags&HTML_NOFOLLOW_LINKS != 0 && !isRelativeLink(link) {
relAttrs = append(relAttrs, "nofollow")
}
if options.flags&HTML_NOREFERRER_LINKS != 0 && !isRelativeLink(link) {
relAttrs = append(relAttrs, "noreferrer")
}
if len(relAttrs) > 0 {
out.WriteString(fmt.Sprintf("\" rel=\"%s", strings.Join(relAttrs, " ")))
}
// only add a blank target to external links
if options.flags&HTML_HREF_TARGET_BLANK != 0 && !isRelativeLink(link) {
out.WriteString("\" target=\"_blank")
}
out.WriteString("\">")
// Pretty print: if we get an email address as
// an actual URI, e.g. `mailto:foo@bar.com`, we don't
// want to print the `mailto:` prefix
switch {
case bytes.HasPrefix(link, []byte("mailto://")):
attrEscape(out, link[len("mailto://"):])
case bytes.HasPrefix(link, []byte("mailto:")):
attrEscape(out, link[len("mailto:"):])
default:
entityEscapeWithSkip(out, link, skipRanges)
}
out.WriteString("</a>")
}
func (options *Html) CodeSpan(out *bytes.Buffer, text []byte) {
out.WriteString("<code>")
attrEscape(out, text)
out.WriteString("</code>")
}
func (options *Html) DoubleEmphasis(out *bytes.Buffer, text []byte) {
out.WriteString("<strong>")
out.Write(text)
out.WriteString("</strong>")
}
func (options *Html) Emphasis(out *bytes.Buffer, text []byte) {
if len(text) == 0 {
return
}
out.WriteString("<em>")
out.Write(text)
out.WriteString("</em>")
}
func (options *Html) maybeWriteAbsolutePrefix(out *bytes.Buffer, link []byte) {
if options.parameters.AbsolutePrefix != "" && isRelativeLink(link) && link[0] != '.' {
out.WriteString(options.parameters.AbsolutePrefix)
if link[0] != '/' {
out.WriteByte('/')
}
}
}
func (options *Html) Image(out *bytes.Buffer, link []byte, title []byte, alt []byte) {
if options.flags&HTML_SKIP_IMAGES != 0 {
return
}
out.WriteString("<img src=\"")
options.maybeWriteAbsolutePrefix(out, link)
attrEscape(out, link)
out.WriteString("\" alt=\"")
if len(alt) > 0 {
attrEscape(out, alt)
}
if len(title) > 0 {
out.WriteString("\" title=\"")
attrEscape(out, title)
}
out.WriteByte('"')
out.WriteString(options.closeTag)
}
func (options *Html) LineBreak(out *bytes.Buffer) {
out.WriteString("<br")
out.WriteString(options.closeTag)
out.WriteByte('\n')
}
func (options *Html) Link(out *bytes.Buffer, link []byte, title []byte, content []byte) {
if options.flags&HTML_SKIP_LINKS != 0 {
// write the link text out but don't link it, just mark it with typewriter font
out.WriteString("<tt>")
attrEscape(out, content)
out.WriteString("</tt>")
return
}
if options.flags&HTML_SAFELINK != 0 && !isSafeLink(link) {
// write the link text out but don't link it, just mark it with typewriter font
out.WriteString("<tt>")
attrEscape(out, content)
out.WriteString("</tt>")
return
}
out.WriteString("<a href=\"")
options.maybeWriteAbsolutePrefix(out, link)
attrEscape(out, link)
if len(title) > 0 {
out.WriteString("\" title=\"")
attrEscape(out, title)
}
var relAttrs []string
if options.flags&HTML_NOFOLLOW_LINKS != 0 && !isRelativeLink(link) {
relAttrs = append(relAttrs, "nofollow")
}
if options.flags&HTML_NOREFERRER_LINKS != 0 && !isRelativeLink(link) {
relAttrs = append(relAttrs, "noreferrer")
}
if len(relAttrs) > 0 {
out.WriteString(fmt.Sprintf("\" rel=\"%s", strings.Join(relAttrs, " ")))
}
// only add a blank target to external links
if options.flags&HTML_HREF_TARGET_BLANK != 0 && !isRelativeLink(link) {
out.WriteString("\" target=\"_blank")
}
out.WriteString("\">")
out.Write(content)
out.WriteString("</a>")
return
}
func (options *Html) RawHtmlTag(out *bytes.Buffer, text []byte) {
if options.flags&HTML_SKIP_HTML != 0 {
return
}
if options.flags&HTML_SKIP_STYLE != 0 && isHtmlTag(text, "style") {
return
}
if options.flags&HTML_SKIP_LINKS != 0 && isHtmlTag(text, "a") {
return
}
if options.flags&HTML_SKIP_IMAGES != 0 && isHtmlTag(text, "img") {
return
}
out.Write(text)
}
func (options *Html) TripleEmphasis(out *bytes.Buffer, text []byte) {
out.WriteString("<strong><em>")
out.Write(text)
out.WriteString("</em></strong>")
}
func (options *Html) StrikeThrough(out *bytes.Buffer, text []byte) {
out.WriteString("<del>")
out.Write(text)
out.WriteString("</del>")
}
func (options *Html) FootnoteRef(out *bytes.Buffer, ref []byte, id int) {
slug := slugify(ref)
out.WriteString(`<sup class="footnote-ref" id="`)
out.WriteString(`fnref:`)
out.WriteString(options.parameters.FootnoteAnchorPrefix)
out.Write(slug)
out.WriteString(`"><a rel="footnote" href="#`)
out.WriteString(`fn:`)
out.WriteString(options.parameters.FootnoteAnchorPrefix)
out.Write(slug)
out.WriteString(`">`)
out.WriteString(strconv.Itoa(id))
out.WriteString(`</a></sup>`)
}
func (options *Html) Entity(out *bytes.Buffer, entity []byte) {
out.Write(entity)
}
func (options *Html) NormalText(out *bytes.Buffer, text []byte) {
if options.flags&HTML_USE_SMARTYPANTS != 0 {
options.Smartypants(out, text)
} else {
attrEscape(out, text)
}
}
func (options *Html) Smartypants(out *bytes.Buffer, text []byte) {
smrt := smartypantsData{false, false}
// first do normal entity escaping
var escaped bytes.Buffer
attrEscape(&escaped, text)
text = escaped.Bytes()
mark := 0
for i := 0; i < len(text); i++ {
if action := options.smartypants[text[i]]; action != nil {
if i > mark {
out.Write(text[mark:i])
}
previousChar := byte(0)
if i > 0 {
previousChar = text[i-1]
}
i += action(out, &smrt, previousChar, text[i:])
mark = i + 1
}
}
if mark < len(text) {
out.Write(text[mark:])
}
}
func (options *Html) DocumentHeader(out *bytes.Buffer) {
if options.flags&HTML_COMPLETE_PAGE == 0 {
return
}
ending := ""
if options.flags&HTML_USE_XHTML != 0 {
out.WriteString("<!DOCTYPE html PUBLIC \"-//W3C//DTD XHTML 1.0 Transitional//EN\" ")
out.WriteString("\"http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd\">\n")
out.WriteString("<html xmlns=\"http://www.w3.org/1999/xhtml\">\n")
ending = " /"
} else {
out.WriteString("<!DOCTYPE html>\n")
out.WriteString("<html>\n")
}
out.WriteString("<head>\n")
out.WriteString(" <title>")
options.NormalText(out, []byte(options.title))
out.WriteString("</title>\n")
out.WriteString(" <meta name=\"GENERATOR\" content=\"Blackfriday Markdown Processor v")
out.WriteString(VERSION)
out.WriteString("\"")
out.WriteString(ending)
out.WriteString(">\n")
out.WriteString(" <meta charset=\"utf-8\"")
out.WriteString(ending)
out.WriteString(">\n")
if options.css != "" {
out.WriteString(" <link rel=\"stylesheet\" type=\"text/css\" href=\"")
attrEscape(out, []byte(options.css))
out.WriteString("\"")
out.WriteString(ending)
out.WriteString(">\n")
}
out.WriteString("</head>\n")
out.WriteString("<body>\n")
options.tocMarker = out.Len()
}
func (options *Html) DocumentFooter(out *bytes.Buffer) {
// finalize and insert the table of contents
if options.flags&HTML_TOC != 0 {
options.TocFinalize()
// now we have to insert the table of contents into the document
var temp bytes.Buffer
// start by making a copy of everything after the document header
temp.Write(out.Bytes()[options.tocMarker:])
// now clear the copied material from the main output buffer
out.Truncate(options.tocMarker)
// corner case spacing issue
if options.flags&HTML_COMPLETE_PAGE != 0 {
out.WriteByte('\n')
}
// insert the table of contents
out.WriteString("<nav>\n")
out.Write(options.toc.Bytes())
out.WriteString("</nav>\n")
// corner case spacing issue
if options.flags&HTML_COMPLETE_PAGE == 0 && options.flags&HTML_OMIT_CONTENTS == 0 {
out.WriteByte('\n')
}
// write out everything that came after it
if options.flags&HTML_OMIT_CONTENTS == 0 {
out.Write(temp.Bytes())
}
}
if options.flags&HTML_COMPLETE_PAGE != 0 {
out.WriteString("\n</body>\n")
out.WriteString("</html>\n")
}
}
func (options *Html) TocHeaderWithAnchor(text []byte, level int, anchor string) {
for level > options.currentLevel {
switch {
case bytes.HasSuffix(options.toc.Bytes(), []byte("</li>\n")):
// this sublist can nest underneath a header
size := options.toc.Len()
options.toc.Truncate(size - len("</li>\n"))
case options.currentLevel > 0:
options.toc.WriteString("<li>")
}
if options.toc.Len() > 0 {
options.toc.WriteByte('\n')
}
options.toc.WriteString("<ul>\n")
options.currentLevel++
}
for level < options.currentLevel {
options.toc.WriteString("</ul>")
if options.currentLevel > 1 {
options.toc.WriteString("</li>\n")
}
options.currentLevel--
}
options.toc.WriteString("<li><a href=\"#")
if anchor != "" {
options.toc.WriteString(anchor)
} else {
options.toc.WriteString("toc_")
options.toc.WriteString(strconv.Itoa(options.headerCount))
}
options.toc.WriteString("\">")
options.headerCount++
options.toc.Write(text)
options.toc.WriteString("</a></li>\n")
}
func (options *Html) TocHeader(text []byte, level int) {
options.TocHeaderWithAnchor(text, level, "")
}
func (options *Html) TocFinalize() {
for options.currentLevel > 1 {
options.toc.WriteString("</ul></li>\n")
options.currentLevel--
}
if options.currentLevel > 0 {
options.toc.WriteString("</ul>\n")
}
}
func isHtmlTag(tag []byte, tagname string) bool {
found, _ := findHtmlTagPos(tag, tagname)
return found
}
// Look for a character, but ignore it when it is inside any kind of quotes
// (the quoted content might be JavaScript)
func skipUntilCharIgnoreQuotes(html []byte, start int, char byte) int {
inSingleQuote := false
inDoubleQuote := false
inGraveQuote := false
i := start
for i < len(html) {
switch {
case html[i] == char && !inSingleQuote && !inDoubleQuote && !inGraveQuote:
return i
case html[i] == '\'':
inSingleQuote = !inSingleQuote
case html[i] == '"':
inDoubleQuote = !inDoubleQuote
case html[i] == '`':
inGraveQuote = !inGraveQuote
}
i++
}
return start
}
func findHtmlTagPos(tag []byte, tagname string) (bool, int) {
i := 0
if i < len(tag) && tag[0] != '<' {
return false, -1
}
i++
i = skipSpace(tag, i)
if i < len(tag) && tag[i] == '/' {
i++
}
i = skipSpace(tag, i)
j := 0
for ; i < len(tag); i, j = i+1, j+1 {
if j >= len(tagname) {
break
}
if strings.ToLower(string(tag[i]))[0] != tagname[j] {
return false, -1
}
}
if i == len(tag) {
return false, -1
}
rightAngle := skipUntilCharIgnoreQuotes(tag, i, '>')
if rightAngle > i {
return true, rightAngle
}
return false, -1
}
func skipUntilChar(text []byte, start int, char byte) int {
i := start
for i < len(text) && text[i] != char {
i++
}
return i
}
func skipSpace(tag []byte, i int) int {
for i < len(tag) && isspace(tag[i]) {
i++
}
return i
}
func skipChar(data []byte, start int, char byte) int {
i := start
for i < len(data) && data[i] == char {
i++
}
return i
}
func doubleSpace(out *bytes.Buffer) {
if out.Len() > 0 {
out.WriteByte('\n')
}
}
func isRelativeLink(link []byte) (yes bool) {
// an anchor link begins with '#'
if link[0] == '#' {
return true
}
// a link beginning with '/' but not '//' (the latter may be a protocol-relative link)
if len(link) >= 2 && link[0] == '/' && link[1] != '/' {
return true
}
// only the root '/'
if len(link) == 1 && link[0] == '/' {
return true
}
// current directory: begins with "./"
if bytes.HasPrefix(link, []byte("./")) {
return true
}
// parent directory: begins with "../"
if bytes.HasPrefix(link, []byte("../")) {
return true
}
return false
}
func (options *Html) ensureUniqueHeaderID(id string) string {
for count, found := options.headerIDs[id]; found; count, found = options.headerIDs[id] {
tmp := fmt.Sprintf("%s-%d", id, count+1)
if _, tmpFound := options.headerIDs[tmp]; !tmpFound {
options.headerIDs[id] = count + 1
id = tmp
} else {
id = id + "-1"
}
}
if _, found := options.headerIDs[id]; !found {
options.headerIDs[id] = 0
}
return id
}

File diff suppressed because it is too large


@@ -1,332 +0,0 @@
//
// Blackfriday Markdown Processor
// Available at http://github.com/russross/blackfriday
//
// Copyright © 2011 Russ Ross <russ@russross.com>.
// Distributed under the Simplified BSD License.
// See README.md for details.
//
//
//
// LaTeX rendering backend
//
//
package blackfriday
import (
"bytes"
)
// Latex is a type that implements the Renderer interface for LaTeX output.
//
// Do not create this directly, instead use the LatexRenderer function.
type Latex struct {
}
// LatexRenderer creates and configures a Latex object, which
// satisfies the Renderer interface.
//
// flags is a set of LATEX_* options ORed together (currently no such options
// are defined).
func LatexRenderer(flags int) Renderer {
return &Latex{}
}
func (options *Latex) GetFlags() int {
return 0
}
// render code chunks using verbatim, or listings if we have a language
func (options *Latex) BlockCode(out *bytes.Buffer, text []byte, lang string) {
if lang == "" {
out.WriteString("\n\\begin{verbatim}\n")
} else {
out.WriteString("\n\\begin{lstlisting}[language=")
out.WriteString(lang)
out.WriteString("]\n")
}
out.Write(text)
if lang == "" {
out.WriteString("\n\\end{verbatim}\n")
} else {
out.WriteString("\n\\end{lstlisting}\n")
}
}
func (options *Latex) TitleBlock(out *bytes.Buffer, text []byte) {
}
func (options *Latex) BlockQuote(out *bytes.Buffer, text []byte) {
out.WriteString("\n\\begin{quotation}\n")
out.Write(text)
out.WriteString("\n\\end{quotation}\n")
}
func (options *Latex) BlockHtml(out *bytes.Buffer, text []byte) {
// a pretty lame thing to do...
out.WriteString("\n\\begin{verbatim}\n")
out.Write(text)
out.WriteString("\n\\end{verbatim}\n")
}
func (options *Latex) Header(out *bytes.Buffer, text func() bool, level int, id string) {
marker := out.Len()
switch level {
case 1:
out.WriteString("\n\\section{")
case 2:
out.WriteString("\n\\subsection{")
case 3:
out.WriteString("\n\\subsubsection{")
case 4:
out.WriteString("\n\\paragraph{")
case 5:
out.WriteString("\n\\subparagraph{")
case 6:
out.WriteString("\n\\textbf{")
}
if !text() {
out.Truncate(marker)
return
}
out.WriteString("}\n")
}
func (options *Latex) HRule(out *bytes.Buffer) {
out.WriteString("\n\\HRule\n")
}
func (options *Latex) List(out *bytes.Buffer, text func() bool, flags int) {
marker := out.Len()
if flags&LIST_TYPE_ORDERED != 0 {
out.WriteString("\n\\begin{enumerate}\n")
} else {
out.WriteString("\n\\begin{itemize}\n")
}
if !text() {
out.Truncate(marker)
return
}
if flags&LIST_TYPE_ORDERED != 0 {
out.WriteString("\n\\end{enumerate}\n")
} else {
out.WriteString("\n\\end{itemize}\n")
}
}
func (options *Latex) ListItem(out *bytes.Buffer, text []byte, flags int) {
out.WriteString("\n\\item ")
out.Write(text)
}
func (options *Latex) Paragraph(out *bytes.Buffer, text func() bool) {
marker := out.Len()
out.WriteString("\n")
if !text() {
out.Truncate(marker)
return
}
out.WriteString("\n")
}
func (options *Latex) Table(out *bytes.Buffer, header []byte, body []byte, columnData []int) {
out.WriteString("\n\\begin{tabular}{")
for _, elt := range columnData {
switch elt {
case TABLE_ALIGNMENT_LEFT:
out.WriteByte('l')
case TABLE_ALIGNMENT_RIGHT:
out.WriteByte('r')
default:
out.WriteByte('c')
}
}
out.WriteString("}\n")
out.Write(header)
out.WriteString(" \\\\\n\\hline\n")
out.Write(body)
out.WriteString("\n\\end{tabular}\n")
}
func (options *Latex) TableRow(out *bytes.Buffer, text []byte) {
if out.Len() > 0 {
out.WriteString(" \\\\\n")
}
out.Write(text)
}
func (options *Latex) TableHeaderCell(out *bytes.Buffer, text []byte, align int) {
if out.Len() > 0 {
out.WriteString(" & ")
}
out.Write(text)
}
func (options *Latex) TableCell(out *bytes.Buffer, text []byte, align int) {
if out.Len() > 0 {
out.WriteString(" & ")
}
out.Write(text)
}
// TODO: this
func (options *Latex) Footnotes(out *bytes.Buffer, text func() bool) {
}
func (options *Latex) FootnoteItem(out *bytes.Buffer, name, text []byte, flags int) {
}
func (options *Latex) AutoLink(out *bytes.Buffer, link []byte, kind int) {
out.WriteString("\\href{")
if kind == LINK_TYPE_EMAIL {
out.WriteString("mailto:")
}
out.Write(link)
out.WriteString("}{")
out.Write(link)
out.WriteString("}")
}
func (options *Latex) CodeSpan(out *bytes.Buffer, text []byte) {
out.WriteString("\\texttt{")
escapeSpecialChars(out, text)
out.WriteString("}")
}
func (options *Latex) DoubleEmphasis(out *bytes.Buffer, text []byte) {
out.WriteString("\\textbf{")
out.Write(text)
out.WriteString("}")
}
func (options *Latex) Emphasis(out *bytes.Buffer, text []byte) {
out.WriteString("\\textit{")
out.Write(text)
out.WriteString("}")
}
func (options *Latex) Image(out *bytes.Buffer, link []byte, title []byte, alt []byte) {
if bytes.HasPrefix(link, []byte("http://")) || bytes.HasPrefix(link, []byte("https://")) {
// treat it like a link
out.WriteString("\\href{")
out.Write(link)
out.WriteString("}{")
out.Write(alt)
out.WriteString("}")
} else {
out.WriteString("\\includegraphics{")
out.Write(link)
out.WriteString("}")
}
}
func (options *Latex) LineBreak(out *bytes.Buffer) {
out.WriteString(" \\\\\n")
}
func (options *Latex) Link(out *bytes.Buffer, link []byte, title []byte, content []byte) {
out.WriteString("\\href{")
out.Write(link)
out.WriteString("}{")
out.Write(content)
out.WriteString("}")
}
func (options *Latex) RawHtmlTag(out *bytes.Buffer, tag []byte) {
}
func (options *Latex) TripleEmphasis(out *bytes.Buffer, text []byte) {
out.WriteString("\\textbf{\\textit{")
out.Write(text)
out.WriteString("}}")
}
func (options *Latex) StrikeThrough(out *bytes.Buffer, text []byte) {
out.WriteString("\\sout{")
out.Write(text)
out.WriteString("}")
}
// TODO: this
func (options *Latex) FootnoteRef(out *bytes.Buffer, ref []byte, id int) {
}
func needsBackslash(c byte) bool {
for _, r := range []byte("_{}%$&\\~#") {
if c == r {
return true
}
}
return false
}
func escapeSpecialChars(out *bytes.Buffer, text []byte) {
for i := 0; i < len(text); i++ {
// directly copy normal characters
org := i
for i < len(text) && !needsBackslash(text[i]) {
i++
}
if i > org {
out.Write(text[org:i])
}
// escape a character
if i >= len(text) {
break
}
out.WriteByte('\\')
out.WriteByte(text[i])
}
}
func (options *Latex) Entity(out *bytes.Buffer, entity []byte) {
// TODO: convert this into a unicode character or something
out.Write(entity)
}
func (options *Latex) NormalText(out *bytes.Buffer, text []byte) {
escapeSpecialChars(out, text)
}
// header and footer
func (options *Latex) DocumentHeader(out *bytes.Buffer) {
out.WriteString("\\documentclass{article}\n")
out.WriteString("\n")
out.WriteString("\\usepackage{graphicx}\n")
out.WriteString("\\usepackage{listings}\n")
out.WriteString("\\usepackage[margin=1in]{geometry}\n")
out.WriteString("\\usepackage[utf8]{inputenc}\n")
out.WriteString("\\usepackage{verbatim}\n")
out.WriteString("\\usepackage[normalem]{ulem}\n")
out.WriteString("\\usepackage{hyperref}\n")
out.WriteString("\n")
out.WriteString("\\hypersetup{colorlinks,%\n")
out.WriteString(" citecolor=black,%\n")
out.WriteString(" filecolor=black,%\n")
out.WriteString(" linkcolor=black,%\n")
out.WriteString(" urlcolor=black,%\n")
out.WriteString(" pdfstartview=FitH,%\n")
out.WriteString(" breaklinks=true,%\n")
out.WriteString(" pdfauthor={Blackfriday Markdown Processor v")
out.WriteString(VERSION)
out.WriteString("}}\n")
out.WriteString("\n")
out.WriteString("\\newcommand{\\HRule}{\\rule{\\linewidth}{0.5mm}}\n")
out.WriteString("\\addtolength{\\parskip}{0.5\\baselineskip}\n")
out.WriteString("\\parindent=0pt\n")
out.WriteString("\n")
out.WriteString("\\begin{document}\n")
}
func (options *Latex) DocumentFooter(out *bytes.Buffer) {
out.WriteString("\n\\end{document}\n")
}


@@ -1,919 +0,0 @@
//
// Blackfriday Markdown Processor
// Available at http://github.com/russross/blackfriday
//
// Copyright © 2011 Russ Ross <russ@russross.com>.
// Distributed under the Simplified BSD License.
// See README.md for details.
//
//
//
// Markdown parsing and processing
//
//
// Blackfriday markdown processor.
//
// Translates plain text with simple formatting rules into HTML or LaTeX.
package blackfriday
import (
"bytes"
"strings"
"unicode/utf8"
)
const VERSION = "1.1"
// These are the supported markdown parsing extensions.
// OR these values together to select multiple extensions.
const (
EXTENSION_NO_INTRA_EMPHASIS = 1 << iota // ignore emphasis markers inside words
EXTENSION_TABLES // render tables
EXTENSION_FENCED_CODE // render fenced code blocks
EXTENSION_AUTOLINK // detect embedded URLs that are not explicitly marked
EXTENSION_STRIKETHROUGH // strikethrough text using ~~test~~
EXTENSION_LAX_HTML_BLOCKS // loosen up HTML block parsing rules
EXTENSION_SPACE_HEADERS // be strict about prefix header rules
EXTENSION_HARD_LINE_BREAK // translate newlines into line breaks
EXTENSION_TAB_SIZE_EIGHT // expand tabs to eight spaces instead of four
EXTENSION_FOOTNOTES // Pandoc-style footnotes
EXTENSION_NO_EMPTY_LINE_BEFORE_BLOCK // No need to insert an empty line to start a (code, quote, ordered list, unordered list) block
EXTENSION_HEADER_IDS // specify header IDs with {#id}
EXTENSION_TITLEBLOCK // Titleblock ala pandoc
EXTENSION_AUTO_HEADER_IDS // Create the header ID from the text
EXTENSION_BACKSLASH_LINE_BREAK // translate trailing backslashes into line breaks
EXTENSION_DEFINITION_LISTS // render definition lists
commonHtmlFlags = 0 |
HTML_USE_XHTML |
HTML_USE_SMARTYPANTS |
HTML_SMARTYPANTS_FRACTIONS |
HTML_SMARTYPANTS_LATEX_DASHES
commonExtensions = 0 |
EXTENSION_NO_INTRA_EMPHASIS |
EXTENSION_TABLES |
EXTENSION_FENCED_CODE |
EXTENSION_AUTOLINK |
EXTENSION_STRIKETHROUGH |
EXTENSION_SPACE_HEADERS |
EXTENSION_HEADER_IDS |
EXTENSION_BACKSLASH_LINE_BREAK |
EXTENSION_DEFINITION_LISTS
)
// These are the possible flag values for the link renderer.
// Only a single one of these values will be used; they are not ORed together.
// These are mostly of interest if you are writing a new output format.
const (
LINK_TYPE_NOT_AUTOLINK = iota
LINK_TYPE_NORMAL
LINK_TYPE_EMAIL
)
// These are the possible flag values for the ListItem renderer.
// Multiple flag values may be ORed together.
// These are mostly of interest if you are writing a new output format.
const (
LIST_TYPE_ORDERED = 1 << iota
LIST_TYPE_DEFINITION
LIST_TYPE_TERM
LIST_ITEM_CONTAINS_BLOCK
LIST_ITEM_BEGINNING_OF_LIST
LIST_ITEM_END_OF_LIST
)
// These are the possible flag values for the table cell renderer.
// Only a single one of these values will be used; they are not ORed together.
// These are mostly of interest if you are writing a new output format.
const (
TABLE_ALIGNMENT_LEFT = 1 << iota
TABLE_ALIGNMENT_RIGHT
TABLE_ALIGNMENT_CENTER = (TABLE_ALIGNMENT_LEFT | TABLE_ALIGNMENT_RIGHT)
)
// The size of a tab stop.
const (
TAB_SIZE_DEFAULT = 4
TAB_SIZE_EIGHT = 8
)
// These are the tags that are recognized as HTML block tags.
// Any of these can be included in markdown text without special escaping.
var blockTags = map[string]bool{
"p": true,
"dl": true,
"h1": true,
"h2": true,
"h3": true,
"h4": true,
"h5": true,
"h6": true,
"ol": true,
"ul": true,
"del": true,
"div": true,
"ins": true,
"pre": true,
"form": true,
"math": true,
"table": true,
"iframe": true,
"script": true,
"fieldset": true,
"noscript": true,
"blockquote": true,
// HTML5
"video": true,
"aside": true,
"canvas": true,
"figure": true,
"footer": true,
"header": true,
"hgroup": true,
"output": true,
"article": true,
"section": true,
"progress": true,
"figcaption": true,
}
// Renderer is the rendering interface.
// This is mostly of interest if you are implementing a new rendering format.
//
// When a byte slice is provided, it contains the (rendered) contents of the
// element.
//
// When a callback is provided instead, it will write the contents of the
// respective element directly to the output buffer and return true on success.
// If the callback returns false, the rendering function should reset the
// output buffer as though it had never been called.
//
// Currently Html and Latex implementations are provided
type Renderer interface {
// block-level callbacks
BlockCode(out *bytes.Buffer, text []byte, lang string)
BlockQuote(out *bytes.Buffer, text []byte)
BlockHtml(out *bytes.Buffer, text []byte)
Header(out *bytes.Buffer, text func() bool, level int, id string)
HRule(out *bytes.Buffer)
List(out *bytes.Buffer, text func() bool, flags int)
ListItem(out *bytes.Buffer, text []byte, flags int)
Paragraph(out *bytes.Buffer, text func() bool)
Table(out *bytes.Buffer, header []byte, body []byte, columnData []int)
TableRow(out *bytes.Buffer, text []byte)
TableHeaderCell(out *bytes.Buffer, text []byte, flags int)
TableCell(out *bytes.Buffer, text []byte, flags int)
Footnotes(out *bytes.Buffer, text func() bool)
FootnoteItem(out *bytes.Buffer, name, text []byte, flags int)
TitleBlock(out *bytes.Buffer, text []byte)
// Span-level callbacks
AutoLink(out *bytes.Buffer, link []byte, kind int)
CodeSpan(out *bytes.Buffer, text []byte)
DoubleEmphasis(out *bytes.Buffer, text []byte)
Emphasis(out *bytes.Buffer, text []byte)
Image(out *bytes.Buffer, link []byte, title []byte, alt []byte)
LineBreak(out *bytes.Buffer)
Link(out *bytes.Buffer, link []byte, title []byte, content []byte)
RawHtmlTag(out *bytes.Buffer, tag []byte)
TripleEmphasis(out *bytes.Buffer, text []byte)
StrikeThrough(out *bytes.Buffer, text []byte)
FootnoteRef(out *bytes.Buffer, ref []byte, id int)
// Low-level callbacks
Entity(out *bytes.Buffer, entity []byte)
NormalText(out *bytes.Buffer, text []byte)
// Header and footer
DocumentHeader(out *bytes.Buffer)
DocumentFooter(out *bytes.Buffer)
GetFlags() int
}
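The callback contract described above is what drives the marker/Truncate pattern used by Paragraph, Header and List: remember the buffer length, let the callback write, and roll the buffer back if the callback returns false. A minimal sketch of a custom block callback honouring that contract (the myRenderer type is hypothetical and not part of blackfriday):

```go
package example

import "bytes"

// myRenderer is a hypothetical type; a real implementation would provide the
// full Renderer interface, for example by embedding the built-in *Html.
type myRenderer struct{}

func (r *myRenderer) Paragraph(out *bytes.Buffer, text func() bool) {
	marker := out.Len() // remember where this element started
	out.WriteString("<p class=\"custom\">")
	if !text() {
		// the callback reported failure: reset the buffer as though
		// this method had never been called
		out.Truncate(marker)
		return
	}
	out.WriteString("</p>\n")
}
```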
// Callback functions for inline parsing. One such function is defined
// for each character that triggers a response when parsing inline data.
type inlineParser func(p *parser, out *bytes.Buffer, data []byte, offset int) int
// Parser holds runtime state used by the parser.
// This is constructed by the Markdown function.
type parser struct {
r Renderer
refOverride ReferenceOverrideFunc
refs map[string]*reference
inlineCallback [256]inlineParser
flags int
nesting int
maxNesting int
insideLink bool
// Footnotes need to be ordered as well as available to quickly check for
// presence. If a ref is also a footnote, it's stored both in refs and here
// in notes. Slice is nil if footnotes not enabled.
notes []*reference
}
func (p *parser) getRef(refid string) (ref *reference, found bool) {
if p.refOverride != nil {
r, overridden := p.refOverride(refid)
if overridden {
if r == nil {
return nil, false
}
return &reference{
link: []byte(r.Link),
title: []byte(r.Title),
noteId: 0,
hasBlock: false,
text: []byte(r.Text)}, true
}
}
// refs are case insensitive
ref, found = p.refs[strings.ToLower(refid)]
return ref, found
}
//
//
// Public interface
//
//
// Reference represents the details of a link.
// See the documentation in Options for more details on use-case.
type Reference struct {
// Link is usually the URL the reference points to.
Link string
// Title is the alternate text describing the link in more detail.
Title string
// Text is the optional text to override the ref with if the syntax used was
// [refid][]
Text string
}
// ReferenceOverrideFunc is expected to be called with a reference string and
// return either a valid Reference type that the reference string maps to or
// nil. If overridden is false, the default reference logic will be executed.
// See the documentation in Options for more details on use-case.
type ReferenceOverrideFunc func(reference string) (ref *Reference, overridden bool)
// Options represents configurable overrides and callbacks (in addition to the
// extension flag set) for configuring a Markdown parse.
type Options struct {
// Extensions is a flag set of bit-wise ORed extension bits. See the
// EXTENSION_* flags defined in this package.
Extensions int
// ReferenceOverride is an optional function callback that is called every
// time a reference is resolved.
//
// In Markdown, the link reference syntax can be made to resolve a link to
// a reference instead of an inline URL, in one of the following ways:
//
// * [link text][refid]
// * [refid][]
//
// Usually, the refid is defined at the bottom of the Markdown document. If
// this override function is provided, the refid is passed to the override
// function first, before consulting the defined refids at the bottom. If
// the override function indicates an override did not occur, the refids at
// the bottom will be used to fill in the link details.
ReferenceOverride ReferenceOverrideFunc
}
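The ReferenceOverride hook documented above can be sketched as follows; the refid "docs" and its URL are purely illustrative, and the import path is assumed to be the upstream one named in the file headers:

```go
package main

import (
	"fmt"

	"github.com/russross/blackfriday"
)

func main() {
	// Hypothetical override: resolve the refid "docs" to a fixed URL and let
	// every other reference fall through to the definitions in the document.
	refOverride := func(refid string) (*blackfriday.Reference, bool) {
		if refid == "docs" {
			return &blackfriday.Reference{
				Link:  "https://example.com/docs",
				Title: "Project documentation",
			}, true
		}
		return nil, false // not overridden; normal reference logic applies
	}

	input := []byte("See the [manual][docs] for details.")
	renderer := blackfriday.HtmlRenderer(0, "", "")
	output := blackfriday.MarkdownOptions(input, renderer, blackfriday.Options{
		ReferenceOverride: refOverride,
	})
	fmt.Println(string(output))
}
```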
// MarkdownBasic is a convenience function for simple rendering.
// It processes markdown input with no extensions enabled.
func MarkdownBasic(input []byte) []byte {
// set up the HTML renderer
htmlFlags := HTML_USE_XHTML
renderer := HtmlRenderer(htmlFlags, "", "")
// set up the parser
return MarkdownOptions(input, renderer, Options{Extensions: 0})
}
// MarkdownCommon is a convenience function for simple rendering;
// it calls Markdown with the most useful extensions enabled.
// It processes markdown input with common extensions enabled, including:
//
// * Smartypants processing with smart fractions and LaTeX dashes
//
// * Intra-word emphasis suppression
//
// * Tables
//
// * Fenced code blocks
//
// * Autolinking
//
// * Strikethrough support
//
// * Strict header parsing
//
// * Custom Header IDs
func MarkdownCommon(input []byte) []byte {
// set up the HTML renderer
renderer := HtmlRenderer(commonHtmlFlags, "", "")
return MarkdownOptions(input, renderer, Options{
Extensions: commonExtensions})
}
// Markdown is the main rendering function.
// It parses and renders a block of markdown-encoded text.
// The supplied Renderer is used to format the output, and extensions dictates
// which non-standard extensions are enabled.
//
// To use the supplied Html or LaTeX renderers, see HtmlRenderer and
// LatexRenderer, respectively.
func Markdown(input []byte, renderer Renderer, extensions int) []byte {
return MarkdownOptions(input, renderer, Options{
Extensions: extensions})
}
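Taken together with MarkdownBasic and MarkdownCommon above, the rendering entry points can be exercised with a short sketch; the import path is assumed from the file headers and the flag choices are illustrative:

```go
package main

import (
	"fmt"

	"github.com/russross/blackfriday"
)

func main() {
	input := []byte("# Title\n\nSome *markdown* text.\n")

	// Convenience path: common extensions with the XHTML/smartypants renderer.
	fmt.Println(string(blackfriday.MarkdownCommon(input)))

	// Explicit path: choose the renderer and extension flags yourself.
	renderer := blackfriday.HtmlRenderer(blackfriday.HTML_USE_XHTML, "Example", "")
	extensions := blackfriday.EXTENSION_FENCED_CODE | blackfriday.EXTENSION_TABLES
	fmt.Println(string(blackfriday.Markdown(input, renderer, extensions)))
}
```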
// MarkdownOptions is just like Markdown but takes additional options through
// the Options struct.
func MarkdownOptions(input []byte, renderer Renderer, opts Options) []byte {
// no point in parsing if we can't render
if renderer == nil {
return nil
}
extensions := opts.Extensions
// fill in the render structure
p := new(parser)
p.r = renderer
p.flags = extensions
p.refOverride = opts.ReferenceOverride
p.refs = make(map[string]*reference)
p.maxNesting = 16
p.insideLink = false
// register inline parsers
p.inlineCallback['*'] = emphasis
p.inlineCallback['_'] = emphasis
if extensions&EXTENSION_STRIKETHROUGH != 0 {
p.inlineCallback['~'] = emphasis
}
p.inlineCallback['`'] = codeSpan
p.inlineCallback['\n'] = lineBreak
p.inlineCallback['['] = link
p.inlineCallback['<'] = leftAngle
p.inlineCallback['\\'] = escape
p.inlineCallback['&'] = entity
if extensions&EXTENSION_AUTOLINK != 0 {
p.inlineCallback[':'] = autoLink
}
if extensions&EXTENSION_FOOTNOTES != 0 {
p.notes = make([]*reference, 0)
}
first := firstPass(p, input)
second := secondPass(p, first)
return second
}
// first pass:
// - extract references
// - expand tabs
// - normalize newlines
// - copy everything else
// - add missing newlines before fenced code blocks
func firstPass(p *parser, input []byte) []byte {
var out bytes.Buffer
tabSize := TAB_SIZE_DEFAULT
if p.flags&EXTENSION_TAB_SIZE_EIGHT != 0 {
tabSize = TAB_SIZE_EIGHT
}
beg, end := 0, 0
lastLineWasBlank := false
lastFencedCodeBlockEnd := 0
for beg < len(input) { // iterate over lines
if end = isReference(p, input[beg:], tabSize); end > 0 {
beg += end
} else { // skip to the next line
end = beg
for end < len(input) && input[end] != '\n' && input[end] != '\r' {
end++
}
if p.flags&EXTENSION_FENCED_CODE != 0 {
// when the last line was not blank and a fenced code block comes after
if beg >= lastFencedCodeBlockEnd {
if i := p.fencedCode(&out, input[beg:], false); i > 0 {
if !lastLineWasBlank {
out.WriteByte('\n') // need to inject additional linebreak
}
lastFencedCodeBlockEnd = beg + i
}
}
lastLineWasBlank = end == beg
}
// add the line body if present
if end > beg {
if end < lastFencedCodeBlockEnd { // Do not expand tabs while inside fenced code blocks.
out.Write(input[beg:end])
} else {
expandTabs(&out, input[beg:end], tabSize)
}
}
out.WriteByte('\n')
if end < len(input) && input[end] == '\r' {
end++
}
if end < len(input) && input[end] == '\n' {
end++
}
beg = end
}
}
// empty input?
if out.Len() == 0 {
out.WriteByte('\n')
}
return out.Bytes()
}
// second pass: actual rendering
func secondPass(p *parser, input []byte) []byte {
var output bytes.Buffer
p.r.DocumentHeader(&output)
p.block(&output, input)
if p.flags&EXTENSION_FOOTNOTES != 0 && len(p.notes) > 0 {
p.r.Footnotes(&output, func() bool {
flags := LIST_ITEM_BEGINNING_OF_LIST
for _, ref := range p.notes {
var buf bytes.Buffer
if ref.hasBlock {
flags |= LIST_ITEM_CONTAINS_BLOCK
p.block(&buf, ref.title)
} else {
p.inline(&buf, ref.title)
}
p.r.FootnoteItem(&output, ref.link, buf.Bytes(), flags)
flags &^= LIST_ITEM_BEGINNING_OF_LIST | LIST_ITEM_CONTAINS_BLOCK
}
return true
})
}
p.r.DocumentFooter(&output)
if p.nesting != 0 {
panic("Nesting level did not end at zero")
}
return output.Bytes()
}
//
// Link references
//
// This section implements support for references that (usually) appear
// as footnotes in a document, and can be referenced anywhere in the document.
// The basic format is:
//
// [1]: http://www.google.com/ "Google"
// [2]: http://www.github.com/ "Github"
//
// Anywhere in the document, the reference can be linked by referring to its
// label, i.e., 1 and 2 in this example, as in:
//
// This library is hosted on [Github][2], a git hosting site.
//
// Actual footnotes as specified in Pandoc and supported by some other Markdown
// libraries such as php-markdown are also taken care of. They look like this:
//
// This sentence needs a bit of further explanation.[^note]
//
// [^note]: This is the explanation.
//
// Footnotes should be placed at the end of the document in an ordered list.
// Inline footnotes such as:
//
// Inline footnotes^[Not supported.] also exist.
//
// are not yet supported.
// References are parsed and stored in this struct.
type reference struct {
link []byte
title []byte
noteId int // 0 if not a footnote ref
hasBlock bool
text []byte
}
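The reference and footnote syntax described in the comment block above can be exercised end to end; this sketch assumes the upstream import path and enables EXTENSION_FOOTNOTES so the `[^note]` form is parsed:

```go
package main

import (
	"fmt"

	"github.com/russross/blackfriday"
)

func main() {
	input := []byte(`This library is hosted on [Github][2], a git hosting site.[^note]

[2]: http://www.github.com/ "Github"
[^note]: This is the explanation, rendered at the end of the document.
`)

	renderer := blackfriday.HtmlRenderer(0, "", "")
	fmt.Println(string(blackfriday.Markdown(input, renderer, blackfriday.EXTENSION_FOOTNOTES)))
}
```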
// Check whether or not data starts with a reference link.
// If so, it is parsed and stored in the list of references
// (in the render struct).
// Returns the number of bytes to skip to move past it,
// or zero if the first line is not a reference.
func isReference(p *parser, data []byte, tabSize int) int {
// up to 3 optional leading spaces
if len(data) < 4 {
return 0
}
i := 0
for i < 3 && data[i] == ' ' {
i++
}
noteId := 0
// id part: anything but a newline between brackets
if data[i] != '[' {
return 0
}
i++
if p.flags&EXTENSION_FOOTNOTES != 0 {
if i < len(data) && data[i] == '^' {
// we can set it to anything here because the proper noteIds will
// be assigned later during the second pass. It just has to be != 0
noteId = 1
i++
}
}
idOffset := i
for i < len(data) && data[i] != '\n' && data[i] != '\r' && data[i] != ']' {
i++
}
if i >= len(data) || data[i] != ']' {
return 0
}
idEnd := i
// spacer: colon (space | tab)* newline? (space | tab)*
i++
if i >= len(data) || data[i] != ':' {
return 0
}
i++
for i < len(data) && (data[i] == ' ' || data[i] == '\t') {
i++
}
if i < len(data) && (data[i] == '\n' || data[i] == '\r') {
i++
if i < len(data) && data[i] == '\n' && data[i-1] == '\r' {
i++
}
}
for i < len(data) && (data[i] == ' ' || data[i] == '\t') {
i++
}
if i >= len(data) {
return 0
}
var (
linkOffset, linkEnd int
titleOffset, titleEnd int
lineEnd int
raw []byte
hasBlock bool
)
if p.flags&EXTENSION_FOOTNOTES != 0 && noteId != 0 {
linkOffset, linkEnd, raw, hasBlock = scanFootnote(p, data, i, tabSize)
lineEnd = linkEnd
} else {
linkOffset, linkEnd, titleOffset, titleEnd, lineEnd = scanLinkRef(p, data, i)
}
if lineEnd == 0 {
return 0
}
// a valid ref has been found
ref := &reference{
noteId: noteId,
hasBlock: hasBlock,
}
if noteId > 0 {
// reusing the link field for the id since footnotes don't have links
ref.link = data[idOffset:idEnd]
// if footnote, it's not really a title, it's the contained text
ref.title = raw
} else {
ref.link = data[linkOffset:linkEnd]
ref.title = data[titleOffset:titleEnd]
}
// id matches are case-insensitive
id := string(bytes.ToLower(data[idOffset:idEnd]))
p.refs[id] = ref
return lineEnd
}
func scanLinkRef(p *parser, data []byte, i int) (linkOffset, linkEnd, titleOffset, titleEnd, lineEnd int) {
// link: whitespace-free sequence, optionally between angle brackets
if data[i] == '<' {
i++
}
linkOffset = i
for i < len(data) && data[i] != ' ' && data[i] != '\t' && data[i] != '\n' && data[i] != '\r' {
i++
}
if i == len(data) {
return
}
linkEnd = i
if data[linkOffset] == '<' && data[linkEnd-1] == '>' {
linkOffset++
linkEnd--
}
// optional spacer: (space | tab)* (newline | '\'' | '"' | '(' )
for i < len(data) && (data[i] == ' ' || data[i] == '\t') {
i++
}
if i < len(data) && data[i] != '\n' && data[i] != '\r' && data[i] != '\'' && data[i] != '"' && data[i] != '(' {
return
}
// compute end-of-line
if i >= len(data) || data[i] == '\r' || data[i] == '\n' {
lineEnd = i
}
if i+1 < len(data) && data[i] == '\r' && data[i+1] == '\n' {
lineEnd++
}
// optional (space|tab)* spacer after a newline
if lineEnd > 0 {
i = lineEnd + 1
for i < len(data) && (data[i] == ' ' || data[i] == '\t') {
i++
}
}
// optional title: any non-newline sequence enclosed in '"() alone on its line
if i+1 < len(data) && (data[i] == '\'' || data[i] == '"' || data[i] == '(') {
i++
titleOffset = i
// look for EOL
for i < len(data) && data[i] != '\n' && data[i] != '\r' {
i++
}
if i+1 < len(data) && data[i] == '\n' && data[i+1] == '\r' {
titleEnd = i + 1
} else {
titleEnd = i
}
// step back
i--
for i > titleOffset && (data[i] == ' ' || data[i] == '\t') {
i--
}
if i > titleOffset && (data[i] == '\'' || data[i] == '"' || data[i] == ')') {
lineEnd = titleEnd
titleEnd = i
}
}
return
}
// The first bit of this logic is the same as (*parser).listItem, but the rest
// is much simpler. This function simply finds the entire block and shifts it
// over by one tab if it is indeed a block (just returns the line if it's not).
// blockEnd is the end of the section in the input buffer, and contents is the
// extracted text that was shifted over one tab. It will need to be rendered at
// the end of the document.
func scanFootnote(p *parser, data []byte, i, indentSize int) (blockStart, blockEnd int, contents []byte, hasBlock bool) {
if i == 0 || len(data) == 0 {
return
}
// skip leading whitespace on first line
for i < len(data) && data[i] == ' ' {
i++
}
blockStart = i
// find the end of the line
blockEnd = i
for i < len(data) && data[i-1] != '\n' {
i++
}
// get working buffer
var raw bytes.Buffer
// put the first line into the working buffer
raw.Write(data[blockEnd:i])
blockEnd = i
// process the following lines
containsBlankLine := false
gatherLines:
for blockEnd < len(data) {
i++
// find the end of this line
for i < len(data) && data[i-1] != '\n' {
i++
}
// if it is an empty line, guess that it is part of this item
// and move on to the next line
if p.isEmpty(data[blockEnd:i]) > 0 {
containsBlankLine = true
blockEnd = i
continue
}
n := 0
if n = isIndented(data[blockEnd:i], indentSize); n == 0 {
// this is the end of the block.
// we don't want to include this last line in the index.
break gatherLines
}
// if there were blank lines before this one, insert a new one now
if containsBlankLine {
raw.WriteByte('\n')
containsBlankLine = false
}
// get rid of that first tab, write to buffer
raw.Write(data[blockEnd+n : i])
hasBlock = true
blockEnd = i
}
if data[blockEnd-1] != '\n' {
raw.WriteByte('\n')
}
contents = raw.Bytes()
return
}
//
//
// Miscellaneous helper functions
//
//
// Test if a character is a punctuation symbol.
// Taken from a private function in regexp in the stdlib.
func ispunct(c byte) bool {
for _, r := range []byte("!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~") {
if c == r {
return true
}
}
return false
}
// Test if a character is a whitespace character.
func isspace(c byte) bool {
return c == ' ' || c == '\t' || c == '\n' || c == '\r' || c == '\f' || c == '\v'
}
// Test if a character is a letter.
func isletter(c byte) bool {
return (c >= 'a' && c <= 'z') || (c >= 'A' && c <= 'Z')
}
// Test if a character is a letter or a digit.
// TODO: check when this is looking for ASCII alnum and when it should use unicode
func isalnum(c byte) bool {
return (c >= '0' && c <= '9') || isletter(c)
}
// Replace tab characters with spaces, aligning to the next TAB_SIZE column.
// The caller is responsible for appending the trailing newline.
func expandTabs(out *bytes.Buffer, line []byte, tabSize int) {
// first, check for common cases: no tabs, or only tabs at beginning of line
i, prefix := 0, 0
slowcase := false
for i = 0; i < len(line); i++ {
if line[i] == '\t' {
if prefix == i {
prefix++
} else {
slowcase = true
break
}
}
}
// no need to decode runes if all tabs are at the beginning of the line
if !slowcase {
for i = 0; i < prefix*tabSize; i++ {
out.WriteByte(' ')
}
out.Write(line[prefix:])
return
}
// the slow case: we need to count runes to figure out how
// many spaces to insert for each tab
column := 0
i = 0
for i < len(line) {
start := i
for i < len(line) && line[i] != '\t' {
_, size := utf8.DecodeRune(line[i:])
i += size
column++
}
if i > start {
out.Write(line[start:i])
}
if i >= len(line) {
break
}
for {
out.WriteByte(' ')
column++
if column%tabSize == 0 {
break
}
}
i++
}
}
// Find if a line counts as indented or not.
// Returns number of characters the indent is (0 = not indented).
func isIndented(data []byte, indentSize int) int {
if len(data) == 0 {
return 0
}
if data[0] == '\t' {
return 1
}
if len(data) < indentSize {
return 0
}
for i := 0; i < indentSize; i++ {
if data[i] != ' ' {
return 0
}
}
return indentSize
}
// Create a url-safe slug for fragments
func slugify(in []byte) []byte {
if len(in) == 0 {
return in
}
out := make([]byte, 0, len(in))
sym := false
for _, ch := range in {
if isalnum(ch) {
sym = false
out = append(out, ch)
} else if sym {
continue
} else {
out = append(out, '-')
sym = true
}
}
var a, b int
var ch byte
for a, ch = range out {
if ch != '-' {
break
}
}
for b = len(out) - 1; b > 0; b-- {
if out[b] != '-' {
break
}
}
return out[a : b+1]
}


@@ -1,398 +0,0 @@
//
// Blackfriday Markdown Processor
// Available at http://github.com/russross/blackfriday
//
// Copyright © 2011 Russ Ross <russ@russross.com>.
// Distributed under the Simplified BSD License.
// See README.md for details.
//
//
//
// SmartyPants rendering
//
//
package blackfriday
import (
"bytes"
)
type smartypantsData struct {
inSingleQuote bool
inDoubleQuote bool
}
func wordBoundary(c byte) bool {
return c == 0 || isspace(c) || ispunct(c)
}
func tolower(c byte) byte {
if c >= 'A' && c <= 'Z' {
return c - 'A' + 'a'
}
return c
}
func isdigit(c byte) bool {
return c >= '0' && c <= '9'
}
func smartQuoteHelper(out *bytes.Buffer, previousChar byte, nextChar byte, quote byte, isOpen *bool) bool {
// edge of the buffer is likely to be a tag that we don't get to see,
// so we treat it like text sometimes
// enumerate all sixteen possibilities for (previousChar, nextChar)
// each can be one of {0, space, punct, other}
switch {
case previousChar == 0 && nextChar == 0:
// context is not any help here, so toggle
*isOpen = !*isOpen
case isspace(previousChar) && nextChar == 0:
// [ "] might be [ "<code>foo...]
*isOpen = true
case ispunct(previousChar) && nextChar == 0:
// [!"] hmm... could be [Run!"] or [("<code>...]
*isOpen = false
case /* isnormal(previousChar) && */ nextChar == 0:
// [a"] is probably a close
*isOpen = false
case previousChar == 0 && isspace(nextChar):
// [" ] might be [...foo</code>" ]
*isOpen = false
case isspace(previousChar) && isspace(nextChar):
// [ " ] context is not any help here, so toggle
*isOpen = !*isOpen
case ispunct(previousChar) && isspace(nextChar):
// [!" ] is probably a close
*isOpen = false
case /* isnormal(previousChar) && */ isspace(nextChar):
// [a" ] this is one of the easy cases
*isOpen = false
case previousChar == 0 && ispunct(nextChar):
// ["!] hmm... could be ["$1.95] or [</code>"!...]
*isOpen = false
case isspace(previousChar) && ispunct(nextChar):
// [ "!] looks more like [ "$1.95]
*isOpen = true
case ispunct(previousChar) && ispunct(nextChar):
// [!"!] context is not any help here, so toggle
*isOpen = !*isOpen
case /* isnormal(previousChar) && */ ispunct(nextChar):
// [a"!] is probably a close
*isOpen = false
case previousChar == 0 /* && isnormal(nextChar) */ :
// ["a] is probably an open
*isOpen = true
case isspace(previousChar) /* && isnormal(nextChar) */ :
// [ "a] this is one of the easy cases
*isOpen = true
case ispunct(previousChar) /* && isnormal(nextChar) */ :
// [!"a] is probably an open
*isOpen = true
default:
// [a'b] maybe a contraction?
*isOpen = false
}
out.WriteByte('&')
if *isOpen {
out.WriteByte('l')
} else {
out.WriteByte('r')
}
out.WriteByte(quote)
out.WriteString("quo;")
return true
}
func smartSingleQuote(out *bytes.Buffer, smrt *smartypantsData, previousChar byte, text []byte) int {
if len(text) >= 2 {
t1 := tolower(text[1])
if t1 == '\'' {
nextChar := byte(0)
if len(text) >= 3 {
nextChar = text[2]
}
if smartQuoteHelper(out, previousChar, nextChar, 'd', &smrt.inDoubleQuote) {
return 1
}
}
if (t1 == 's' || t1 == 't' || t1 == 'm' || t1 == 'd') && (len(text) < 3 || wordBoundary(text[2])) {
out.WriteString("&rsquo;")
return 0
}
if len(text) >= 3 {
t2 := tolower(text[2])
if ((t1 == 'r' && t2 == 'e') || (t1 == 'l' && t2 == 'l') || (t1 == 'v' && t2 == 'e')) &&
(len(text) < 4 || wordBoundary(text[3])) {
out.WriteString("&rsquo;")
return 0
}
}
}
nextChar := byte(0)
if len(text) > 1 {
nextChar = text[1]
}
if smartQuoteHelper(out, previousChar, nextChar, 's', &smrt.inSingleQuote) {
return 0
}
out.WriteByte(text[0])
return 0
}
func smartParens(out *bytes.Buffer, smrt *smartypantsData, previousChar byte, text []byte) int {
if len(text) >= 3 {
t1 := tolower(text[1])
t2 := tolower(text[2])
if t1 == 'c' && t2 == ')' {
out.WriteString("&copy;")
return 2
}
if t1 == 'r' && t2 == ')' {
out.WriteString("&reg;")
return 2
}
if len(text) >= 4 && t1 == 't' && t2 == 'm' && text[3] == ')' {
out.WriteString("&trade;")
return 3
}
}
out.WriteByte(text[0])
return 0
}
func smartDash(out *bytes.Buffer, smrt *smartypantsData, previousChar byte, text []byte) int {
if len(text) >= 2 {
if text[1] == '-' {
out.WriteString("&mdash;")
return 1
}
if wordBoundary(previousChar) && wordBoundary(text[1]) {
out.WriteString("&ndash;")
return 0
}
}
out.WriteByte(text[0])
return 0
}
func smartDashLatex(out *bytes.Buffer, smrt *smartypantsData, previousChar byte, text []byte) int {
if len(text) >= 3 && text[1] == '-' && text[2] == '-' {
out.WriteString("&mdash;")
return 2
}
if len(text) >= 2 && text[1] == '-' {
out.WriteString("&ndash;")
return 1
}
out.WriteByte(text[0])
return 0
}
func smartAmpVariant(out *bytes.Buffer, smrt *smartypantsData, previousChar byte, text []byte, quote byte) int {
if bytes.HasPrefix(text, []byte("&quot;")) {
nextChar := byte(0)
if len(text) >= 7 {
nextChar = text[6]
}
if smartQuoteHelper(out, previousChar, nextChar, quote, &smrt.inDoubleQuote) {
return 5
}
}
if bytes.HasPrefix(text, []byte("&#0;")) {
return 3
}
out.WriteByte('&')
return 0
}
func smartAmp(out *bytes.Buffer, smrt *smartypantsData, previousChar byte, text []byte) int {
return smartAmpVariant(out, smrt, previousChar, text, 'd')
}
func smartAmpAngledQuote(out *bytes.Buffer, smrt *smartypantsData, previousChar byte, text []byte) int {
return smartAmpVariant(out, smrt, previousChar, text, 'a')
}
func smartPeriod(out *bytes.Buffer, smrt *smartypantsData, previousChar byte, text []byte) int {
if len(text) >= 3 && text[1] == '.' && text[2] == '.' {
out.WriteString("&hellip;")
return 2
}
if len(text) >= 5 && text[1] == ' ' && text[2] == '.' && text[3] == ' ' && text[4] == '.' {
out.WriteString("&hellip;")
return 4
}
out.WriteByte(text[0])
return 0
}
func smartBacktick(out *bytes.Buffer, smrt *smartypantsData, previousChar byte, text []byte) int {
if len(text) >= 2 && text[1] == '`' {
nextChar := byte(0)
if len(text) >= 3 {
nextChar = text[2]
}
if smartQuoteHelper(out, previousChar, nextChar, 'd', &smrt.inDoubleQuote) {
return 1
}
}
out.WriteByte(text[0])
return 0
}
func smartNumberGeneric(out *bytes.Buffer, smrt *smartypantsData, previousChar byte, text []byte) int {
if wordBoundary(previousChar) && previousChar != '/' && len(text) >= 3 {
// is it of the form digits/digits(word boundary)?, i.e., \d+/\d+\b
// note: check for a regular slash (/) or a fraction slash (⁄, 0x2044, or 0xe2 81 84 in utf-8)
// and avoid changing dates like 1/23/2005 into fractions.
numEnd := 0
for len(text) > numEnd && isdigit(text[numEnd]) {
numEnd++
}
if numEnd == 0 {
out.WriteByte(text[0])
return 0
}
denStart := numEnd + 1
if len(text) > numEnd+3 && text[numEnd] == 0xe2 && text[numEnd+1] == 0x81 && text[numEnd+2] == 0x84 {
denStart = numEnd + 3
} else if len(text) < numEnd+2 || text[numEnd] != '/' {
out.WriteByte(text[0])
return 0
}
denEnd := denStart
for len(text) > denEnd && isdigit(text[denEnd]) {
denEnd++
}
if denEnd == denStart {
out.WriteByte(text[0])
return 0
}
if len(text) == denEnd || wordBoundary(text[denEnd]) && text[denEnd] != '/' {
out.WriteString("<sup>")
out.Write(text[:numEnd])
out.WriteString("</sup>&frasl;<sub>")
out.Write(text[denStart:denEnd])
out.WriteString("</sub>")
return denEnd - 1
}
}
out.WriteByte(text[0])
return 0
}
func smartNumber(out *bytes.Buffer, smrt *smartypantsData, previousChar byte, text []byte) int {
if wordBoundary(previousChar) && previousChar != '/' && len(text) >= 3 {
if text[0] == '1' && text[1] == '/' && text[2] == '2' {
if len(text) < 4 || wordBoundary(text[3]) && text[3] != '/' {
out.WriteString("&frac12;")
return 2
}
}
if text[0] == '1' && text[1] == '/' && text[2] == '4' {
if len(text) < 4 || wordBoundary(text[3]) && text[3] != '/' || (len(text) >= 5 && tolower(text[3]) == 't' && tolower(text[4]) == 'h') {
out.WriteString("&frac14;")
return 2
}
}
if text[0] == '3' && text[1] == '/' && text[2] == '4' {
if len(text) < 4 || wordBoundary(text[3]) && text[3] != '/' || (len(text) >= 6 && tolower(text[3]) == 't' && tolower(text[4]) == 'h' && tolower(text[5]) == 's') {
out.WriteString("&frac34;")
return 2
}
}
}
out.WriteByte(text[0])
return 0
}
func smartDoubleQuoteVariant(out *bytes.Buffer, smrt *smartypantsData, previousChar byte, text []byte, quote byte) int {
nextChar := byte(0)
if len(text) > 1 {
nextChar = text[1]
}
if !smartQuoteHelper(out, previousChar, nextChar, quote, &smrt.inDoubleQuote) {
out.WriteString("&quot;")
}
return 0
}
func smartDoubleQuote(out *bytes.Buffer, smrt *smartypantsData, previousChar byte, text []byte) int {
return smartDoubleQuoteVariant(out, smrt, previousChar, text, 'd')
}
func smartAngledDoubleQuote(out *bytes.Buffer, smrt *smartypantsData, previousChar byte, text []byte) int {
return smartDoubleQuoteVariant(out, smrt, previousChar, text, 'a')
}
func smartLeftAngle(out *bytes.Buffer, smrt *smartypantsData, previousChar byte, text []byte) int {
i := 0
for i < len(text) && text[i] != '>' {
i++
}
out.Write(text[:i+1])
return i
}
type smartCallback func(out *bytes.Buffer, smrt *smartypantsData, previousChar byte, text []byte) int
type smartypantsRenderer [256]smartCallback
func smartypants(flags int) *smartypantsRenderer {
r := new(smartypantsRenderer)
if flags&HTML_SMARTYPANTS_ANGLED_QUOTES == 0 {
r['"'] = smartDoubleQuote
r['&'] = smartAmp
} else {
r['"'] = smartAngledDoubleQuote
r['&'] = smartAmpAngledQuote
}
r['\''] = smartSingleQuote
r['('] = smartParens
if flags&HTML_SMARTYPANTS_LATEX_DASHES == 0 {
r['-'] = smartDash
} else {
r['-'] = smartDashLatex
}
r['.'] = smartPeriod
if flags&HTML_SMARTYPANTS_FRACTIONS == 0 {
r['1'] = smartNumber
r['3'] = smartNumber
} else {
for ch := '1'; ch <= '9'; ch++ {
r[ch] = smartNumberGeneric
}
}
r['<'] = smartLeftAngle
r['`'] = smartBacktick
return r
}


@@ -1,10 +0,0 @@
language: go
go:
- 1.5
install:
- go get golang.org/x/tools/cmd/vet
script:
- go get -t -v ./...
- diff -u <(echo -n) <(gofmt -d ./)
- go tool vet ./
- go test -v -race ./...


@@ -1,31 +0,0 @@
# sanitized_anchor_name [![Build Status](https://travis-ci.org/shurcooL/sanitized_anchor_name.svg?branch=master)](https://travis-ci.org/shurcooL/sanitized_anchor_name) [![GoDoc](https://godoc.org/github.com/shurcooL/sanitized_anchor_name?status.svg)](https://godoc.org/github.com/shurcooL/sanitized_anchor_name)
Package sanitized_anchor_name provides a func to create sanitized anchor names.
Its logic can be reused by multiple packages to create interoperable anchor names and links to those anchors.
At this time, it does not try to ensure that generated anchor names are unique, that responsibility falls on the caller.
Installation
------------
```bash
go get -u github.com/shurcooL/sanitized_anchor_name
```
Example
-------
```Go
anchorName := sanitized_anchor_name.Create("This is a header")
fmt.Println(anchorName)
// Output:
// this-is-a-header
```
License
-------
- [MIT License](http://opensource.org/licenses/mit-license.php)


@@ -1,29 +0,0 @@
// Package sanitized_anchor_name provides a func to create sanitized anchor names.
//
// Its logic can be reused by multiple packages to create interoperable anchor names
// and links to those anchors.
//
// At this time, it does not try to ensure that generated anchor names
// are unique, that responsibility falls on the caller.
package sanitized_anchor_name
import "unicode"
// Create returns a sanitized anchor name for the given text.
func Create(text string) string {
var anchorName []rune
var futureDash = false
for _, r := range []rune(text) {
switch {
case unicode.IsLetter(r) || unicode.IsNumber(r):
if futureDash && len(anchorName) > 0 {
anchorName = append(anchorName, '-')
}
futureDash = false
anchorName = append(anchorName, unicode.ToLower(r))
default:
futureDash = true
}
}
return string(anchorName)
}


@@ -1,24 +0,0 @@
# Compiled Object files, Static and Dynamic libs (Shared Objects)
*.o
*.a
*.so
# Folders
_obj
_test
# Architecture specific extensions/prefixes
*.[568vq]
[568vq].out
*.cgo1.go
*.cgo2.c
_cgo_defun.c
_cgo_gotypes.go
_cgo_export.*
_testmain.go
*.exe
cobra.test


@@ -1,9 +0,0 @@
language: go
go:
- 1.3.3
- 1.4.2
- 1.5.1
- tip
script:
- go test ./...
- go build


@@ -1,174 +0,0 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.

View File

@ -1,525 +0,0 @@
# Cobra
A Commander for modern go CLI interactions
[![Build Status](https://travis-ci.org/spf13/cobra.svg)](https://travis-ci.org/spf13/cobra)
## Overview
Cobra is a commander providing a simple interface to create powerful modern CLI
interfaces similar to git & go tools. In addition to providing an interface, Cobra
simultaneously provides a controller to organize your application code.
Inspired by go, go-Commander, gh and subcommand, Cobra improves on these by
providing **fully posix compliant flags** (including short & long versions),
**nesting commands**, and the ability to **define your own help and usage** for any or
all commands.
Cobra has an exceptionally clean interface and simple design without needless
constructors or initialization methods.
Applications built with Cobra commands are designed to be as user-friendly as
possible. Flags can be placed before or after the command (as long as a
confusing space isn't provided). Both short and long flags can be used. A
command need not even be fully typed. The shortest unambiguous string will
suffice. Help is automatically generated and available for the application or
for a specific command using either the help command or the --help flag.
## Concepts
Cobra is built on a structure of commands & flags.
**Commands** represent actions and **Flags** are modifiers for those actions.
In the following example 'server' is a command and 'port' is a flag.
hugo server --port=1313
### Commands
Command is the central point of the application. Each interaction that
the application supports will be contained in a Command. A command can
have children commands and optionally run an action.
In the example above 'server' is the command.
A Command has the following structure:
type Command struct {
Use string // The one-line usage message.
Short string // The short description shown in the 'help' output.
Long string // The long message shown in the 'help <this-command>' output.
Run func(cmd *Command, args []string) // Run runs the command.
}
### Flags
A Flag is a way to modify the behavior of a command. Cobra supports
fully posix compliant flags as well as the go flag package.
A Cobra command can define flags that persist through to children commands
and flags that are only available to that command.
In the example above 'port' is the flag.
Flag functionality is provided by the [pflag
library](https://github.com/ogier/pflag), a fork of the flag standard library
which maintains the same interface while adding posix compliance.
## Usage
Cobra works by creating a set of commands and then organizing them into a tree.
The tree defines the structure of the application.
Once each command is defined with its corresponding flags, the
tree is assigned to the commander, which is finally executed.
### Installing
Using Cobra is easy. First use go get to install the latest version
of the library.
$ go get github.com/spf13/cobra
Next include cobra in your application.
import "github.com/spf13/cobra"
### Create the root command
The root command represents your binary itself.
Cobra doesn't require any special constructors. Simply create your commands.
var HugoCmd = &cobra.Command{
Use: "hugo",
Short: "Hugo is a very fast static site generator",
Long: `A Fast and Flexible Static Site Generator built with
love by spf13 and friends in Go.
Complete documentation is available at http://hugo.spf13.com`,
Run: func(cmd *cobra.Command, args []string) {
// Do Stuff Here
},
}
### Create additional commands
Additional commands can be defined.
var versionCmd = &cobra.Command{
Use: "version",
Short: "Print the version number of Hugo",
Long: `All software has versions. This is Hugo's`,
Run: func(cmd *cobra.Command, args []string) {
fmt.Println("Hugo Static Site Generator v0.9 -- HEAD")
},
}
### Attach command to its parent
In this example we are attaching it to the root, but commands can be attached at any level.
HugoCmd.AddCommand(versionCmd)
### Assign flags to a command
Since the flags are defined and used in different locations, we need to
define a variable outside, with the correct scope, to assign the flag to.
var Verbose bool
var Source string
There are two different approaches to assign a flag.
#### Persistent Flags
A flag can be 'persistent' meaning that this flag will be available to the
command it's assigned to as well as every command under that command. For
global flags assign a flag as a persistent flag on the root.
HugoCmd.PersistentFlags().BoolVarP(&Verbose, "verbose", "v", false, "verbose output")
#### Local Flags
A flag can also be assigned locally which will only apply to that specific command.
HugoCmd.Flags().StringVarP(&Source, "source", "s", "", "Source directory to read from")
### Remove a command from its parent
Removing a command is not a common action in simple programs but it allows 3rd parties to customize an existing command tree.
In this example, we remove the existing `VersionCmd` command of an existing root command, and we replace it by our own version.
mainlib.RootCmd.RemoveCommand(mainlib.VersionCmd)
mainlib.RootCmd.AddCommand(versionCmd)
### Once all commands and flags are defined, Execute the commands
Execute should be run on the root for clarity, though it can be called on any command.
HugoCmd.Execute()
## Example
In the example below we have defined three commands. Two are at the top level
and one (cmdTimes) is a child of one of the top commands. In this case the root
is not executable meaning that a subcommand is required. This is accomplished
by not providing a 'Run' for the 'rootCmd'.
We have only defined one flag for a single command.
More documentation about flags is available at https://github.com/spf13/pflag
import(
"github.com/spf13/cobra"
"fmt"
"strings"
)
func main() {
var echoTimes int
var cmdPrint = &cobra.Command{
Use: "print [string to print]",
Short: "Print anything to the screen",
Long: `print is for printing anything back to the screen.
For many years people have printed back to the screen.
`,
Run: func(cmd *cobra.Command, args []string) {
fmt.Println("Print: " + strings.Join(args, " "))
},
}
var cmdEcho = &cobra.Command{
Use: "echo [string to echo]",
Short: "Echo anything to the screen",
Long: `echo is for echoing anything back.
Echo works a lot like print, except it has a child command.
`,
Run: func(cmd *cobra.Command, args []string) {
fmt.Println("Print: " + strings.Join(args, " "))
},
}
var cmdTimes = &cobra.Command{
Use: "times [# times] [string to echo]",
Short: "Echo anything to the screen more times",
Long: `echo things multiple times back to the user by providing
a count and a string.`,
Run: func(cmd *cobra.Command, args []string) {
for i:=0; i < echoTimes; i++ {
fmt.Println("Echo: " + strings.Join(args, " "))
}
},
}
cmdTimes.Flags().IntVarP(&echoTimes, "times", "t", 1, "times to echo the input")
var rootCmd = &cobra.Command{Use: "app"}
rootCmd.AddCommand(cmdPrint, cmdEcho)
cmdEcho.AddCommand(cmdTimes)
rootCmd.Execute()
}
For a more complete example of a larger application, please checkout [Hugo](http://hugo.spf13.com)
## The Help Command
Cobra automatically adds a help command to your application when you have subcommands.
This will be called when a user runs 'app help'. Additionally, help will also
support all other commands as input. Say, for instance, you have a command called
'create'; without any additional configuration, Cobra will respond when 'app help
create' is called. Every command will automatically have the '--help' flag added.
### Example
The following output is automatically generated by cobra. Nothing beyond the
command and flag definitions is needed.
> hugo help
A Fast and Flexible Static Site Generator built with
love by spf13 and friends in Go.
Complete documentation is available at http://hugo.spf13.com
Usage:
hugo [flags]
hugo [command]
Available Commands:
server :: Hugo runs its own webserver to render the files
version :: Print the version number of Hugo
check :: Check content in the source directory
benchmark :: Benchmark hugo by building a site a number of times
help [command] :: Help about any command
Available Flags:
-b, --base-url="": hostname (and path) to the root eg. http://spf13.com/
-D, --build-drafts=false: include content marked as draft
--config="": config file (default is path/config.yaml|json|toml)
-d, --destination="": filesystem path to write files to
-s, --source="": filesystem path to read files relative from
--stepAnalysis=false: display memory and timing of different steps of the program
--uglyurls=false: if true, use /filename.html instead of /filename/
-v, --verbose=false: verbose output
-w, --watch=false: watch filesystem for changes and recreate as needed
Use "hugo help [command]" for more information about that command.
Help is just a command like any other. There is no special logic or behavior
around it. In fact you can provide your own if you want.
### Defining your own help
You can provide your own Help command or your own template for the default command to use.
The default help command is
func (c *Command) initHelp() {
if c.helpCommand == nil {
c.helpCommand = &Command{
Use: "help [command]",
Short: "Help about any command",
Long: `Help provides help for any command in the application.
Simply type ` + c.Name() + ` help [path to command] for full details.`,
Run: c.HelpFunc(),
}
}
c.AddCommand(c.helpCommand)
}
You can provide your own command, function or template through the following methods.
command.SetHelpCommand(cmd *Command)
command.SetHelpFunc(f func(*Command, []string))
command.SetHelpTemplate(s string)
The latter two will also apply to any child commands.
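As a rough sketch of how the second method might be used (reusing the `HugoCmd` defined earlier; the custom output is purely illustrative), a help function set on the root also applies to its children:

```go
HugoCmd.SetHelpFunc(func(cmd *cobra.Command, args []string) {
	// Print a custom help message instead of the default template.
	fmt.Printf("Custom help for %s\n", cmd.Name())
})
```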
## Usage
When the user provides an invalid flag or invalid command, Cobra responds by
showing the user the 'usage'.
### Example
You may recognize this from the help above. That's because the default help
embeds the usage as part of its output.
Usage:
hugo [flags]
hugo [command]
Available Commands:
server Hugo runs its own webserver to render the files
version Print the version number of Hugo
check Check content in the source directory
benchmark Benchmark hugo by building a site a number of times
help [command] Help about any command
Available Flags:
-b, --base-url="": hostname (and path) to the root eg. http://spf13.com/
-D, --build-drafts=false: include content marked as draft
--config="": config file (default is path/config.yaml|json|toml)
-d, --destination="": filesystem path to write files to
-s, --source="": filesystem path to read files relative from
--stepAnalysis=false: display memory and timing of different steps of the program
--uglyurls=false: if true, use /filename.html instead of /filename/
-v, --verbose=false: verbose output
-w, --watch=false: watch filesystem for changes and recreate as needed
### Defining your own usage
You can provide your own usage function or template for cobra to use.
The default usage function is
return func(c *Command) error {
err := tmpl(c.Out(), c.UsageTemplate(), c)
return err
}
Like help, the function and template are overridable through public methods.
command.SetUsageFunc(f func(*Command) error)
command.SetUsageTemplate(s string)
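A minimal sketch of the template variant (the template text here is an assumption for illustration, not the library default):

```go
// Replace the default usage output with a one-line template.
HugoCmd.SetUsageTemplate("Usage:\n  {{.UseLine}}\n")
```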
## PreRun or PostRun Hooks
It is possible to run functions before or after the main `Run` function of your command. The `PersistentPreRun` and `PreRun` functions will be executed before `Run`. `PersistentPostRun` and `PostRun` will be executed after `Run`. The `Persistent*Run` functions will be inherited by children if they do not declare their own. These functions are run in the following order:
- `PersistentPreRun`
- `PreRun`
- `Run`
- `PostRun`
- `PersistentPostRun`
An example of two commands which use all of these features is below. When the subcommand is executed, it will run the root command's `PersistentPreRun` but not the root command's `PersistentPostRun`:
```go
package main
import (
"fmt"
"github.com/spf13/cobra"
)
func main() {
var rootCmd = &cobra.Command{
Use: "root [sub]",
Short: "My root command",
PersistentPreRun: func(cmd *cobra.Command, args []string) {
fmt.Printf("Inside rootCmd PersistentPreRun with args: %v\n", args)
},
PreRun: func(cmd *cobra.Command, args []string) {
fmt.Printf("Inside rootCmd PreRun with args: %v\n", args)
},
Run: func(cmd *cobra.Command, args []string) {
fmt.Printf("Inside rootCmd Run with args: %v\n", args)
},
PostRun: func(cmd *cobra.Command, args []string) {
fmt.Printf("Inside rootCmd PostRun with args: %v\n", args)
},
PersistentPostRun: func(cmd *cobra.Command, args []string) {
fmt.Printf("Inside rootCmd PersistentPostRun with args: %v\n", args)
},
}
var subCmd = &cobra.Command{
Use: "sub [no options!]",
Short: "My sub command",
PreRun: func(cmd *cobra.Command, args []string) {
fmt.Printf("Inside subCmd PreRun with args: %v\n", args)
},
Run: func(cmd *cobra.Command, args []string) {
fmt.Printf("Inside subCmd Run with args: %v\n", args)
},
PostRun: func(cmd *cobra.Command, args []string) {
fmt.Printf("Inside subCmd PostRun with args: %v\n", args)
},
PersistentPostRun: func(cmd *cobra.Command, args []string) {
fmt.Printf("Inside subCmd PersistentPostRun with args: %v\n", args)
},
}
rootCmd.AddCommand(subCmd)
rootCmd.SetArgs([]string{""})
_ = rootCmd.Execute()
fmt.Print("\n")
rootCmd.SetArgs([]string{"sub", "arg1", "arg2"})
_ = rootCmd.Execute()
}
```
## Suggestions when "unknown command" happens
Cobra will print automatic suggestions when "unknown command" errors happen. This allows Cobra to behave similarly to the `git` command when a typo happens. For example:
```
$ hugo srever
unknown command "srever" for "hugo"
Did you mean this?
server
Run 'hugo --help' for usage.
```
Suggestions are automatic based on every subcommand registered and use an implementation of Levenshtein distance. Every registered command that matches a minimum distance of 2 (ignoring case) will be displayed as a suggestion.
If you need to disable suggestions or tweak the string distance in your command, use:
command.DisableSuggestions = true
or
command.SuggestionsMinimumDistance = 1
You can also explicitly set names for which a given command will be suggested using the `SuggestFor` attribute. This allows suggestions for strings that are not close in terms of string distance, but that make sense in your set of commands and for which you don't want aliases. Example:
```
$ hugo delete
unknown command "delete" for "hugo"
Did you mean this?
remove
Run 'hugo --help' for usage.
```
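A minimal sketch of how such a suggestion could be wired up, assuming the `SuggestFor` field mentioned above (the command definition itself is illustrative):

```go
var removeCmd = &cobra.Command{
	Use:        "remove [resource]",
	Short:      "Remove a resource",
	SuggestFor: []string{"delete"}, // "hugo delete" will now suggest "remove"
	Run: func(cmd *cobra.Command, args []string) {
		// Do Stuff Here
	},
}
```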
## Generating markdown formatted documentation for your command
Cobra can generate a markdown formatted document based on the subcommands, flags, etc. A simple example of how to do this for your command can be found in [Markdown Docs](md_docs.md)
## Generating man pages for your command
Cobra can generate a man page based on the subcommands, flags, etc. A simple example of how to do this for your command can be found in [Man Docs](man_docs.md)
## Generating bash completions for your command
Cobra can generate a bash completions file. If you add more information to your command these completions can be amazingly powerful and flexible. Read more about [Bash Completions](bash_completions.md)
## Debugging
Cobra provides a DebugFlags method on a command which, when called, will print
out everything Cobra knows about the flags for each command.
### Example
command.DebugFlags()
## Release Notes
* **0.9.0** June 17, 2014
* flags can appear anywhere in the args (provided they are unambiguous)
* --help prints usage screen for app or command
* Prefix matching for commands
* Cleaner looking help and usage output
* Extensive test suite
* **0.8.0** Nov 5, 2013
* Reworked interface to remove commander completely
* Command now primary structure
* No initialization needed
* Usage & Help templates & functions definable at any level
* Updated Readme
* **0.7.0** Sept 24, 2013
* Needs more eyes
* Test suite
* Support for automatic error messages
* Support for help command
* Support for printing to any io.Writer instead of os.Stderr
* Support for persistent flags which cascade down tree
* Ready for integration into Hugo
* **0.1.0** Sept 3, 2013
* Implement first draft
## ToDo
* Launch proper documentation site
## Contributing
1. Fork it
2. Create your feature branch (`git checkout -b my-new-feature`)
3. Commit your changes (`git commit -am 'Add some feature'`)
4. Push to the branch (`git push origin my-new-feature`)
5. Create new Pull Request
## Contributors
Names in no particular order:
* [spf13](https://github.com/spf13)
## License
Cobra is released under the Apache 2.0 license. See [LICENSE.txt](https://github.com/spf13/cobra/blob/master/LICENSE.txt)
[![Bitdeli Badge](https://d2weczhvl823v0.cloudfront.net/spf13/cobra/trend.png)](https://bitdeli.com/free "Bitdeli Badge")

View File

@ -1,398 +0,0 @@
package cobra
import (
"bytes"
"fmt"
"os"
"sort"
"strings"
"github.com/mikefarah/yaml/Godeps/_workspace/src/github.com/spf13/pflag"
)
const (
BashCompFilenameExt = "cobra_annotation_bash_completion_filename_extentions"
BashCompOneRequiredFlag = "cobra_annotation_bash_completion_one_required_flag"
BashCompSubdirsInDir = "cobra_annotation_bash_completion_subdirs_in_dir"
)
func preamble(out *bytes.Buffer) {
fmt.Fprintf(out, `#!/bin/bash
__debug()
{
if [[ -n ${BASH_COMP_DEBUG_FILE} ]]; then
echo "$*" >> "${BASH_COMP_DEBUG_FILE}"
fi
}
# Homebrew on Macs has version 1.3 of bash-completion which doesn't include
# _init_completion. This is a very minimal version of that function.
__my_init_completion()
{
COMPREPLY=()
_get_comp_words_by_ref cur prev words cword
}
__index_of_word()
{
local w word=$1
shift
index=0
for w in "$@"; do
[[ $w = "$word" ]] && return
index=$((index+1))
done
index=-1
}
__contains_word()
{
local w word=$1; shift
for w in "$@"; do
[[ $w = "$word" ]] && return
done
return 1
}
__handle_reply()
{
__debug "${FUNCNAME}"
case $cur in
-*)
compopt -o nospace
local allflags
if [ ${#must_have_one_flag[@]} -ne 0 ]; then
allflags=("${must_have_one_flag[@]}")
else
allflags=("${flags[*]} ${two_word_flags[*]}")
fi
COMPREPLY=( $(compgen -W "${allflags[*]}" -- "$cur") )
[[ $COMPREPLY == *= ]] || compopt +o nospace
return 0;
;;
esac
# check if we are handling a flag with special work handling
local index
__index_of_word "${prev}" "${flags_with_completion[@]}"
if [[ ${index} -ge 0 ]]; then
${flags_completion[${index}]}
return
fi
# we are parsing a flag and don't have a special handler, no completion
if [[ ${cur} != "${words[cword]}" ]]; then
return
fi
local completions
if [[ ${#must_have_one_flag[@]} -ne 0 ]]; then
completions=("${must_have_one_flag[@]}")
elif [[ ${#must_have_one_noun[@]} -ne 0 ]]; then
completions=("${must_have_one_noun[@]}")
else
completions=("${commands[@]}")
fi
COMPREPLY=( $(compgen -W "${completions[*]}" -- "$cur") )
if [[ ${#COMPREPLY[@]} -eq 0 ]]; then
declare -F __custom_func >/dev/null && __custom_func
fi
}
# The arguments should be in the form "ext1|ext2|extn"
__handle_filename_extension_flag()
{
local ext="$1"
_filedir "@(${ext})"
}
__handle_subdirs_in_dir_flag()
{
local dir="$1"
pushd "${dir}" >/dev/null 2>&1 && _filedir -d && popd >/dev/null 2>&1
}
__handle_flag()
{
__debug "${FUNCNAME}: c is $c words[c] is ${words[c]}"
# if a command required a flag, and we found it, unset must_have_one_flag()
local flagname=${words[c]}
# if the word contained an =
if [[ ${words[c]} == *"="* ]]; then
flagname=${flagname%%=*} # strip everything after the =
flagname="${flagname}=" # but put the = back
fi
__debug "${FUNCNAME}: looking for ${flagname}"
if __contains_word "${flagname}" "${must_have_one_flag[@]}"; then
must_have_one_flag=()
fi
# skip the argument to a two word flag
if __contains_word "${words[c]}" "${two_word_flags[@]}"; then
c=$((c+1))
# if we are looking for a flags value, don't show commands
if [[ $c -eq $cword ]]; then
commands=()
fi
fi
# skip the flag itself
c=$((c+1))
}
__handle_noun()
{
__debug "${FUNCNAME}: c is $c words[c] is ${words[c]}"
if __contains_word "${words[c]}" "${must_have_one_noun[@]}"; then
must_have_one_noun=()
fi
nouns+=("${words[c]}")
c=$((c+1))
}
__handle_command()
{
__debug "${FUNCNAME}: c is $c words[c] is ${words[c]}"
local next_command
if [[ -n ${last_command} ]]; then
next_command="_${last_command}_${words[c]}"
else
next_command="_${words[c]}"
fi
c=$((c+1))
__debug "${FUNCNAME}: looking for ${next_command}"
declare -F $next_command >/dev/null && $next_command
}
__handle_word()
{
if [[ $c -ge $cword ]]; then
__handle_reply
return
fi
__debug "${FUNCNAME}: c is $c words[c] is ${words[c]}"
if [[ "${words[c]}" == -* ]]; then
__handle_flag
elif __contains_word "${words[c]}" "${commands[@]}"; then
__handle_command
else
__handle_noun
fi
__handle_word
}
`)
}
func postscript(out *bytes.Buffer, name string) {
fmt.Fprintf(out, "__start_%s()\n", name)
fmt.Fprintf(out, `{
local cur prev words cword
if declare -F _init_completion >/dev/null 2>&1; then
_init_completion -s || return
else
__my_init_completion || return
fi
local c=0
local flags=()
local two_word_flags=()
local flags_with_completion=()
local flags_completion=()
local commands=("%s")
local must_have_one_flag=()
local must_have_one_noun=()
local last_command
local nouns=()
__handle_word
}
`, name)
fmt.Fprintf(out, "complete -F __start_%s %s\n", name, name)
fmt.Fprintf(out, "# ex: ts=4 sw=4 et filetype=sh\n")
}
func writeCommands(cmd *Command, out *bytes.Buffer) {
fmt.Fprintf(out, " commands=()\n")
for _, c := range cmd.Commands() {
if !c.IsAvailableCommand() || c == cmd.helpCommand {
continue
}
fmt.Fprintf(out, " commands+=(%q)\n", c.Name())
}
fmt.Fprintf(out, "\n")
}
func writeFlagHandler(name string, annotations map[string][]string, out *bytes.Buffer) {
for key, value := range annotations {
switch key {
case BashCompFilenameExt:
fmt.Fprintf(out, " flags_with_completion+=(%q)\n", name)
if len(value) > 0 {
ext := "__handle_filename_extension_flag " + strings.Join(value, "|")
fmt.Fprintf(out, " flags_completion+=(%q)\n", ext)
} else {
ext := "_filedir"
fmt.Fprintf(out, " flags_completion+=(%q)\n", ext)
}
case BashCompSubdirsInDir:
fmt.Fprintf(out, " flags_with_completion+=(%q)\n", name)
if len(value) == 1 {
ext := "__handle_subdirs_in_dir_flag " + value[0]
fmt.Fprintf(out, " flags_completion+=(%q)\n", ext)
} else {
ext := "_filedir -d"
fmt.Fprintf(out, " flags_completion+=(%q)\n", ext)
}
}
}
}
func writeShortFlag(flag *pflag.Flag, out *bytes.Buffer) {
b := (flag.Value.Type() == "bool")
name := flag.Shorthand
format := " "
if !b {
format += "two_word_"
}
format += "flags+=(\"-%s\")\n"
fmt.Fprintf(out, format, name)
writeFlagHandler("-"+name, flag.Annotations, out)
}
func writeFlag(flag *pflag.Flag, out *bytes.Buffer) {
b := (flag.Value.Type() == "bool")
name := flag.Name
format := " flags+=(\"--%s"
if !b {
format += "="
}
format += "\")\n"
fmt.Fprintf(out, format, name)
writeFlagHandler("--"+name, flag.Annotations, out)
}
func writeFlags(cmd *Command, out *bytes.Buffer) {
fmt.Fprintf(out, ` flags=()
two_word_flags=()
flags_with_completion=()
flags_completion=()
`)
cmd.NonInheritedFlags().VisitAll(func(flag *pflag.Flag) {
writeFlag(flag, out)
if len(flag.Shorthand) > 0 {
writeShortFlag(flag, out)
}
})
fmt.Fprintf(out, "\n")
}
func writeRequiredFlag(cmd *Command, out *bytes.Buffer) {
fmt.Fprintf(out, " must_have_one_flag=()\n")
flags := cmd.NonInheritedFlags()
flags.VisitAll(func(flag *pflag.Flag) {
for key := range flag.Annotations {
switch key {
case BashCompOneRequiredFlag:
format := " must_have_one_flag+=(\"--%s"
b := (flag.Value.Type() == "bool")
if !b {
format += "="
}
format += "\")\n"
fmt.Fprintf(out, format, flag.Name)
if len(flag.Shorthand) > 0 {
fmt.Fprintf(out, " must_have_one_flag+=(\"-%s\")\n", flag.Shorthand)
}
}
}
})
}
func writeRequiredNoun(cmd *Command, out *bytes.Buffer) {
fmt.Fprintf(out, " must_have_one_noun=()\n")
sort.Sort(sort.StringSlice(cmd.ValidArgs))
for _, value := range cmd.ValidArgs {
fmt.Fprintf(out, " must_have_one_noun+=(%q)\n", value)
}
}
func gen(cmd *Command, out *bytes.Buffer) {
for _, c := range cmd.Commands() {
if !c.IsAvailableCommand() || c == cmd.helpCommand {
continue
}
gen(c, out)
}
commandName := cmd.CommandPath()
commandName = strings.Replace(commandName, " ", "_", -1)
fmt.Fprintf(out, "_%s()\n{\n", commandName)
fmt.Fprintf(out, " last_command=%q\n", commandName)
writeCommands(cmd, out)
writeFlags(cmd, out)
writeRequiredFlag(cmd, out)
writeRequiredNoun(cmd, out)
fmt.Fprintf(out, "}\n\n")
}
func (cmd *Command) GenBashCompletion(out *bytes.Buffer) {
preamble(out)
if len(cmd.BashCompletionFunction) > 0 {
fmt.Fprintf(out, "%s\n", cmd.BashCompletionFunction)
}
gen(cmd, out)
postscript(out, cmd.Name())
}
func (cmd *Command) GenBashCompletionFile(filename string) error {
out := new(bytes.Buffer)
cmd.GenBashCompletion(out)
outFile, err := os.Create(filename)
if err != nil {
return err
}
defer outFile.Close()
_, err = outFile.Write(out.Bytes())
if err != nil {
return err
}
return nil
}
// MarkFlagRequired adds the BashCompOneRequiredFlag annotation to the named flag, if it exists.
func (cmd *Command) MarkFlagRequired(name string) error {
return MarkFlagRequired(cmd.Flags(), name)
}
// MarkFlagRequired adds the BashCompOneRequiredFlag annotation to the named flag in the flag set, if it exists.
func MarkFlagRequired(flags *pflag.FlagSet, name string) error {
return flags.SetAnnotation(name, BashCompOneRequiredFlag, []string{"true"})
}
// MarkFlagFilename adds the BashCompFilenameExt annotation to the named flag, if it exists.
// Generated bash autocompletion will select filenames for the flag, limiting to named extensions if provided.
func (cmd *Command) MarkFlagFilename(name string, extensions ...string) error {
return MarkFlagFilename(cmd.Flags(), name, extensions...)
}
// MarkFlagFilename adds the BashCompFilenameExt annotation to the named flag in the flag set, if it exists.
// Generated bash autocompletion will select filenames for the flag, limiting to named extensions if provided.
func MarkFlagFilename(flags *pflag.FlagSet, name string, extensions ...string) error {
return flags.SetAnnotation(name, BashCompFilenameExt, extensions)
}

View File

@ -1,149 +0,0 @@
# Generating Bash Completions For Your Own cobra.Command
Generating bash completions from a cobra command is incredibly easy. An actual program which does so for the kubernetes kubectl binary is as follows:
```go
package main
import (
"io/ioutil"
"os"
"github.com/GoogleCloudPlatform/kubernetes/pkg/kubectl/cmd"
)
func main() {
kubectl := cmd.NewFactory(nil).NewKubectlCommand(os.Stdin, ioutil.Discard, ioutil.Discard)
kubectl.GenBashCompletionFile("out.sh")
}
```
That will get you completions of subcommands and flags. If you make additional annotations to your code, you can get even more intelligent and flexible behavior.
## Creating your own custom functions
Some more actual code that works in kubernetes:
```bash
const (
bash_completion_func = `__kubectl_parse_get()
{
local kubectl_output out
if kubectl_output=$(kubectl get --no-headers "$1" 2>/dev/null); then
out=($(echo "${kubectl_output}" | awk '{print $1}'))
COMPREPLY=( $( compgen -W "${out[*]}" -- "$cur" ) )
fi
}
__kubectl_get_resource()
{
if [[ ${#nouns[@]} -eq 0 ]]; then
return 1
fi
__kubectl_parse_get ${nouns[${#nouns[@]} -1]}
if [[ $? -eq 0 ]]; then
return 0
fi
}
__custom_func() {
case ${last_command} in
kubectl_get | kubectl_describe | kubectl_delete | kubectl_stop)
__kubectl_get_resource
return
;;
*)
;;
esac
}
`)
```
And then I set that in my command definition:
```go
cmds := &cobra.Command{
Use: "kubectl",
Short: "kubectl controls the Kubernetes cluster manager",
Long: `kubectl controls the Kubernetes cluster manager.
Find more information at https://github.com/GoogleCloudPlatform/kubernetes.`,
Run: runHelp,
BashCompletionFunction: bash_completion_func,
}
```
The `BashCompletionFunction` option is really only valid/useful on the root command. Doing the above will cause `__custom_func()` to be called when the built-in processor was unable to find a solution. In the case of kubernetes a valid command might look something like `kubectl get pod [mypod]`. If you type `kubectl get pod [tab][tab]` the `__custom_func()` will run because the cobra.Command only understood "kubectl" and "get." `__custom_func()` will see that the cobra.Command is "kubectl_get" and will thus call another helper `__kubectl_get_resource()`. `__kubectl_get_resource` will look at the 'nouns' collected. In our example the only noun will be `pod`. So it will call `__kubectl_parse_get pod`. `__kubectl_parse_get` will actually call out to kubernetes and get any pods. It will then set `COMPREPLY` to valid pods!
## Have the completions code complete your 'nouns'
In the above example "pod" was assumed to already be typed. But if you want `kubectl get [tab][tab]` to show a list of valid "nouns" you have to set them. Simplified code from `kubectl get` looks like:
```go
validArgs := []string{"pods", "nodes", "services", "replicationControllers"}
cmd := &cobra.Command{
Use: "get [(-o|--output=)json|yaml|template|...] (RESOURCE [NAME] | RESOURCE/NAME ...)",
Short: "Display one or many resources",
Long: get_long,
Example: get_example,
Run: func(cmd *cobra.Command, args []string) {
err := RunGet(f, out, cmd, args)
util.CheckErr(err)
},
ValidArgs: validArgs,
}
```
Notice we put the "ValidArgs" on the "get" subcommand. Doing so will give results like
```bash
# kubectl get [tab][tab]
nodes pods replicationControllers services
```
## Mark flags as required
Most of the time completions will only show subcommands. But if a flag is required to make a subcommand work, you probably want it to show up when the user types [tab][tab]. Marking a flag as 'Required' is incredibly easy.
```go
cmd.MarkFlagRequired("pod")
cmd.MarkFlagRequired("container")
```
and you'll get something like
```bash
# kubectl exec [tab][tab][tab]
-c --container= -p --pod=
```
# Specify valid filename extensions for flags that take a filename
In this example we use --filename= and expect to get a json or yaml file as the argument. To make this easier we annotate the --filename flag with valid filename extensions.
```go
annotations := []string{"json", "yaml", "yml"}
annotation := make(map[string][]string)
annotation[cobra.BashCompFilenameExt] = annotations
flag := &pflag.Flag{
Name: "filename",
Shorthand: "f",
Usage: usage,
Value: value,
DefValue: value.String(),
Annotations: annotation,
}
cmd.Flags().AddFlag(flag)
```
Now when you run a command with this filename flag you'll get something like
```bash
# kubectl create -f
test/ example/ rpmbuild/
hello.yml test.json
```
So while there are many other files in the CWD, it only shows subdirectories and files with valid extensions.

View File

@ -1,170 +0,0 @@
// Copyright © 2013 Steve Francia <spf@spf13.com>.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Commands similar to git, go tools and other modern CLI tools
// inspired by go, go-Commander, gh and subcommand
package cobra
import (
"fmt"
"io"
"reflect"
"strconv"
"strings"
"text/template"
"unicode"
)
var templateFuncs template.FuncMap = template.FuncMap{
"trim": strings.TrimSpace,
"trimRightSpace": trimRightSpace,
"rpad": rpad,
"gt": Gt,
"eq": Eq,
}
var initializers []func()
// automatic prefix matching can be a dangerous thing to automatically enable in CLI tools.
// Set this to true to enable it
var EnablePrefixMatching bool = false
// enables an information splash screen on Windows if the CLI is started from explorer.exe.
var EnableWindowsMouseTrap bool = true
var MousetrapHelpText string = `This is a command line tool
You need to open cmd.exe and run it from there.
`
//AddTemplateFunc adds a template function that's available to Usage and Help
//template generation.
func AddTemplateFunc(name string, tmplFunc interface{}) {
templateFuncs[name] = tmplFunc
}
//AddTemplateFuncs adds multiple template functions available to Usage and
//Help template generation.
func AddTemplateFuncs(tmplFuncs template.FuncMap) {
for k, v := range tmplFuncs {
templateFuncs[k] = v
}
}
//OnInitialize takes a series of func() arguments and appends them to a slice of func().
func OnInitialize(y ...func()) {
for _, x := range y {
initializers = append(initializers, x)
}
}
//Gt takes two types and checks whether the first type is greater than the second. In case of types Arrays, Chans,
//Maps and Slices, Gt will compare their lengths. Ints are compared directly while strings are first parsed as
//ints and then compared.
func Gt(a interface{}, b interface{}) bool {
var left, right int64
av := reflect.ValueOf(a)
switch av.Kind() {
case reflect.Array, reflect.Chan, reflect.Map, reflect.Slice:
left = int64(av.Len())
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
left = av.Int()
case reflect.String:
left, _ = strconv.ParseInt(av.String(), 10, 64)
}
bv := reflect.ValueOf(b)
switch bv.Kind() {
case reflect.Array, reflect.Chan, reflect.Map, reflect.Slice:
right = int64(bv.Len())
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
right = bv.Int()
case reflect.String:
right, _ = strconv.ParseInt(bv.String(), 10, 64)
}
return left > right
}
//Eq takes two types and checks whether they are equal. Supported types are int and string. Unsupported types will panic.
func Eq(a interface{}, b interface{}) bool {
av := reflect.ValueOf(a)
bv := reflect.ValueOf(b)
switch av.Kind() {
case reflect.Array, reflect.Chan, reflect.Map, reflect.Slice:
panic("Eq called on unsupported type")
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
return av.Int() == bv.Int()
case reflect.String:
return av.String() == bv.String()
}
return false
}
func trimRightSpace(s string) string {
return strings.TrimRightFunc(s, unicode.IsSpace)
}
//rpad adds padding to the right of a string
func rpad(s string, padding int) string {
template := fmt.Sprintf("%%-%ds", padding)
return fmt.Sprintf(template, s)
}
// tmpl executes the given template text on data, writing the result to w.
func tmpl(w io.Writer, text string, data interface{}) error {
t := template.New("top")
t.Funcs(templateFuncs)
template.Must(t.Parse(text))
return t.Execute(w, data)
}
// ld compares two strings and returns the levenshtein distance between them
func ld(s, t string, ignoreCase bool) int {
if ignoreCase {
s = strings.ToLower(s)
t = strings.ToLower(t)
}
d := make([][]int, len(s)+1)
for i := range d {
d[i] = make([]int, len(t)+1)
}
for i := range d {
d[i][0] = i
}
for j := range d[0] {
d[0][j] = j
}
for j := 1; j <= len(t); j++ {
for i := 1; i <= len(s); i++ {
if s[i-1] == t[j-1] {
d[i][j] = d[i-1][j-1]
} else {
min := d[i-1][j]
if d[i][j-1] < min {
min = d[i][j-1]
}
if d[i-1][j-1] < min {
min = d[i-1][j-1]
}
d[i][j] = min + 1
}
}
}
return d[len(s)][len(t)]
}

File diff suppressed because it is too large

View File

@ -1,34 +0,0 @@
// Copyright 2015 Red Hat Inc. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package cobra
// Test to see if we have a reason to print See Also information in docs
// Basically this is a test for a parent command or a subcommand which is
// both not deprecated and not the autogenerated help command.
func (cmd *Command) hasSeeAlso() bool {
if cmd.HasParent() {
return true
}
children := cmd.Commands()
if len(children) == 0 {
return false
}
for _, c := range children {
if !c.IsAvailableCommand() || c == cmd.helpCommand {
continue
}
return true
}
return false
}

View File

@ -1,213 +0,0 @@
// Copyright 2015 Red Hat Inc. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package cobra
import (
"bytes"
"fmt"
"os"
"sort"
"strings"
"time"
mangen "github.com/mikefarah/yaml/Godeps/_workspace/src/github.com/cpuguy83/go-md2man/md2man"
"github.com/mikefarah/yaml/Godeps/_workspace/src/github.com/spf13/pflag"
)
// GenManTree will call cmd.GenManTree(header, dir)
func GenManTree(cmd *Command, header *GenManHeader, dir string) {
cmd.GenManTree(header, dir)
}
// GenManTree will generate a man page for this command and all descendants
// in the directory given. The header may be nil. This function may not work
// correctly if your command names have - in them. If you have `cmd` with two
// subcmds, `sub` and `sub-third`, and `sub` has a subcommand called `third`,
// it is undefined which help output will be in the file `cmd-sub-third.1`.
func (cmd *Command) GenManTree(header *GenManHeader, dir string) {
if header == nil {
header = &GenManHeader{}
}
for _, c := range cmd.Commands() {
if !c.IsAvailableCommand() || c == cmd.helpCommand {
continue
}
GenManTree(c, header, dir)
}
out := new(bytes.Buffer)
cmd.GenMan(header, out)
filename := cmd.CommandPath()
filename = dir + strings.Replace(filename, " ", "-", -1) + ".1"
outFile, err := os.Create(filename)
if err != nil {
fmt.Println(err)
os.Exit(1)
}
defer outFile.Close()
_, err = outFile.Write(out.Bytes())
if err != nil {
fmt.Println(err)
os.Exit(1)
}
}
// GenManHeader is a lot like the .TH header at the start of man pages. These
// include the title, section, date, source, and manual. We will use the
// current time if Date is unset and will use "Auto generated by spf13/cobra"
// if the Source is unset.
type GenManHeader struct {
Title string
Section string
Date *time.Time
date string
Source string
Manual string
}
// GenMan will call cmd.GenMan(header, out)
func GenMan(cmd *Command, header *GenManHeader, out *bytes.Buffer) {
cmd.GenMan(header, out)
}
// GenMan will generate a man page for the given command in the out buffer.
// The header argument may be nil, however obviously out may not.
func (cmd *Command) GenMan(header *GenManHeader, out *bytes.Buffer) {
if header == nil {
header = &GenManHeader{}
}
buf := genMarkdown(cmd, header)
final := mangen.Render(buf)
out.Write(final)
}
func fillHeader(header *GenManHeader, name string) {
if header.Title == "" {
header.Title = name
}
if header.Section == "" {
header.Section = "1"
}
if header.Date == nil {
now := time.Now()
header.Date = &now
}
header.date = (*header.Date).Format("Jan 2006")
if header.Source == "" {
header.Source = "Auto generated by spf13/cobra"
}
}
func manPreamble(out *bytes.Buffer, header *GenManHeader, name, short, long string) {
fmt.Fprintf(out, `%% %s(%s)%s
%% %s
%% %s
# NAME
`, header.Title, header.Section, header.date, header.Source, header.Manual)
fmt.Fprintf(out, "%s \\- %s\n\n", name, short)
fmt.Fprintf(out, "# SYNOPSIS\n")
fmt.Fprintf(out, "**%s** [OPTIONS]\n\n", name)
fmt.Fprintf(out, "# DESCRIPTION\n")
fmt.Fprintf(out, "%s\n\n", long)
}
func manPrintFlags(out *bytes.Buffer, flags *pflag.FlagSet) {
flags.VisitAll(func(flag *pflag.Flag) {
if len(flag.Deprecated) > 0 || flag.Hidden {
return
}
format := ""
if len(flag.Shorthand) > 0 {
format = "**-%s**, **--%s**"
} else {
format = "%s**--%s**"
}
if len(flag.NoOptDefVal) > 0 {
format = format + "["
}
if flag.Value.Type() == "string" {
// put quotes on the value
format = format + "=%q"
} else {
format = format + "=%s"
}
if len(flag.NoOptDefVal) > 0 {
format = format + "]"
}
format = format + "\n\t%s\n\n"
fmt.Fprintf(out, format, flag.Shorthand, flag.Name, flag.DefValue, flag.Usage)
})
}
func manPrintOptions(out *bytes.Buffer, command *Command) {
flags := command.NonInheritedFlags()
if flags.HasFlags() {
fmt.Fprintf(out, "# OPTIONS\n")
manPrintFlags(out, flags)
fmt.Fprintf(out, "\n")
}
flags = command.InheritedFlags()
if flags.HasFlags() {
fmt.Fprintf(out, "# OPTIONS INHERITED FROM PARENT COMMANDS\n")
manPrintFlags(out, flags)
fmt.Fprintf(out, "\n")
}
}
func genMarkdown(cmd *Command, header *GenManHeader) []byte {
fillHeader(header, cmd.Name())
// something like `rootcmd subcmd1 subcmd2`
commandName := cmd.CommandPath()
// something like `rootcmd-subcmd1-subcmd2`
dashCommandName := strings.Replace(commandName, " ", "-", -1)
buf := new(bytes.Buffer)
short := cmd.Short
long := cmd.Long
if len(long) == 0 {
long = short
}
manPreamble(buf, header, commandName, short, long)
manPrintOptions(buf, cmd)
if len(cmd.Example) > 0 {
fmt.Fprintf(buf, "# EXAMPLE\n")
fmt.Fprintf(buf, "```\n%s\n```\n", cmd.Example)
}
if cmd.hasSeeAlso() {
fmt.Fprintf(buf, "# SEE ALSO\n")
if cmd.HasParent() {
parentPath := cmd.Parent().CommandPath()
dashParentPath := strings.Replace(parentPath, " ", "-", -1)
fmt.Fprintf(buf, "**%s(%s)**, ", dashParentPath, header.Section)
}
children := cmd.Commands()
sort.Sort(byName(children))
for _, c := range children {
if !c.IsAvailableCommand() || c == cmd.helpCommand {
continue
}
fmt.Fprintf(buf, "**%s-%s(%s)**, ", dashCommandName, c.Name(), header.Section)
}
fmt.Fprintf(buf, "\n")
}
fmt.Fprintf(buf, "# HISTORY\n%s Auto generated by spf13/cobra\n", header.Date.Format("2-Jan-2006"))
return buf.Bytes()
}

View File

@ -1,25 +0,0 @@
# Generating Man Pages For Your Own cobra.Command
Generating man pages from a cobra command is incredibly easy. An example is as follows:
```go
package main
import (
"github.com/spf13/cobra"
)
func main() {
cmd := &cobra.Command{
Use: "test",
Short: "my test program",
}
header := &cobra.GenManHeader{
Title: "MINE",
Section: "3",
}
cmd.GenManTree(header, "/tmp")
}
```
That will get you a man page `/tmp/test.1`

View File

@ -1,157 +0,0 @@
//Copyright 2015 Red Hat Inc. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package cobra
import (
"bytes"
"fmt"
"os"
"sort"
"strings"
"time"
)
func printOptions(out *bytes.Buffer, cmd *Command, name string) {
flags := cmd.NonInheritedFlags()
flags.SetOutput(out)
if flags.HasFlags() {
fmt.Fprintf(out, "### Options\n\n```\n")
flags.PrintDefaults()
fmt.Fprintf(out, "```\n\n")
}
parentFlags := cmd.InheritedFlags()
parentFlags.SetOutput(out)
if parentFlags.HasFlags() {
fmt.Fprintf(out, "### Options inherited from parent commands\n\n```\n")
parentFlags.PrintDefaults()
fmt.Fprintf(out, "```\n\n")
}
}
type byName []*Command
func (s byName) Len() int { return len(s) }
func (s byName) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
func (s byName) Less(i, j int) bool { return s[i].Name() < s[j].Name() }
func GenMarkdown(cmd *Command, out *bytes.Buffer) {
cmd.GenMarkdown(out)
}
func (cmd *Command) GenMarkdown(out *bytes.Buffer) {
cmd.GenMarkdownCustom(out, func(s string) string { return s })
}
func GenMarkdownCustom(cmd *Command, out *bytes.Buffer, linkHandler func(string) string) {
cmd.GenMarkdownCustom(out, linkHandler)
}
func (cmd *Command) GenMarkdownCustom(out *bytes.Buffer, linkHandler func(string) string) {
name := cmd.CommandPath()
short := cmd.Short
long := cmd.Long
if len(long) == 0 {
long = short
}
fmt.Fprintf(out, "## %s\n\n", name)
fmt.Fprintf(out, "%s\n\n", short)
fmt.Fprintf(out, "### Synopsis\n\n")
fmt.Fprintf(out, "\n%s\n\n", long)
if cmd.Runnable() {
fmt.Fprintf(out, "```\n%s\n```\n\n", cmd.UseLine())
}
if len(cmd.Example) > 0 {
fmt.Fprintf(out, "### Examples\n\n")
fmt.Fprintf(out, "```\n%s\n```\n\n", cmd.Example)
}
printOptions(out, cmd, name)
if cmd.hasSeeAlso() {
fmt.Fprintf(out, "### SEE ALSO\n")
if cmd.HasParent() {
parent := cmd.Parent()
pname := parent.CommandPath()
link := pname + ".md"
link = strings.Replace(link, " ", "_", -1)
fmt.Fprintf(out, "* [%s](%s)\t - %s\n", pname, linkHandler(link), parent.Short)
}
children := cmd.Commands()
sort.Sort(byName(children))
for _, child := range children {
if !child.IsAvailableCommand() || child == cmd.helpCommand {
continue
}
cname := name + " " + child.Name()
link := cname + ".md"
link = strings.Replace(link, " ", "_", -1)
fmt.Fprintf(out, "* [%s](%s)\t - %s\n", cname, linkHandler(link), child.Short)
}
fmt.Fprintf(out, "\n")
}
fmt.Fprintf(out, "###### Auto generated by spf13/cobra on %s\n", time.Now().Format("2-Jan-2006"))
}
func GenMarkdownTree(cmd *Command, dir string) {
cmd.GenMarkdownTree(dir)
}
func (cmd *Command) GenMarkdownTree(dir string) {
identity := func(s string) string { return s }
emptyStr := func(s string) string { return "" }
cmd.GenMarkdownTreeCustom(dir, emptyStr, identity)
}
func GenMarkdownTreeCustom(cmd *Command, dir string, filePrepender func(string) string, linkHandler func(string) string) {
cmd.GenMarkdownTreeCustom(dir, filePrepender, linkHandler)
}
func (cmd *Command) GenMarkdownTreeCustom(dir string, filePrepender func(string) string, linkHandler func(string) string) {
for _, c := range cmd.Commands() {
if !c.IsAvailableCommand() || c == cmd.helpCommand {
continue
}
c.GenMarkdownTreeCustom(dir, filePrepender, linkHandler)
}
out := new(bytes.Buffer)
cmd.GenMarkdownCustom(out, linkHandler)
filename := cmd.CommandPath()
filename = dir + strings.Replace(filename, " ", "_", -1) + ".md"
outFile, err := os.Create(filename)
if err != nil {
fmt.Println(err)
os.Exit(1)
}
defer outFile.Close()
_, err = outFile.WriteString(filePrepender(filename))
if err != nil {
fmt.Println(err)
os.Exit(1)
}
_, err = outFile.Write(out.Bytes())
if err != nil {
fmt.Println(err)
os.Exit(1)
}
}

View File

@ -1,81 +0,0 @@
# Generating Markdown Docs For Your Own cobra.Command
## Generate markdown docs for the entire command tree
This program can actually generate docs for the kubectl command in the kubernetes project
```go
package main
import (
"io/ioutil"
"os"
"github.com/GoogleCloudPlatform/kubernetes/pkg/kubectl/cmd"
"github.com/spf13/cobra"
)
func main() {
kubectl := cmd.NewFactory(nil).NewKubectlCommand(os.Stdin, ioutil.Discard, ioutil.Discard)
cobra.GenMarkdownTree(kubectl, "./")
}
```
This will generate a whole series of files, one for each command in the tree, in the directory specified (in this case "./")
## Generate markdown docs for a single command
You may wish to have more control over the output, or only generate docs for a single command instead of the entire command tree. If this is the case, you may prefer `GenMarkdown` instead of `GenMarkdownTree`:
```go
out := new(bytes.Buffer)
cobra.GenMarkdown(cmd, out)
```
This will write the markdown doc for ONLY "cmd" into the out buffer.
## Customize the output
Both `GenMarkdown` and `GenMarkdownTree` have alternate versions with callbacks to get some control of the output:
```go
func GenMarkdownTreeCustom(cmd *Command, dir string, filePrepender func(string) string, linkHandler func(string) string) {
//...
}
```
```go
func GenMarkdownCustom(cmd *Command, out *bytes.Buffer, linkHandler func(string) string) {
//...
}
```
The `filePrepender` will prepend its return value, given the full filepath, to the rendered Markdown file. A common use case is to add front matter so the generated documentation can be used with [Hugo](http://gohugo.io/):
```go
const fmTemplate = `---
date: %s
title: "%s"
slug: %s
url: %s
---
`
filePrepender := func(filename string) string {
now := time.Now().Format(time.RFC3339)
name := filepath.Base(filename)
base := strings.TrimSuffix(name, path.Ext(name))
url := "/commands/" + strings.ToLower(base) + "/"
return fmt.Sprintf(fmTemplate, now, strings.Replace(base, "_", " ", -1), base, url)
}
```
The `linkHandler` can be used to customize the rendered internal links to the commands, given a filename:
```go
linkHandler := func(name string) string {
base := strings.TrimSuffix(name, path.Ext(name))
return "/commands/" + strings.ToLower(base) + "/"
}
```
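A rough sketch of tying the two callbacks together (reusing the `kubectl` command from the first example):

```go
// Generate the whole tree with front matter prepended and custom links.
cobra.GenMarkdownTreeCustom(kubectl, "./", filePrepender, linkHandler)
```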

View File

@ -1,17 +0,0 @@
sudo: false
language: go
go:
- 1.3
- 1.4
- tip
install:
- go get github.com/golang/lint/golint
- export PATH=$GOPATH/bin:$PATH
- go install ./...
script:
- verify/all.sh
- go test ./...

View File

@ -1,28 +0,0 @@
Copyright (c) 2012 Alex Ogier. All rights reserved.
Copyright (c) 2012 The Go Authors. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the
distribution.
* Neither the name of Google Inc. nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

View File

@ -1,256 +0,0 @@
[![Build Status](https://travis-ci.org/spf13/pflag.svg?branch=master)](https://travis-ci.org/spf13/pflag)
## Description
pflag is a drop-in replacement for Go's flag package, implementing
POSIX/GNU-style --flags.
pflag is compatible with the [GNU extensions to the POSIX recommendations
for command-line options][1]. For a more precise description, see the
"Command-line flag syntax" section below.
[1]: http://www.gnu.org/software/libc/manual/html_node/Argument-Syntax.html
pflag is available under the same style of BSD license as the Go language,
which can be found in the LICENSE file.
## Installation
pflag is available using the standard `go get` command.
Install by running:
go get github.com/spf13/pflag
Run tests by running:
go test github.com/spf13/pflag
## Usage
pflag is a drop-in replacement for Go's native flag package. If you import
pflag under the name "flag" then all code should continue to function
with no changes.
``` go
import flag "github.com/spf13/pflag"
```
There is one exception to this: if you directly instantiate the Flag struct
there is one more field "Shorthand" that you will need to set.
Most code never instantiates this struct directly, and instead uses
functions such as String(), BoolVar(), and Var(), and is therefore
unaffected.
Define flags using flag.String(), Bool(), Int(), etc.
This declares an integer flag, -flagname, stored in the pointer ip, with type *int.
``` go
var ip *int = flag.Int("flagname", 1234, "help message for flagname")
```
If you like, you can bind the flag to a variable using the Var() functions.
``` go
var flagvar int
func init() {
flag.IntVar(&flagvar, "flagname", 1234, "help message for flagname")
}
```
Or you can create custom flags that satisfy the Value interface (with
pointer receivers) and couple them to flag parsing by
``` go
flag.Var(&flagVal, "name", "help message for flagname")
```
For such flags, the default value is just the initial value of the variable.
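For illustration, here is a minimal sketch of such a custom flag type (the `hostsValue` name is purely illustrative and not part of pflag; it assumes `strings` is imported):
``` go
// hostsValue collects a comma-separated list of hosts.
type hostsValue []string

func (h *hostsValue) String() string { return strings.Join(*h, ",") }
func (h *hostsValue) Set(s string) error {
    *h = strings.Split(s, ",")
    return nil
}
func (h *hostsValue) Type() string { return "hosts" }

var hosts hostsValue

func init() {
    flag.Var(&hosts, "hosts", "comma-separated list of hosts")
}
```
Passing `--hosts a,b,c` would then leave `hosts` holding the three entries.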
After all flags are defined, call
``` go
flag.Parse()
```
to parse the command line into the defined flags.
Flags may then be used directly. If you're using the flags themselves,
they are all pointers; if you bind to variables, they're values.
``` go
fmt.Println("ip has value ", *ip)
fmt.Println("flagvar has value ", flagvar)
```
There are helper functions available to get the value stored in a flag if you
have a FlagSet but find it difficult to keep up with all of the flag pointers in your code.
If you have a pflag.FlagSet with a flag called 'flagname' of type int you
can use GetInt() to get the int value. But note that 'flagname' must exist
and it must be an int. GetString("flagname") will fail.
``` go
i, err := flagset.GetInt("flagname")
```
After parsing, the arguments after the flag are available as the
slice flag.Args() or individually as flag.Arg(i).
The arguments are indexed from 0 through flag.NArg()-1.
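For example, a small sketch that prints the remaining positional arguments after parsing (assuming `fmt` is imported):
``` go
flag.Parse()
// iterate over the non-flag arguments left after parsing
for i := 0; i < flag.NArg(); i++ {
    fmt.Println("arg", i, "=", flag.Arg(i))
}
```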
The pflag package also defines some new functions that are not in flag,
that give one-letter shorthands for flags. You can use these by appending
'P' to the name of any function that defines a flag.
``` go
var ip = flag.IntP("flagname", "f", 1234, "help message")
var flagvar bool
func init() {
flag.BoolVarP("boolname", "b", true, "help message")
}
flag.VarP(&flagVar, "varname", "v", 1234, "help message")
```
Shorthand letters can be used with single dashes on the command line.
Boolean shorthand flags can be combined with other shorthand flags.
The default set of command-line flags is controlled by
top-level functions. The FlagSet type allows one to define
independent sets of flags, such as to implement subcommands
in a command-line interface. The methods of FlagSet are
analogous to the top-level functions for the command-line
flag set.
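For example, a minimal sketch of an independent FlagSet for a hypothetical "serve" subcommand (the names and the `os.Args[2:]` slicing are illustrative; `fmt` and `os` are assumed to be imported):
``` go
serveFlags := flag.NewFlagSet("serve", flag.ContinueOnError)
port := serveFlags.IntP("port", "p", 8080, "port to listen on")
// parse only the arguments that follow the subcommand name
if err := serveFlags.Parse(os.Args[2:]); err != nil {
    fmt.Println(err)
    os.Exit(1)
}
fmt.Println("serving on port", *port)
```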
## Setting no option default values for flags
After you create a flag it is possible to set the pflag.NoOptDefVal for
the given flag. Doing this changes the meaning of the flag slightly. If
a flag has a NoOptDefVal and the flag is set on the command line without
an option the flag will be set to the NoOptDefVal. For example given:
``` go
var ip = flag.IntP("flagname", "f", 1234, "help message")
flag.Lookup("flagname").NoOptDefVal = "4321"
```
Would result in something like
| Parsed Arguments | Resulting Value |
| ------------- | ------------- |
| --flagname=1357 | ip=1357 |
| --flagname | ip=4321 |
| [nothing] | ip=1234 |
## Command line flag syntax
```
--flag // boolean flags, or flags with no option default values
--flag x // only on flags without a default value
--flag=x
```
Unlike the flag package, a single dash before an option means something
different than a double dash. Single dashes signify a series of shorthand
letters for flags. All but the last shorthand letter must be boolean flags
or flags with a 'no option default value' set
```
// boolean or flags where the 'no option default value' is set
-f
-f=true
-abc
but
-b true is INVALID
// non-boolean and flags without a 'no option default value'
-n 1234
-n=1234
-n1234
// mixed
-abcs "hello"
-absd="hello"
-abcs1234
```
Flag parsing stops after the terminator "--". Unlike the flag package,
flags can be interspersed with arguments anywhere on the command line
before this terminator.
Integer flags accept 1234, 0664, 0x1234 and may be negative.
Boolean flags (in their long form) accept 1, 0, t, f, true, false,
TRUE, FALSE, True, False.
Duration flags accept any input valid for time.ParseDuration.
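As a rough sketch, the following flag definitions make the invocations in the trailing comment valid (the names are illustrative, and `fmt` is assumed to be imported):
``` go
verbose := flag.BoolP("verbose", "v", false, "verbose output")
num := flag.IntP("num", "n", 0, "a number")
wait := flag.DurationP("wait", "w", 0, "how long to wait")
flag.Parse()
// valid invocations include:
//   -v -n 1234    --num=1234    -vn1234    -w 90s    --wait=1m30s
fmt.Println(*verbose, *num, *wait)
```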
## Mutating or "Normalizing" Flag names
It is possible to set a custom flag name 'normalization function.' It allows flag names to be mutated both when created in the code and when used on the command line to some 'normalized' form. The 'normalized' form is used for comparison. Two examples of using the custom normalization func follow.
**Example #1**: You want -, _, and . in flags to compare the same. aka --my-flag == --my_flag == --my.flag
``` go
func wordSepNormalizeFunc(f *pflag.FlagSet, name string) pflag.NormalizedName {
from := []string{"-", "_"}
to := "."
for _, sep := range from {
name = strings.Replace(name, sep, to, -1)
}
return pflag.NormalizedName(name)
}
myFlagSet.SetNormalizeFunc(wordSepNormalizeFunc)
```
**Example #2**: You want to alias two flags. aka --old-flag-name == --new-flag-name
``` go
func aliasNormalizeFunc(f *pflag.FlagSet, name string) pflag.NormalizedName {
switch name {
case "old-flag-name":
name = "new-flag-name"
break
}
return pflag.NormalizedName(name)
}
myFlagSet.SetNormalizeFunc(aliasNormalizeFunc)
```
## Deprecating a flag or its shorthand
It is possible to deprecate a flag, or just its shorthand. Deprecating a flag/shorthand hides it from help text and prints a usage message when the deprecated flag/shorthand is used.
**Example #1**: You want to deprecate a flag named "badflag" as well as inform the users what flag they should use instead.
```go
// deprecate a flag by specifying its name and a usage message
flags.MarkDeprecated("badflag", "please use --good-flag instead")
```
This hides "badflag" from help text, and prints `Flag --badflag has been deprecated, please use --good-flag instead` when "badflag" is used.
**Example #2**: You want to keep a flag name "noshorthandflag" but deprecate its shortname "n".
```go
// deprecate a flag shorthand by specifying its flag name and a usage message
flags.MarkShorthandDeprecated("noshorthandflag", "please use --noshorthandflag only")
```
This hides the shortname "n" from help text, and prints `Flag shorthand -n has been deprecated, please use --noshorthandflag only` when the shorthand "n" is used.
Note that the usage message is essential here, and it should not be empty.
## Hidden flags
It is possible to mark a flag as hidden, meaning it will still function as normal, but will not show up in usage/help text.
**Example**: You have a flag named "secretFlag" that you need for internal use only and don't want it showing up in help text, or for its usage text to be available.
```go
// hide a flag by specifying its name
flags.MarkHidden("secretFlag")
```
## More info
You can see the full reference documentation of the pflag package
[at godoc.org][3], or through go's standard documentation system by
running `godoc -http=:6060` and browsing to
[http://localhost:6060/pkg/github.com/ogier/pflag][2] after
installation.
[2]: http://localhost:6060/pkg/github.com/ogier/pflag
[3]: http://godoc.org/github.com/ogier/pflag

View File

@ -1,97 +0,0 @@
package pflag
import (
"fmt"
"strconv"
)
// optional interface to indicate boolean flags that can be
// supplied without "=value" text
type boolFlag interface {
Value
IsBoolFlag() bool
}
// -- bool Value
type boolValue bool
func newBoolValue(val bool, p *bool) *boolValue {
*p = val
return (*boolValue)(p)
}
func (b *boolValue) Set(s string) error {
v, err := strconv.ParseBool(s)
*b = boolValue(v)
return err
}
func (b *boolValue) Type() string {
return "bool"
}
func (b *boolValue) String() string { return fmt.Sprintf("%v", *b) }
func (b *boolValue) IsBoolFlag() bool { return true }
func boolConv(sval string) (interface{}, error) {
return strconv.ParseBool(sval)
}
// GetBool returns the bool value of a flag with the given name
func (f *FlagSet) GetBool(name string) (bool, error) {
val, err := f.getFlagType(name, "bool", boolConv)
if err != nil {
return false, err
}
return val.(bool), nil
}
// BoolVar defines a bool flag with specified name, default value, and usage string.
// The argument p points to a bool variable in which to store the value of the flag.
func (f *FlagSet) BoolVar(p *bool, name string, value bool, usage string) {
f.BoolVarP(p, name, "", value, usage)
}
// BoolVarP is like BoolVar, but accepts a shorthand letter that can be used after a single dash.
func (f *FlagSet) BoolVarP(p *bool, name, shorthand string, value bool, usage string) {
flag := f.VarPF(newBoolValue(value, p), name, shorthand, usage)
flag.NoOptDefVal = "true"
}
// BoolVar defines a bool flag with specified name, default value, and usage string.
// The argument p points to a bool variable in which to store the value of the flag.
func BoolVar(p *bool, name string, value bool, usage string) {
BoolVarP(p, name, "", value, usage)
}
// BoolVarP is like BoolVar, but accepts a shorthand letter that can be used after a single dash.
func BoolVarP(p *bool, name, shorthand string, value bool, usage string) {
flag := CommandLine.VarPF(newBoolValue(value, p), name, shorthand, usage)
flag.NoOptDefVal = "true"
}
// Bool defines a bool flag with specified name, default value, and usage string.
// The return value is the address of a bool variable that stores the value of the flag.
func (f *FlagSet) Bool(name string, value bool, usage string) *bool {
return f.BoolP(name, "", value, usage)
}
// BoolP is like Bool, but accepts a shorthand letter that can be used after a single dash.
func (f *FlagSet) BoolP(name, shorthand string, value bool, usage string) *bool {
p := new(bool)
f.BoolVarP(p, name, shorthand, value, usage)
return p
}
// Bool defines a bool flag with specified name, default value, and usage string.
// The return value is the address of a bool variable that stores the value of the flag.
func Bool(name string, value bool, usage string) *bool {
return BoolP(name, "", value, usage)
}
// BoolP is like Bool, but accepts a shorthand letter that can be used after a single dash.
func BoolP(name, shorthand string, value bool, usage string) *bool {
b := CommandLine.BoolP(name, shorthand, value, usage)
return b
}

View File

@ -1,97 +0,0 @@
package pflag
import (
"fmt"
"strconv"
)
// -- count Value
type countValue int
func newCountValue(val int, p *int) *countValue {
*p = val
return (*countValue)(p)
}
func (i *countValue) Set(s string) error {
v, err := strconv.ParseInt(s, 0, 64)
// -1 means that no specific value was passed, so increment
if v == -1 {
*i = countValue(*i + 1)
} else {
*i = countValue(v)
}
return err
}
func (i *countValue) Type() string {
return "count"
}
func (i *countValue) String() string { return fmt.Sprintf("%v", *i) }
func countConv(sval string) (interface{}, error) {
i, err := strconv.Atoi(sval)
if err != nil {
return nil, err
}
return i, nil
}
// GetCount returns the int value of a flag with the given name
func (f *FlagSet) GetCount(name string) (int, error) {
val, err := f.getFlagType(name, "count", countConv)
if err != nil {
return 0, err
}
return val.(int), nil
}
// CountVar defines a count flag with specified name, default value, and usage string.
// The argument p points to an int variable in which to store the value of the flag.
// A count flag will add 1 to its value every time it is found on the command line
func (f *FlagSet) CountVar(p *int, name string, usage string) {
f.CountVarP(p, name, "", usage)
}
// CountVarP is like CountVar, but takes a shorthand for the flag name.
func (f *FlagSet) CountVarP(p *int, name, shorthand string, usage string) {
flag := f.VarPF(newCountValue(0, p), name, shorthand, usage)
flag.NoOptDefVal = "-1"
}
// CountVar is like FlagSet.CountVar, only the flag is placed on the CommandLine instead of a given flag set
func CountVar(p *int, name string, usage string) {
CommandLine.CountVar(p, name, usage)
}
// CountVarP is like CountVar, but takes a shorthand for the flag name.
func CountVarP(p *int, name, shorthand string, usage string) {
CommandLine.CountVarP(p, name, shorthand, usage)
}
// Count defines a count flag with specified name, default value, and usage string.
// The return value is the address of an int variable that stores the value of the flag.
// A count flag will add 1 to its value every time it is found on the command line
func (f *FlagSet) Count(name string, usage string) *int {
p := new(int)
f.CountVarP(p, name, "", usage)
return p
}
// CountP is like Count, but takes a shorthand for the flag name.
func (f *FlagSet) CountP(name, shorthand string, usage string) *int {
p := new(int)
f.CountVarP(p, name, shorthand, usage)
return p
}
// Count is like FlagSet.Count, only the flag is placed on the CommandLine instead of a given flag set
func Count(name string, usage string) *int {
return CommandLine.CountP(name, "", usage)
}
// CountP is like Count, but takes a shorthand for the flag name.
func CountP(name, shorthand string, usage string) *int {
return CommandLine.CountP(name, shorthand, usage)
}

View File

@ -1,86 +0,0 @@
package pflag
import (
"time"
)
// -- time.Duration Value
type durationValue time.Duration
func newDurationValue(val time.Duration, p *time.Duration) *durationValue {
*p = val
return (*durationValue)(p)
}
func (d *durationValue) Set(s string) error {
v, err := time.ParseDuration(s)
*d = durationValue(v)
return err
}
func (d *durationValue) Type() string {
return "duration"
}
func (d *durationValue) String() string { return (*time.Duration)(d).String() }
func durationConv(sval string) (interface{}, error) {
return time.ParseDuration(sval)
}
// GetDuration returns the duration value of a flag with the given name
func (f *FlagSet) GetDuration(name string) (time.Duration, error) {
val, err := f.getFlagType(name, "duration", durationConv)
if err != nil {
return 0, err
}
return val.(time.Duration), nil
}
// DurationVar defines a time.Duration flag with specified name, default value, and usage string.
// The argument p points to a time.Duration variable in which to store the value of the flag.
func (f *FlagSet) DurationVar(p *time.Duration, name string, value time.Duration, usage string) {
f.VarP(newDurationValue(value, p), name, "", usage)
}
// DurationVarP is like DurationVar, but accepts a shorthand letter that can be used after a single dash.
func (f *FlagSet) DurationVarP(p *time.Duration, name, shorthand string, value time.Duration, usage string) {
f.VarP(newDurationValue(value, p), name, shorthand, usage)
}
// DurationVar defines a time.Duration flag with specified name, default value, and usage string.
// The argument p points to a time.Duration variable in which to store the value of the flag.
func DurationVar(p *time.Duration, name string, value time.Duration, usage string) {
CommandLine.VarP(newDurationValue(value, p), name, "", usage)
}
// DurationVarP is like DurationVar, but accepts a shorthand letter that can be used after a single dash.
func DurationVarP(p *time.Duration, name, shorthand string, value time.Duration, usage string) {
CommandLine.VarP(newDurationValue(value, p), name, shorthand, usage)
}
// Duration defines a time.Duration flag with specified name, default value, and usage string.
// The return value is the address of a time.Duration variable that stores the value of the flag.
func (f *FlagSet) Duration(name string, value time.Duration, usage string) *time.Duration {
p := new(time.Duration)
f.DurationVarP(p, name, "", value, usage)
return p
}
// DurationP is like Duration, but accepts a shorthand letter that can be used after a single dash.
func (f *FlagSet) DurationP(name, shorthand string, value time.Duration, usage string) *time.Duration {
p := new(time.Duration)
f.DurationVarP(p, name, shorthand, value, usage)
return p
}
// Duration defines a time.Duration flag with specified name, default value, and usage string.
// The return value is the address of a time.Duration variable that stores the value of the flag.
func Duration(name string, value time.Duration, usage string) *time.Duration {
return CommandLine.DurationP(name, "", value, usage)
}
// DurationP is like Duration, but accepts a shorthand letter that can be used after a single dash.
func DurationP(name, shorthand string, value time.Duration, usage string) *time.Duration {
return CommandLine.DurationP(name, shorthand, value, usage)
}

View File

@ -1,836 +0,0 @@
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
/*
Package pflag is a drop-in replacement for Go's flag package, implementing
POSIX/GNU-style --flags.
pflag is compatible with the GNU extensions to the POSIX recommendations
for command-line options. See
http://www.gnu.org/software/libc/manual/html_node/Argument-Syntax.html
Usage:
pflag is a drop-in replacement for Go's native flag package. If you import
pflag under the name "flag" then all code should continue to function
with no changes.
import flag "github.com/ogier/pflag"
There is one exception to this: if you directly instantiate the Flag struct
there is one more field "Shorthand" that you will need to set.
Most code never instantiates this struct directly, and instead uses
functions such as String(), BoolVar(), and Var(), and is therefore
unaffected.
Define flags using flag.String(), Bool(), Int(), etc.
This declares an integer flag, -flagname, stored in the pointer ip, with type *int.
var ip = flag.Int("flagname", 1234, "help message for flagname")
If you like, you can bind the flag to a variable using the Var() functions.
var flagvar int
func init() {
flag.IntVar(&flagvar, "flagname", 1234, "help message for flagname")
}
Or you can create custom flags that satisfy the Value interface (with
pointer receivers) and couple them to flag parsing by
flag.Var(&flagVal, "name", "help message for flagname")
For such flags, the default value is just the initial value of the variable.
After all flags are defined, call
flag.Parse()
to parse the command line into the defined flags.
Flags may then be used directly. If you're using the flags themselves,
they are all pointers; if you bind to variables, they're values.
fmt.Println("ip has value ", *ip)
fmt.Println("flagvar has value ", flagvar)
After parsing, the arguments after the flag are available as the
slice flag.Args() or individually as flag.Arg(i).
The arguments are indexed from 0 through flag.NArg()-1.
The pflag package also defines some new functions that are not in flag,
that give one-letter shorthands for flags. You can use these by appending
'P' to the name of any function that defines a flag.
var ip = flag.IntP("flagname", "f", 1234, "help message")
var flagvar bool
func init() {
flag.BoolVarP("boolname", "b", true, "help message")
}
flag.VarP(&flagVar, "varname", "v", 1234, "help message")
Shorthand letters can be used with single dashes on the command line.
Boolean shorthand flags can be combined with other shorthand flags.
Command line flag syntax:
--flag // boolean flags only
--flag=x
Unlike the flag package, a single dash before an option means something
different than a double dash. Single dashes signify a series of shorthand
letters for flags. All but the last shorthand letter must be boolean flags.
// boolean flags
-f
-abc
// non-boolean flags
-n 1234
-Ifile
// mixed
-abcs "hello"
-abcn1234
Flag parsing stops after the terminator "--". Unlike the flag package,
flags can be interspersed with arguments anywhere on the command line
before this terminator.
Integer flags accept 1234, 0664, 0x1234 and may be negative.
Boolean flags (in their long form) accept 1, 0, t, f, true, false,
TRUE, FALSE, True, False.
Duration flags accept any input valid for time.ParseDuration.
The default set of command-line flags is controlled by
top-level functions. The FlagSet type allows one to define
independent sets of flags, such as to implement subcommands
in a command-line interface. The methods of FlagSet are
analogous to the top-level functions for the command-line
flag set.
*/
package pflag
import (
"bytes"
"errors"
"fmt"
"io"
"os"
"sort"
"strings"
)
// ErrHelp is the error returned if the flag -help is invoked but no such flag is defined.
var ErrHelp = errors.New("pflag: help requested")
// ErrorHandling defines how to handle flag parsing errors.
type ErrorHandling int
const (
// ContinueOnError will return an err from Parse() if an error is found
ContinueOnError ErrorHandling = iota
// ExitOnError will call os.Exit(2) if an error is found when parsing
ExitOnError
// PanicOnError will panic() if an error is found when parsing flags
PanicOnError
)
// NormalizedName is a flag name that has been normalized according to rules
// for the FlagSet (e.g. making '-' and '_' equivalent).
type NormalizedName string
// A FlagSet represents a set of defined flags.
type FlagSet struct {
// Usage is the function called when an error occurs while parsing flags.
// The field is a function (not a method) that may be changed to point to
// a custom error handler.
Usage func()
name string
parsed bool
actual map[NormalizedName]*Flag
formal map[NormalizedName]*Flag
shorthands map[byte]*Flag
args []string // arguments after flags
argsLenAtDash int // len(args) when a '--' was located when parsing, or -1 if no --
exitOnError bool // does the program exit if there's an error?
errorHandling ErrorHandling
output io.Writer // nil means stderr; use out() accessor
interspersed bool // allow interspersed option/non-option args
normalizeNameFunc func(f *FlagSet, name string) NormalizedName
}
// A Flag represents the state of a flag.
type Flag struct {
Name string // name as it appears on command line
Shorthand string // one-letter abbreviated flag
Usage string // help message
Value Value // value as set
DefValue string // default value (as text); for usage message
Changed bool // true if the user explicitly set the value (false if left at the default)
NoOptDefVal string // default value (as text); used if the flag is on the command line without any value
Deprecated string // If this flag is deprecated, this string is the message describing what to use instead
Hidden bool // used by cobra.Command to allow flags to be hidden from help/usage text
ShorthandDeprecated string // If the shorthand of this flag is deprecated, this string is the message describing what to use instead
Annotations map[string][]string // used by cobra.Command bash autocomplete code
}
// Value is the interface to the dynamic value stored in a flag.
// (The default value is represented as a string.)
type Value interface {
String() string
Set(string) error
Type() string
}
// sortFlags returns the flags as a slice in lexicographical sorted order.
func sortFlags(flags map[NormalizedName]*Flag) []*Flag {
list := make(sort.StringSlice, len(flags))
i := 0
for k := range flags {
list[i] = string(k)
i++
}
list.Sort()
result := make([]*Flag, len(list))
for i, name := range list {
result[i] = flags[NormalizedName(name)]
}
return result
}
// SetNormalizeFunc allows you to add a function which can translate flag names.
// Flags added to the FlagSet will be translated, and any lookup of a flag
// will also be translated. So it would be possible to create
// a flag named "getURL" and have it translated to "geturl". A user could then pass
// "--getUrl" which may also be translated to "geturl" and everything will work.
func (f *FlagSet) SetNormalizeFunc(n func(f *FlagSet, name string) NormalizedName) {
f.normalizeNameFunc = n
for k, v := range f.formal {
delete(f.formal, k)
nname := f.normalizeFlagName(string(k))
f.formal[nname] = v
v.Name = string(nname)
}
}
// GetNormalizeFunc returns the previously set NormalizeFunc, or a function which
// does no translation if none was set.
func (f *FlagSet) GetNormalizeFunc() func(f *FlagSet, name string) NormalizedName {
if f.normalizeNameFunc != nil {
return f.normalizeNameFunc
}
return func(f *FlagSet, name string) NormalizedName { return NormalizedName(name) }
}
func (f *FlagSet) normalizeFlagName(name string) NormalizedName {
n := f.GetNormalizeFunc()
return n(f, name)
}
func (f *FlagSet) out() io.Writer {
if f.output == nil {
return os.Stderr
}
return f.output
}
// SetOutput sets the destination for usage and error messages.
// If output is nil, os.Stderr is used.
func (f *FlagSet) SetOutput(output io.Writer) {
f.output = output
}
// VisitAll visits the flags in lexicographical order, calling fn for each.
// It visits all flags, even those not set.
func (f *FlagSet) VisitAll(fn func(*Flag)) {
for _, flag := range sortFlags(f.formal) {
fn(flag)
}
}
// HasFlags returns a bool to indicate if the FlagSet has any flags defined.
func (f *FlagSet) HasFlags() bool {
return len(f.formal) > 0
}
// VisitAll visits the command-line flags in lexicographical order, calling
// fn for each. It visits all flags, even those not set.
func VisitAll(fn func(*Flag)) {
CommandLine.VisitAll(fn)
}
// Visit visits the flags in lexicographical order, calling fn for each.
// It visits only those flags that have been set.
func (f *FlagSet) Visit(fn func(*Flag)) {
for _, flag := range sortFlags(f.actual) {
fn(flag)
}
}
// Visit visits the command-line flags in lexicographical order, calling fn
// for each. It visits only those flags that have been set.
func Visit(fn func(*Flag)) {
CommandLine.Visit(fn)
}
// Lookup returns the Flag structure of the named flag, returning nil if none exists.
func (f *FlagSet) Lookup(name string) *Flag {
return f.lookup(f.normalizeFlagName(name))
}
// lookup returns the Flag structure of the named flag, returning nil if none exists.
func (f *FlagSet) lookup(name NormalizedName) *Flag {
return f.formal[name]
}
// func to return a given type for a given flag name
func (f *FlagSet) getFlagType(name string, ftype string, convFunc func(sval string) (interface{}, error)) (interface{}, error) {
flag := f.Lookup(name)
if flag == nil {
err := fmt.Errorf("flag accessed but not defined: %s", name)
return nil, err
}
if flag.Value.Type() != ftype {
err := fmt.Errorf("trying to get %s value of flag of type %s", ftype, flag.Value.Type())
return nil, err
}
sval := flag.Value.String()
result, err := convFunc(sval)
if err != nil {
return nil, err
}
return result, nil
}
// ArgsLenAtDash will return the length of f.Args at the moment when a -- was
// found during arg parsing. This allows your program to know which args were
// before the -- and which came after.
func (f *FlagSet) ArgsLenAtDash() int {
return f.argsLenAtDash
}
// MarkDeprecated indicates that a flag is deprecated in your program. It will
// continue to function but will not show up in help or usage messages. Using
// this flag will also print the given usageMessage.
func (f *FlagSet) MarkDeprecated(name string, usageMessage string) error {
flag := f.Lookup(name)
if flag == nil {
return fmt.Errorf("flag %q does not exist", name)
}
if len(usageMessage) == 0 {
return fmt.Errorf("deprecated message for flag %q must be set", name)
}
flag.Deprecated = usageMessage
return nil
}
// MarkShorthandDeprecated will mark the shorthand of a flag deprecated in your
// program. It will continue to function but will not show up in help or usage
// messages. Using this flag will also print the given usageMessage.
func (f *FlagSet) MarkShorthandDeprecated(name string, usageMessage string) error {
flag := f.Lookup(name)
if flag == nil {
return fmt.Errorf("flag %q does not exist", name)
}
if len(usageMessage) == 0 {
return fmt.Errorf("deprecated message for flag %q must be set", name)
}
flag.ShorthandDeprecated = usageMessage
return nil
}
// MarkHidden sets a flag to 'hidden' in your program. It will continue to
// function but will not show up in help or usage messages.
func (f *FlagSet) MarkHidden(name string) error {
flag := f.Lookup(name)
if flag == nil {
return fmt.Errorf("flag %q does not exist", name)
}
flag.Hidden = true
return nil
}
// Lookup returns the Flag structure of the named command-line flag,
// returning nil if none exists.
func Lookup(name string) *Flag {
return CommandLine.Lookup(name)
}
// Set sets the value of the named flag.
func (f *FlagSet) Set(name, value string) error {
normalName := f.normalizeFlagName(name)
flag, ok := f.formal[normalName]
if !ok {
return fmt.Errorf("no such flag -%v", name)
}
err := flag.Value.Set(value)
if err != nil {
return err
}
if f.actual == nil {
f.actual = make(map[NormalizedName]*Flag)
}
f.actual[normalName] = flag
flag.Changed = true
if len(flag.Deprecated) > 0 {
fmt.Fprintf(os.Stderr, "Flag --%s has been deprecated, %s\n", flag.Name, flag.Deprecated)
}
return nil
}
// SetAnnotation allows one to set arbitrary annotations on a flag in the FlagSet.
// This is sometimes used by spf13/cobra programs which want to generate additional
// bash completion information.
func (f *FlagSet) SetAnnotation(name, key string, values []string) error {
normalName := f.normalizeFlagName(name)
flag, ok := f.formal[normalName]
if !ok {
return fmt.Errorf("no such flag -%v", name)
}
if flag.Annotations == nil {
flag.Annotations = map[string][]string{}
}
flag.Annotations[key] = values
return nil
}
// Changed returns true if the flag was explicitly set during Parse() and false
// otherwise
func (f *FlagSet) Changed(name string) bool {
flag := f.Lookup(name)
// If a flag doesn't exist, it wasn't changed....
if flag == nil {
return false
}
return flag.Changed
}
// Set sets the value of the named command-line flag.
func Set(name, value string) error {
return CommandLine.Set(name, value)
}
// PrintDefaults prints, to standard error unless configured
// otherwise, the default values of all defined flags in the set.
func (f *FlagSet) PrintDefaults() {
usages := f.FlagUsages()
fmt.Fprintf(f.out(), "%s", usages)
}
// FlagUsages returns a string containing the usage information for all flags in
// the FlagSet
func (f *FlagSet) FlagUsages() string {
x := new(bytes.Buffer)
f.VisitAll(func(flag *Flag) {
if len(flag.Deprecated) > 0 || flag.Hidden {
return
}
format := ""
if len(flag.Shorthand) > 0 && len(flag.ShorthandDeprecated) == 0 {
format = " -%s, --%s"
} else {
format = " %s --%s"
}
if len(flag.NoOptDefVal) > 0 {
format = format + "["
}
if flag.Value.Type() == "string" {
// put quotes on the value
format = format + "=%q"
} else {
format = format + "=%s"
}
if len(flag.NoOptDefVal) > 0 {
format = format + "]"
}
format = format + ": %s\n"
shorthand := flag.Shorthand
if len(flag.ShorthandDeprecated) > 0 {
shorthand = ""
}
fmt.Fprintf(x, format, shorthand, flag.Name, flag.DefValue, flag.Usage)
})
return x.String()
}
// PrintDefaults prints to standard error the default values of all defined command-line flags.
func PrintDefaults() {
CommandLine.PrintDefaults()
}
// defaultUsage is the default function to print a usage message.
func defaultUsage(f *FlagSet) {
fmt.Fprintf(f.out(), "Usage of %s:\n", f.name)
f.PrintDefaults()
}
// NOTE: Usage is not just defaultUsage(CommandLine)
// because it serves (via godoc flag Usage) as the example
// for how to write your own usage function.
// Usage prints to standard error a usage message documenting all defined command-line flags.
// The function is a variable that may be changed to point to a custom function.
var Usage = func() {
fmt.Fprintf(os.Stderr, "Usage of %s:\n", os.Args[0])
PrintDefaults()
}
// NFlag returns the number of flags that have been set.
func (f *FlagSet) NFlag() int { return len(f.actual) }
// NFlag returns the number of command-line flags that have been set.
func NFlag() int { return len(CommandLine.actual) }
// Arg returns the i'th argument. Arg(0) is the first remaining argument
// after flags have been processed.
func (f *FlagSet) Arg(i int) string {
if i < 0 || i >= len(f.args) {
return ""
}
return f.args[i]
}
// Arg returns the i'th command-line argument. Arg(0) is the first remaining argument
// after flags have been processed.
func Arg(i int) string {
return CommandLine.Arg(i)
}
// NArg is the number of arguments remaining after flags have been processed.
func (f *FlagSet) NArg() int { return len(f.args) }
// NArg is the number of arguments remaining after flags have been processed.
func NArg() int { return len(CommandLine.args) }
// Args returns the non-flag arguments.
func (f *FlagSet) Args() []string { return f.args }
// Args returns the non-flag command-line arguments.
func Args() []string { return CommandLine.args }
// Var defines a flag with the specified name and usage string. The type and
// value of the flag are represented by the first argument, of type Value, which
// typically holds a user-defined implementation of Value. For instance, the
// caller could create a flag that turns a comma-separated string into a slice
// of strings by giving the slice the methods of Value; in particular, Set would
// decompose the comma-separated string into the slice.
func (f *FlagSet) Var(value Value, name string, usage string) {
f.VarP(value, name, "", usage)
}
// VarPF is like VarP, but returns the flag created
func (f *FlagSet) VarPF(value Value, name, shorthand, usage string) *Flag {
// Remember the default value as a string; it won't change.
flag := &Flag{
Name: name,
Shorthand: shorthand,
Usage: usage,
Value: value,
DefValue: value.String(),
}
f.AddFlag(flag)
return flag
}
// VarP is like Var, but accepts a shorthand letter that can be used after a single dash.
func (f *FlagSet) VarP(value Value, name, shorthand, usage string) {
_ = f.VarPF(value, name, shorthand, usage)
}
// AddFlag will add the flag to the FlagSet
func (f *FlagSet) AddFlag(flag *Flag) {
// Call normalizeFlagName function only once
normalizedFlagName := f.normalizeFlagName(flag.Name)
_, alreadythere := f.formal[normalizedFlagName]
if alreadythere {
msg := fmt.Sprintf("%s flag redefined: %s", f.name, flag.Name)
fmt.Fprintln(f.out(), msg)
panic(msg) // Happens only if flags are declared with identical names
}
if f.formal == nil {
f.formal = make(map[NormalizedName]*Flag)
}
flag.Name = string(normalizedFlagName)
f.formal[normalizedFlagName] = flag
if len(flag.Shorthand) == 0 {
return
}
if len(flag.Shorthand) > 1 {
fmt.Fprintf(f.out(), "%s shorthand more than ASCII character: %s\n", f.name, flag.Shorthand)
panic("shorthand is more than one character")
}
if f.shorthands == nil {
f.shorthands = make(map[byte]*Flag)
}
c := flag.Shorthand[0]
old, alreadythere := f.shorthands[c]
if alreadythere {
fmt.Fprintf(f.out(), "%s shorthand reused: %q for %s already used for %s\n", f.name, c, flag.Name, old.Name)
panic("shorthand redefinition")
}
f.shorthands[c] = flag
}
// AddFlagSet adds one FlagSet to another. If a flag is already present in f
// the flag from newSet will be ignored
func (f *FlagSet) AddFlagSet(newSet *FlagSet) {
if newSet == nil {
return
}
newSet.VisitAll(func(flag *Flag) {
if f.Lookup(flag.Name) == nil {
f.AddFlag(flag)
}
})
}
// Var defines a flag with the specified name and usage string. The type and
// value of the flag are represented by the first argument, of type Value, which
// typically holds a user-defined implementation of Value. For instance, the
// caller could create a flag that turns a comma-separated string into a slice
// of strings by giving the slice the methods of Value; in particular, Set would
// decompose the comma-separated string into the slice.
func Var(value Value, name string, usage string) {
CommandLine.VarP(value, name, "", usage)
}
// VarP is like Var, but accepts a shorthand letter that can be used after a single dash.
func VarP(value Value, name, shorthand, usage string) {
CommandLine.VarP(value, name, shorthand, usage)
}
// failf prints to standard error a formatted error and usage message and
// returns the error.
func (f *FlagSet) failf(format string, a ...interface{}) error {
err := fmt.Errorf(format, a...)
fmt.Fprintln(f.out(), err)
f.usage()
return err
}
// usage calls the Usage method for the flag set, or the usage function if
// the flag set is CommandLine.
func (f *FlagSet) usage() {
if f == CommandLine {
Usage()
} else if f.Usage == nil {
defaultUsage(f)
} else {
f.Usage()
}
}
func (f *FlagSet) setFlag(flag *Flag, value string, origArg string) error {
if err := flag.Value.Set(value); err != nil {
return f.failf("invalid argument %q for %s: %v", value, origArg, err)
}
// mark as visited for Visit()
if f.actual == nil {
f.actual = make(map[NormalizedName]*Flag)
}
f.actual[f.normalizeFlagName(flag.Name)] = flag
flag.Changed = true
if len(flag.Deprecated) > 0 {
fmt.Fprintf(os.Stderr, "Flag --%s has been deprecated, %s\n", flag.Name, flag.Deprecated)
}
if len(flag.ShorthandDeprecated) > 0 && containsShorthand(origArg, flag.Shorthand) {
fmt.Fprintf(os.Stderr, "Flag shorthand -%s has been deprecated, %s\n", flag.Shorthand, flag.ShorthandDeprecated)
}
return nil
}
func containsShorthand(arg, shorthand string) bool {
// filter out flags --<flag_name>
if strings.HasPrefix(arg, "-") {
return false
}
arg = strings.SplitN(arg, "=", 2)[0]
return strings.Contains(arg, shorthand)
}
func (f *FlagSet) parseLongArg(s string, args []string) (a []string, err error) {
a = args
name := s[2:]
if len(name) == 0 || name[0] == '-' || name[0] == '=' {
err = f.failf("bad flag syntax: %s", s)
return
}
split := strings.SplitN(name, "=", 2)
name = split[0]
flag, alreadythere := f.formal[f.normalizeFlagName(name)]
if !alreadythere {
if name == "help" { // special case for nice help message.
f.usage()
return a, ErrHelp
}
err = f.failf("unknown flag: --%s", name)
return
}
var value string
if len(split) == 2 {
// '--flag=arg'
value = split[1]
} else if len(flag.NoOptDefVal) > 0 {
// '--flag' (arg was optional)
value = flag.NoOptDefVal
} else if len(a) > 0 {
// '--flag arg'
value = a[0]
a = a[1:]
} else {
// '--flag' (arg was required)
err = f.failf("flag needs an argument: %s", s)
return
}
err = f.setFlag(flag, value, s)
return
}
func (f *FlagSet) parseSingleShortArg(shorthands string, args []string) (outShorts string, outArgs []string, err error) {
outArgs = args
outShorts = shorthands[1:]
c := shorthands[0]
flag, alreadythere := f.shorthands[c]
if !alreadythere {
if c == 'h' { // special case for nice help message.
f.usage()
err = ErrHelp
return
}
//TODO continue on error
err = f.failf("unknown shorthand flag: %q in -%s", c, shorthands)
return
}
var value string
if len(shorthands) > 2 && shorthands[1] == '=' {
value = shorthands[2:]
outShorts = ""
} else if len(flag.NoOptDefVal) > 0 {
value = flag.NoOptDefVal
} else if len(shorthands) > 1 {
value = shorthands[1:]
outShorts = ""
} else if len(args) > 0 {
value = args[0]
outArgs = args[1:]
} else {
err = f.failf("flag needs an argument: %q in -%s", c, shorthands)
return
}
err = f.setFlag(flag, value, shorthands)
return
}
func (f *FlagSet) parseShortArg(s string, args []string) (a []string, err error) {
a = args
shorthands := s[1:]
for len(shorthands) > 0 {
shorthands, a, err = f.parseSingleShortArg(shorthands, args)
if err != nil {
return
}
}
return
}
func (f *FlagSet) parseArgs(args []string) (err error) {
for len(args) > 0 {
s := args[0]
args = args[1:]
if len(s) == 0 || s[0] != '-' || len(s) == 1 {
if !f.interspersed {
f.args = append(f.args, s)
f.args = append(f.args, args...)
return nil
}
f.args = append(f.args, s)
continue
}
if s[1] == '-' {
if len(s) == 2 { // "--" terminates the flags
f.argsLenAtDash = len(f.args)
f.args = append(f.args, args...)
break
}
args, err = f.parseLongArg(s, args)
} else {
args, err = f.parseShortArg(s, args)
}
if err != nil {
return
}
}
return
}
// Parse parses flag definitions from the argument list, which should not
// include the command name. Must be called after all flags in the FlagSet
// are defined and before flags are accessed by the program.
// The return value will be ErrHelp if -help was set but not defined.
func (f *FlagSet) Parse(arguments []string) error {
f.parsed = true
f.args = make([]string, 0, len(arguments))
err := f.parseArgs(arguments)
if err != nil {
switch f.errorHandling {
case ContinueOnError:
return err
case ExitOnError:
os.Exit(2)
case PanicOnError:
panic(err)
}
}
return nil
}
// Parsed reports whether f.Parse has been called.
func (f *FlagSet) Parsed() bool {
return f.parsed
}
// Parse parses the command-line flags from os.Args[1:]. Must be called
// after all flags are defined and before flags are accessed by the program.
func Parse() {
// Ignore errors; CommandLine is set for ExitOnError.
CommandLine.Parse(os.Args[1:])
}
// SetInterspersed sets whether to support interspersed option/non-option arguments.
func SetInterspersed(interspersed bool) {
CommandLine.SetInterspersed(interspersed)
}
// Parsed returns true if the command-line flags have been parsed.
func Parsed() bool {
return CommandLine.Parsed()
}
// The default set of command-line flags, parsed from os.Args.
var CommandLine = NewFlagSet(os.Args[0], ExitOnError)
// NewFlagSet returns a new, empty flag set with the specified name and
// error handling property.
func NewFlagSet(name string, errorHandling ErrorHandling) *FlagSet {
f := &FlagSet{
name: name,
errorHandling: errorHandling,
argsLenAtDash: -1,
interspersed: true,
}
return f
}
// SetInterspersed sets whether to support interspersed option/non-option arguments.
func (f *FlagSet) SetInterspersed(interspersed bool) {
f.interspersed = interspersed
}
// Init sets the name and error handling property for a flag set.
// By default, the zero FlagSet uses an empty name and the
// ContinueOnError error handling policy.
func (f *FlagSet) Init(name string, errorHandling ErrorHandling) {
f.name = name
f.errorHandling = errorHandling
f.argsLenAtDash = -1
}

View File

@ -1,91 +0,0 @@
package pflag
import (
"fmt"
"strconv"
)
// -- float32 Value
type float32Value float32
func newFloat32Value(val float32, p *float32) *float32Value {
*p = val
return (*float32Value)(p)
}
func (f *float32Value) Set(s string) error {
v, err := strconv.ParseFloat(s, 32)
*f = float32Value(v)
return err
}
func (f *float32Value) Type() string {
return "float32"
}
func (f *float32Value) String() string { return fmt.Sprintf("%v", *f) }
func float32Conv(sval string) (interface{}, error) {
v, err := strconv.ParseFloat(sval, 32)
if err != nil {
return 0, err
}
return float32(v), nil
}
// GetFloat32 returns the float32 value of a flag with the given name
func (f *FlagSet) GetFloat32(name string) (float32, error) {
val, err := f.getFlagType(name, "float32", float32Conv)
if err != nil {
return 0, err
}
return val.(float32), nil
}
// Float32Var defines a float32 flag with specified name, default value, and usage string.
// The argument p points to a float32 variable in which to store the value of the flag.
func (f *FlagSet) Float32Var(p *float32, name string, value float32, usage string) {
f.VarP(newFloat32Value(value, p), name, "", usage)
}
// Float32VarP is like Float32Var, but accepts a shorthand letter that can be used after a single dash.
func (f *FlagSet) Float32VarP(p *float32, name, shorthand string, value float32, usage string) {
f.VarP(newFloat32Value(value, p), name, shorthand, usage)
}
// Float32Var defines a float32 flag with specified name, default value, and usage string.
// The argument p points to a float32 variable in which to store the value of the flag.
func Float32Var(p *float32, name string, value float32, usage string) {
CommandLine.VarP(newFloat32Value(value, p), name, "", usage)
}
// Float32VarP is like Float32Var, but accepts a shorthand letter that can be used after a single dash.
func Float32VarP(p *float32, name, shorthand string, value float32, usage string) {
CommandLine.VarP(newFloat32Value(value, p), name, shorthand, usage)
}
// Float32 defines a float32 flag with specified name, default value, and usage string.
// The return value is the address of a float32 variable that stores the value of the flag.
func (f *FlagSet) Float32(name string, value float32, usage string) *float32 {
p := new(float32)
f.Float32VarP(p, name, "", value, usage)
return p
}
// Float32P is like Float32, but accepts a shorthand letter that can be used after a single dash.
func (f *FlagSet) Float32P(name, shorthand string, value float32, usage string) *float32 {
p := new(float32)
f.Float32VarP(p, name, shorthand, value, usage)
return p
}
// Float32 defines a float32 flag with specified name, default value, and usage string.
// The return value is the address of a float32 variable that stores the value of the flag.
func Float32(name string, value float32, usage string) *float32 {
return CommandLine.Float32P(name, "", value, usage)
}
// Float32P is like Float32, but accepts a shorthand letter that can be used after a single dash.
func Float32P(name, shorthand string, value float32, usage string) *float32 {
return CommandLine.Float32P(name, shorthand, value, usage)
}

View File

@ -1,87 +0,0 @@
package pflag
import (
"fmt"
"strconv"
)
// -- float64 Value
type float64Value float64
func newFloat64Value(val float64, p *float64) *float64Value {
*p = val
return (*float64Value)(p)
}
func (f *float64Value) Set(s string) error {
v, err := strconv.ParseFloat(s, 64)
*f = float64Value(v)
return err
}
func (f *float64Value) Type() string {
return "float64"
}
func (f *float64Value) String() string { return fmt.Sprintf("%v", *f) }
func float64Conv(sval string) (interface{}, error) {
return strconv.ParseFloat(sval, 64)
}
// GetFloat64 returns the float64 value of a flag with the given name
func (f *FlagSet) GetFloat64(name string) (float64, error) {
val, err := f.getFlagType(name, "float64", float64Conv)
if err != nil {
return 0, err
}
return val.(float64), nil
}
// Float64Var defines a float64 flag with specified name, default value, and usage string.
// The argument p points to a float64 variable in which to store the value of the flag.
func (f *FlagSet) Float64Var(p *float64, name string, value float64, usage string) {
f.VarP(newFloat64Value(value, p), name, "", usage)
}
// Float64VarP is like Float64Var, but accepts a shorthand letter that can be used after a single dash.
func (f *FlagSet) Float64VarP(p *float64, name, shorthand string, value float64, usage string) {
f.VarP(newFloat64Value(value, p), name, shorthand, usage)
}
// Float64Var defines a float64 flag with specified name, default value, and usage string.
// The argument p points to a float64 variable in which to store the value of the flag.
func Float64Var(p *float64, name string, value float64, usage string) {
CommandLine.VarP(newFloat64Value(value, p), name, "", usage)
}
// Float64VarP is like Float64Var, but accepts a shorthand letter that can be used after a single dash.
func Float64VarP(p *float64, name, shorthand string, value float64, usage string) {
CommandLine.VarP(newFloat64Value(value, p), name, shorthand, usage)
}
// Float64 defines a float64 flag with specified name, default value, and usage string.
// The return value is the address of a float64 variable that stores the value of the flag.
func (f *FlagSet) Float64(name string, value float64, usage string) *float64 {
p := new(float64)
f.Float64VarP(p, name, "", value, usage)
return p
}
// Float64P is like Float64, but accepts a shorthand letter that can be used after a single dash.
func (f *FlagSet) Float64P(name, shorthand string, value float64, usage string) *float64 {
p := new(float64)
f.Float64VarP(p, name, shorthand, value, usage)
return p
}
// Float64 defines a float64 flag with specified name, default value, and usage string.
// The return value is the address of a float64 variable that stores the value of the flag.
func Float64(name string, value float64, usage string) *float64 {
return CommandLine.Float64P(name, "", value, usage)
}
// Float64P is like Float64, but accepts a shorthand letter that can be used after a single dash.
func Float64P(name, shorthand string, value float64, usage string) *float64 {
return CommandLine.Float64P(name, shorthand, value, usage)
}

View File

@ -1,97 +0,0 @@
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package pflag
import (
goflag "flag"
"fmt"
"reflect"
"strings"
)
var _ = fmt.Print
// flagValueWrapper implements pflag.Value around a flag.Value. The main
// difference here is the addition of the Type method that returns a string
// name of the type. As this is generally unknown, we approximate that with
// reflection.
type flagValueWrapper struct {
inner goflag.Value
flagType string
}
// We are just copying the boolFlag interface out of goflag as that is what
// they use to decide if a flag should get "true" when no arg is given.
type goBoolFlag interface {
goflag.Value
IsBoolFlag() bool
}
func wrapFlagValue(v goflag.Value) Value {
// If the flag.Value happens to also be a pflag.Value, just use it directly.
if pv, ok := v.(Value); ok {
return pv
}
pv := &flagValueWrapper{
inner: v,
}
t := reflect.TypeOf(v)
if t.Kind() == reflect.Interface || t.Kind() == reflect.Ptr {
t = t.Elem()
}
pv.flagType = strings.TrimSuffix(t.Name(), "Value")
return pv
}
func (v *flagValueWrapper) String() string {
return v.inner.String()
}
func (v *flagValueWrapper) Set(s string) error {
return v.inner.Set(s)
}
func (v *flagValueWrapper) Type() string {
return v.flagType
}
// PFlagFromGoFlag will return a *pflag.Flag given a *flag.Flag
func PFlagFromGoFlag(goflag *goflag.Flag) *Flag {
// Remember the default value as a string; it won't change.
flag := &Flag{
Name: goflag.Name,
Usage: goflag.Usage,
Value: wrapFlagValue(goflag.Value),
// Looks like golang flags don't set DefValue correctly :-(
//DefValue: goflag.DefValue,
DefValue: goflag.Value.String(),
}
if fv, ok := goflag.Value.(goBoolFlag); ok && fv.IsBoolFlag() {
flag.NoOptDefVal = "true"
}
return flag
}
// AddGoFlag will add the given *flag.Flag to the pflag.FlagSet
func (f *FlagSet) AddGoFlag(goflag *goflag.Flag) {
if f.Lookup(goflag.Name) != nil {
return
}
newflag := PFlagFromGoFlag(goflag)
f.AddFlag(newflag)
}
// AddGoFlagSet will add the given *flag.FlagSet to the pflag.FlagSet
func (f *FlagSet) AddGoFlagSet(newSet *goflag.FlagSet) {
if newSet == nil {
return
}
newSet.VisitAll(func(goflag *goflag.Flag) {
f.AddGoFlag(goflag)
})
}

View File

@ -1,87 +0,0 @@
package pflag
import (
"fmt"
"strconv"
)
// -- int Value
type intValue int
func newIntValue(val int, p *int) *intValue {
*p = val
return (*intValue)(p)
}
func (i *intValue) Set(s string) error {
v, err := strconv.ParseInt(s, 0, 64)
*i = intValue(v)
return err
}
func (i *intValue) Type() string {
return "int"
}
func (i *intValue) String() string { return fmt.Sprintf("%v", *i) }
func intConv(sval string) (interface{}, error) {
return strconv.Atoi(sval)
}
// GetInt returns the int value of a flag with the given name
func (f *FlagSet) GetInt(name string) (int, error) {
val, err := f.getFlagType(name, "int", intConv)
if err != nil {
return 0, err
}
return val.(int), nil
}
// IntVar defines an int flag with specified name, default value, and usage string.
// The argument p points to an int variable in which to store the value of the flag.
func (f *FlagSet) IntVar(p *int, name string, value int, usage string) {
f.VarP(newIntValue(value, p), name, "", usage)
}
// IntVarP is like IntVar, but accepts a shorthand letter that can be used after a single dash.
func (f *FlagSet) IntVarP(p *int, name, shorthand string, value int, usage string) {
f.VarP(newIntValue(value, p), name, shorthand, usage)
}
// IntVar defines an int flag with specified name, default value, and usage string.
// The argument p points to an int variable in which to store the value of the flag.
func IntVar(p *int, name string, value int, usage string) {
CommandLine.VarP(newIntValue(value, p), name, "", usage)
}
// IntVarP is like IntVar, but accepts a shorthand letter that can be used after a single dash.
func IntVarP(p *int, name, shorthand string, value int, usage string) {
CommandLine.VarP(newIntValue(value, p), name, shorthand, usage)
}
// Int defines an int flag with specified name, default value, and usage string.
// The return value is the address of an int variable that stores the value of the flag.
func (f *FlagSet) Int(name string, value int, usage string) *int {
p := new(int)
f.IntVarP(p, name, "", value, usage)
return p
}
// IntP is like Int, but accepts a shorthand letter that can be used after a single dash.
func (f *FlagSet) IntP(name, shorthand string, value int, usage string) *int {
p := new(int)
f.IntVarP(p, name, shorthand, value, usage)
return p
}
// Int defines an int flag with specified name, default value, and usage string.
// The return value is the address of an int variable that stores the value of the flag.
func Int(name string, value int, usage string) *int {
return CommandLine.IntP(name, "", value, usage)
}
// IntP is like Int, but accepts a shorthand letter that can be used after a single dash.
func IntP(name, shorthand string, value int, usage string) *int {
return CommandLine.IntP(name, shorthand, value, usage)
}

View File

@ -1,91 +0,0 @@
package pflag
import (
"fmt"
"strconv"
)
// -- int32 Value
type int32Value int32
func newInt32Value(val int32, p *int32) *int32Value {
*p = val
return (*int32Value)(p)
}
func (i *int32Value) Set(s string) error {
v, err := strconv.ParseInt(s, 0, 32)
*i = int32Value(v)
return err
}
func (i *int32Value) Type() string {
return "int32"
}
func (i *int32Value) String() string { return fmt.Sprintf("%v", *i) }
func int32Conv(sval string) (interface{}, error) {
v, err := strconv.ParseInt(sval, 0, 32)
if err != nil {
return 0, err
}
return int32(v), nil
}
// GetInt32 returns the int32 value of a flag with the given name
func (f *FlagSet) GetInt32(name string) (int32, error) {
val, err := f.getFlagType(name, "int32", int32Conv)
if err != nil {
return 0, err
}
return val.(int32), nil
}
// Int32Var defines an int32 flag with specified name, default value, and usage string.
// The argument p points to an int32 variable in which to store the value of the flag.
func (f *FlagSet) Int32Var(p *int32, name string, value int32, usage string) {
f.VarP(newInt32Value(value, p), name, "", usage)
}
// Int32VarP is like Int32Var, but accepts a shorthand letter that can be used after a single dash.
func (f *FlagSet) Int32VarP(p *int32, name, shorthand string, value int32, usage string) {
f.VarP(newInt32Value(value, p), name, shorthand, usage)
}
// Int32Var defines an int32 flag with specified name, default value, and usage string.
// The argument p points to an int32 variable in which to store the value of the flag.
func Int32Var(p *int32, name string, value int32, usage string) {
CommandLine.VarP(newInt32Value(value, p), name, "", usage)
}
// Int32VarP is like Int32Var, but accepts a shorthand letter that can be used after a single dash.
func Int32VarP(p *int32, name, shorthand string, value int32, usage string) {
CommandLine.VarP(newInt32Value(value, p), name, shorthand, usage)
}
// Int32 defines an int32 flag with specified name, default value, and usage string.
// The return value is the address of an int32 variable that stores the value of the flag.
func (f *FlagSet) Int32(name string, value int32, usage string) *int32 {
p := new(int32)
f.Int32VarP(p, name, "", value, usage)
return p
}
// Int32P is like Int32, but accepts a shorthand letter that can be used after a single dash.
func (f *FlagSet) Int32P(name, shorthand string, value int32, usage string) *int32 {
p := new(int32)
f.Int32VarP(p, name, shorthand, value, usage)
return p
}
// Int32 defines an int32 flag with specified name, default value, and usage string.
// The return value is the address of an int32 variable that stores the value of the flag.
func Int32(name string, value int32, usage string) *int32 {
return CommandLine.Int32P(name, "", value, usage)
}
// Int32P is like Int32, but accepts a shorthand letter that can be used after a single dash.
func Int32P(name, shorthand string, value int32, usage string) *int32 {
return CommandLine.Int32P(name, shorthand, value, usage)
}


@ -1,87 +0,0 @@
package pflag
import (
"fmt"
"strconv"
)
// -- int64 Value
type int64Value int64
func newInt64Value(val int64, p *int64) *int64Value {
*p = val
return (*int64Value)(p)
}
func (i *int64Value) Set(s string) error {
v, err := strconv.ParseInt(s, 0, 64)
*i = int64Value(v)
return err
}
func (i *int64Value) Type() string {
return "int64"
}
func (i *int64Value) String() string { return fmt.Sprintf("%v", *i) }
func int64Conv(sval string) (interface{}, error) {
return strconv.ParseInt(sval, 0, 64)
}
// GetInt64 returns the int64 value of a flag with the given name
func (f *FlagSet) GetInt64(name string) (int64, error) {
val, err := f.getFlagType(name, "int64", int64Conv)
if err != nil {
return 0, err
}
return val.(int64), nil
}
// Int64Var defines an int64 flag with specified name, default value, and usage string.
// The argument p points to an int64 variable in which to store the value of the flag.
func (f *FlagSet) Int64Var(p *int64, name string, value int64, usage string) {
f.VarP(newInt64Value(value, p), name, "", usage)
}
// Int64VarP is like Int64Var, but accepts a shorthand letter that can be used after a single dash.
func (f *FlagSet) Int64VarP(p *int64, name, shorthand string, value int64, usage string) {
f.VarP(newInt64Value(value, p), name, shorthand, usage)
}
// Int64Var defines an int64 flag with specified name, default value, and usage string.
// The argument p points to an int64 variable in which to store the value of the flag.
func Int64Var(p *int64, name string, value int64, usage string) {
CommandLine.VarP(newInt64Value(value, p), name, "", usage)
}
// Int64VarP is like Int64Var, but accepts a shorthand letter that can be used after a single dash.
func Int64VarP(p *int64, name, shorthand string, value int64, usage string) {
CommandLine.VarP(newInt64Value(value, p), name, shorthand, usage)
}
// Int64 defines an int64 flag with specified name, default value, and usage string.
// The return value is the address of an int64 variable that stores the value of the flag.
func (f *FlagSet) Int64(name string, value int64, usage string) *int64 {
p := new(int64)
f.Int64VarP(p, name, "", value, usage)
return p
}
// Int64P is like Int64, but accepts a shorthand letter that can be used after a single dash.
func (f *FlagSet) Int64P(name, shorthand string, value int64, usage string) *int64 {
p := new(int64)
f.Int64VarP(p, name, shorthand, value, usage)
return p
}
// Int64 defines an int64 flag with specified name, default value, and usage string.
// The return value is the address of an int64 variable that stores the value of the flag.
func Int64(name string, value int64, usage string) *int64 {
return CommandLine.Int64P(name, "", value, usage)
}
// Int64P is like Int64, but accepts a shorthand letter that can be used after a single dash.
func Int64P(name, shorthand string, value int64, usage string) *int64 {
return CommandLine.Int64P(name, shorthand, value, usage)
}


@ -1,91 +0,0 @@
package pflag
import (
"fmt"
"strconv"
)
// -- int8 Value
type int8Value int8
func newInt8Value(val int8, p *int8) *int8Value {
*p = val
return (*int8Value)(p)
}
func (i *int8Value) Set(s string) error {
v, err := strconv.ParseInt(s, 0, 8)
*i = int8Value(v)
return err
}
func (i *int8Value) Type() string {
return "int8"
}
func (i *int8Value) String() string { return fmt.Sprintf("%v", *i) }
func int8Conv(sval string) (interface{}, error) {
v, err := strconv.ParseInt(sval, 0, 8)
if err != nil {
return 0, err
}
return int8(v), nil
}
// GetInt8 returns the int8 value of a flag with the given name
func (f *FlagSet) GetInt8(name string) (int8, error) {
val, err := f.getFlagType(name, "int8", int8Conv)
if err != nil {
return 0, err
}
return val.(int8), nil
}
// Int8Var defines an int8 flag with specified name, default value, and usage string.
// The argument p points to an int8 variable in which to store the value of the flag.
func (f *FlagSet) Int8Var(p *int8, name string, value int8, usage string) {
f.VarP(newInt8Value(value, p), name, "", usage)
}
// Int8VarP is like Int8Var, but accepts a shorthand letter that can be used after a single dash.
func (f *FlagSet) Int8VarP(p *int8, name, shorthand string, value int8, usage string) {
f.VarP(newInt8Value(value, p), name, shorthand, usage)
}
// Int8Var defines an int8 flag with specified name, default value, and usage string.
// The argument p points to an int8 variable in which to store the value of the flag.
func Int8Var(p *int8, name string, value int8, usage string) {
CommandLine.VarP(newInt8Value(value, p), name, "", usage)
}
// Int8VarP is like Int8Var, but accepts a shorthand letter that can be used after a single dash.
func Int8VarP(p *int8, name, shorthand string, value int8, usage string) {
CommandLine.VarP(newInt8Value(value, p), name, shorthand, usage)
}
// Int8 defines an int8 flag with specified name, default value, and usage string.
// The return value is the address of an int8 variable that stores the value of the flag.
func (f *FlagSet) Int8(name string, value int8, usage string) *int8 {
p := new(int8)
f.Int8VarP(p, name, "", value, usage)
return p
}
// Int8P is like Int8, but accepts a shorthand letter that can be used after a single dash.
func (f *FlagSet) Int8P(name, shorthand string, value int8, usage string) *int8 {
p := new(int8)
f.Int8VarP(p, name, shorthand, value, usage)
return p
}
// Int8 defines an int8 flag with specified name, default value, and usage string.
// The return value is the address of an int8 variable that stores the value of the flag.
func Int8(name string, value int8, usage string) *int8 {
return CommandLine.Int8P(name, "", value, usage)
}
// Int8P is like Int8, but accepts a shorthand letter that can be used after a single dash.
func Int8P(name, shorthand string, value int8, usage string) *int8 {
return CommandLine.Int8P(name, shorthand, value, usage)
}


@ -1,128 +0,0 @@
package pflag
import (
"fmt"
"strconv"
"strings"
)
// -- intSlice Value
type intSliceValue struct {
value *[]int
changed bool
}
func newIntSliceValue(val []int, p *[]int) *intSliceValue {
isv := new(intSliceValue)
isv.value = p
*isv.value = val
return isv
}
func (s *intSliceValue) Set(val string) error {
ss := strings.Split(val, ",")
out := make([]int, len(ss))
for i, d := range ss {
var err error
out[i], err = strconv.Atoi(d)
if err != nil {
return err
}
}
if !s.changed {
*s.value = out
} else {
*s.value = append(*s.value, out...)
}
s.changed = true
return nil
}
func (s *intSliceValue) Type() string {
return "intSlice"
}
func (s *intSliceValue) String() string {
out := make([]string, len(*s.value))
for i, d := range *s.value {
out[i] = fmt.Sprintf("%d", d)
}
return "[" + strings.Join(out, ",") + "]"
}
func intSliceConv(val string) (interface{}, error) {
val = strings.Trim(val, "[]")
// Empty string would cause a slice with one (empty) entry
if len(val) == 0 {
return []int{}, nil
}
ss := strings.Split(val, ",")
out := make([]int, len(ss))
for i, d := range ss {
var err error
out[i], err = strconv.Atoi(d)
if err != nil {
return nil, err
}
}
return out, nil
}
// GetIntSlice returns the []int value of a flag with the given name
func (f *FlagSet) GetIntSlice(name string) ([]int, error) {
val, err := f.getFlagType(name, "intSlice", intSliceConv)
if err != nil {
return []int{}, err
}
return val.([]int), nil
}
// IntSliceVar defines an intSlice flag with specified name, default value, and usage string.
// The argument p points to a []int variable in which to store the value of the flag.
func (f *FlagSet) IntSliceVar(p *[]int, name string, value []int, usage string) {
f.VarP(newIntSliceValue(value, p), name, "", usage)
}
// IntSliceVarP is like IntSliceVar, but accepts a shorthand letter that can be used after a single dash.
func (f *FlagSet) IntSliceVarP(p *[]int, name, shorthand string, value []int, usage string) {
f.VarP(newIntSliceValue(value, p), name, shorthand, usage)
}
// IntSliceVar defines a []int flag with specified name, default value, and usage string.
// The argument p points to a []int variable in which to store the value of the flag.
func IntSliceVar(p *[]int, name string, value []int, usage string) {
CommandLine.VarP(newIntSliceValue(value, p), name, "", usage)
}
// IntSliceVarP is like IntSliceVar, but accepts a shorthand letter that can be used after a single dash.
func IntSliceVarP(p *[]int, name, shorthand string, value []int, usage string) {
CommandLine.VarP(newIntSliceValue(value, p), name, shorthand, usage)
}
// IntSlice defines a []int flag with specified name, default value, and usage string.
// The return value is the address of a []int variable that stores the value of the flag.
func (f *FlagSet) IntSlice(name string, value []int, usage string) *[]int {
p := []int{}
f.IntSliceVarP(&p, name, "", value, usage)
return &p
}
// IntSliceP is like IntSlice, but accepts a shorthand letter that can be used after a single dash.
func (f *FlagSet) IntSliceP(name, shorthand string, value []int, usage string) *[]int {
p := []int{}
f.IntSliceVarP(&p, name, shorthand, value, usage)
return &p
}
// IntSlice defines a []int flag with specified name, default value, and usage string.
// The return value is the address of a []int variable that stores the value of the flag.
func IntSlice(name string, value []int, usage string) *[]int {
return CommandLine.IntSliceP(name, "", value, usage)
}
// IntSliceP is like IntSlice, but accepts a shorthand letter that can be used after a single dash.
func IntSliceP(name, shorthand string, value []int, usage string) *[]int {
return CommandLine.IntSliceP(name, shorthand, value, usage)
}
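
The Set method above gives slice flags their characteristic accumulate-on-repeat behavior: the first occurrence replaces the default, later occurrences append, and every value is split on commas. A small sketch of that behavior, assuming the usual github.com/spf13/pflag import path and the flag-style NewFlagSet constructor (not part of this diff):

```Go
package main

import (
	"fmt"

	"github.com/spf13/pflag"
)

func main() {
	fs := pflag.NewFlagSet("demo", pflag.ContinueOnError)
	ports := fs.IntSliceP("port", "p", []int{80}, "ports to listen on")

	// First occurrence replaces the default, the second appends to it,
	// and comma-separated values are split per the Set logic above.
	if err := fs.Parse([]string{"--port=8080,8443", "-p", "9090"}); err != nil {
		panic(err)
	}
	fmt.Println(*ports) // [8080 8443 9090]
}
```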


@ -1,96 +0,0 @@
package pflag
import (
"fmt"
"net"
"strings"
)
var _ = strings.TrimSpace
// -- net.IP value
type ipValue net.IP
func newIPValue(val net.IP, p *net.IP) *ipValue {
*p = val
return (*ipValue)(p)
}
func (i *ipValue) String() string { return net.IP(*i).String() }
func (i *ipValue) Set(s string) error {
ip := net.ParseIP(strings.TrimSpace(s))
if ip == nil {
return fmt.Errorf("failed to parse IP: %q", s)
}
*i = ipValue(ip)
return nil
}
func (i *ipValue) Type() string {
return "ip"
}
func ipConv(sval string) (interface{}, error) {
ip := net.ParseIP(sval)
if ip != nil {
return ip, nil
}
return nil, fmt.Errorf("invalid string being converted to IP address: %s", sval)
}
// GetIP returns the net.IP value of a flag with the given name
func (f *FlagSet) GetIP(name string) (net.IP, error) {
val, err := f.getFlagType(name, "ip", ipConv)
if err != nil {
return nil, err
}
return val.(net.IP), nil
}
// IPVar defines a net.IP flag with specified name, default value, and usage string.
// The argument p points to a net.IP variable in which to store the value of the flag.
func (f *FlagSet) IPVar(p *net.IP, name string, value net.IP, usage string) {
f.VarP(newIPValue(value, p), name, "", usage)
}
// IPVarP is like IPVar, but accepts a shorthand letter that can be used after a single dash.
func (f *FlagSet) IPVarP(p *net.IP, name, shorthand string, value net.IP, usage string) {
f.VarP(newIPValue(value, p), name, shorthand, usage)
}
// IPVar defines a net.IP flag with specified name, default value, and usage string.
// The argument p points to a net.IP variable in which to store the value of the flag.
func IPVar(p *net.IP, name string, value net.IP, usage string) {
CommandLine.VarP(newIPValue(value, p), name, "", usage)
}
// IPVarP is like IPVar, but accepts a shorthand letter that can be used after a single dash.
func IPVarP(p *net.IP, name, shorthand string, value net.IP, usage string) {
CommandLine.VarP(newIPValue(value, p), name, shorthand, usage)
}
// IP defines a net.IP flag with specified name, default value, and usage string.
// The return value is the address of a net.IP variable that stores the value of the flag.
func (f *FlagSet) IP(name string, value net.IP, usage string) *net.IP {
p := new(net.IP)
f.IPVarP(p, name, "", value, usage)
return p
}
// IPP is like IP, but accepts a shorthand letter that can be used after a single dash.
func (f *FlagSet) IPP(name, shorthand string, value net.IP, usage string) *net.IP {
p := new(net.IP)
f.IPVarP(p, name, shorthand, value, usage)
return p
}
// IP defines a net.IP flag with specified name, default value, and usage string.
// The return value is the address of a net.IP variable that stores the value of the flag.
func IP(name string, value net.IP, usage string) *net.IP {
return CommandLine.IPP(name, "", value, usage)
}
// IPP is like IP, but accepts a shorthand letter that can be used after a single dash.
func IPP(name, shorthand string, value net.IP, usage string) *net.IP {
return CommandLine.IPP(name, shorthand, value, usage)
}


@ -1,122 +0,0 @@
package pflag
import (
"fmt"
"net"
"strconv"
)
// -- net.IPMask value
type ipMaskValue net.IPMask
func newIPMaskValue(val net.IPMask, p *net.IPMask) *ipMaskValue {
*p = val
return (*ipMaskValue)(p)
}
func (i *ipMaskValue) String() string { return net.IPMask(*i).String() }
func (i *ipMaskValue) Set(s string) error {
ip := ParseIPv4Mask(s)
if ip == nil {
return fmt.Errorf("failed to parse IP mask: %q", s)
}
*i = ipMaskValue(ip)
return nil
}
func (i *ipMaskValue) Type() string {
return "ipMask"
}
// ParseIPv4Mask parses an IPv4 netmask written in IP form (e.g. 255.255.255.0).
// This function should really belong to the net package.
func ParseIPv4Mask(s string) net.IPMask {
mask := net.ParseIP(s)
if mask == nil {
if len(s) != 8 {
return nil
}
// net.IPMask.String() actually outputs things like ffffff00
// so write a horrible parser for that as well :-(
m := []int{}
for i := 0; i < 4; i++ {
b := "0x" + s[2*i:2*i+2]
d, err := strconv.ParseInt(b, 0, 0)
if err != nil {
return nil
}
m = append(m, int(d))
}
s := fmt.Sprintf("%d.%d.%d.%d", m[0], m[1], m[2], m[3])
mask = net.ParseIP(s)
if mask == nil {
return nil
}
}
return net.IPv4Mask(mask[12], mask[13], mask[14], mask[15])
}
func parseIPv4Mask(sval string) (interface{}, error) {
mask := ParseIPv4Mask(sval)
if mask == nil {
return nil, fmt.Errorf("unable to parse %s as net.IPMask", sval)
}
return mask, nil
}
// GetIPv4Mask returns the net.IPMask value of a flag with the given name
func (f *FlagSet) GetIPv4Mask(name string) (net.IPMask, error) {
val, err := f.getFlagType(name, "ipMask", parseIPv4Mask)
if err != nil {
return nil, err
}
return val.(net.IPMask), nil
}
// IPMaskVar defines a net.IPMask flag with specified name, default value, and usage string.
// The argument p points to a net.IPMask variable in which to store the value of the flag.
func (f *FlagSet) IPMaskVar(p *net.IPMask, name string, value net.IPMask, usage string) {
f.VarP(newIPMaskValue(value, p), name, "", usage)
}
// IPMaskVarP is like IPMaskVar, but accepts a shorthand letter that can be used after a single dash.
func (f *FlagSet) IPMaskVarP(p *net.IPMask, name, shorthand string, value net.IPMask, usage string) {
f.VarP(newIPMaskValue(value, p), name, shorthand, usage)
}
// IPMaskVar defines a net.IPMask flag with specified name, default value, and usage string.
// The argument p points to a net.IPMask variable in which to store the value of the flag.
func IPMaskVar(p *net.IPMask, name string, value net.IPMask, usage string) {
CommandLine.VarP(newIPMaskValue(value, p), name, "", usage)
}
// IPMaskVarP is like IPMaskVar, but accepts a shorthand letter that can be used after a single dash.
func IPMaskVarP(p *net.IPMask, name, shorthand string, value net.IPMask, usage string) {
CommandLine.VarP(newIPMaskValue(value, p), name, shorthand, usage)
}
// IPMask defines a net.IPMask flag with specified name, default value, and usage string.
// The return value is the address of a net.IPMask variable that stores the value of the flag.
func (f *FlagSet) IPMask(name string, value net.IPMask, usage string) *net.IPMask {
p := new(net.IPMask)
f.IPMaskVarP(p, name, "", value, usage)
return p
}
// IPMaskP is like IPMask, but accepts a shorthand letter that can be used after a single dash.
func (f *FlagSet) IPMaskP(name, shorthand string, value net.IPMask, usage string) *net.IPMask {
p := new(net.IPMask)
f.IPMaskVarP(p, name, shorthand, value, usage)
return p
}
// IPMask defines a net.IPMask flag with specified name, default value, and usage string.
// The return value is the address of a net.IPMask variable that stores the value of the flag.
func IPMask(name string, value net.IPMask, usage string) *net.IPMask {
return CommandLine.IPMaskP(name, "", value, usage)
}
// IPMaskP is like IPMask, but accepts a shorthand letter that can be used after a single dash.
func IPMaskP(name, shorthand string, value net.IPMask, usage string) *net.IPMask {
return CommandLine.IPMaskP(name, shorthand, value, usage)
}
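
ParseIPv4Mask is the interesting piece of this file: it accepts both the dotted form and the 8-character hex form that net.IPMask.String() emits, and returns nil for anything else. A quick sketch of that behavior, with the import path github.com/spf13/pflag assumed:

```Go
package main

import (
	"fmt"

	"github.com/spf13/pflag"
)

func main() {
	// Dotted and hex forms parse to the same mask; invalid input yields nil.
	fmt.Println(pflag.ParseIPv4Mask("255.255.255.0")) // ffffff00
	fmt.Println(pflag.ParseIPv4Mask("ffffff00"))      // ffffff00
	fmt.Println(pflag.ParseIPv4Mask("not-a-mask"))    // <nil>
}
```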


@ -1,100 +0,0 @@
package pflag
import (
"fmt"
"net"
"strings"
)
// IPNet adapts net.IPNet for use as a flag.
type ipNetValue net.IPNet
func (ipnet ipNetValue) String() string {
n := net.IPNet(ipnet)
return n.String()
}
func (ipnet *ipNetValue) Set(value string) error {
_, n, err := net.ParseCIDR(strings.TrimSpace(value))
if err != nil {
return err
}
*ipnet = ipNetValue(*n)
return nil
}
func (*ipNetValue) Type() string {
return "ipNet"
}
var _ = strings.TrimSpace
func newIPNetValue(val net.IPNet, p *net.IPNet) *ipNetValue {
*p = val
return (*ipNetValue)(p)
}
func ipNetConv(sval string) (interface{}, error) {
_, n, err := net.ParseCIDR(strings.TrimSpace(sval))
if err == nil {
return *n, nil
}
return nil, fmt.Errorf("invalid string being converted to IPNet: %s", sval)
}
// GetIPNet returns the net.IPNet value of a flag with the given name
func (f *FlagSet) GetIPNet(name string) (net.IPNet, error) {
val, err := f.getFlagType(name, "ipNet", ipNetConv)
if err != nil {
return net.IPNet{}, err
}
return val.(net.IPNet), nil
}
// IPNetVar defines a net.IPNet flag with specified name, default value, and usage string.
// The argument p points to a net.IPNet variable in which to store the value of the flag.
func (f *FlagSet) IPNetVar(p *net.IPNet, name string, value net.IPNet, usage string) {
f.VarP(newIPNetValue(value, p), name, "", usage)
}
// IPNetVarP is like IPNetVar, but accepts a shorthand letter that can be used after a single dash.
func (f *FlagSet) IPNetVarP(p *net.IPNet, name, shorthand string, value net.IPNet, usage string) {
f.VarP(newIPNetValue(value, p), name, shorthand, usage)
}
// IPNetVar defines a net.IPNet flag with specified name, default value, and usage string.
// The argument p points to a net.IPNet variable in which to store the value of the flag.
func IPNetVar(p *net.IPNet, name string, value net.IPNet, usage string) {
CommandLine.VarP(newIPNetValue(value, p), name, "", usage)
}
// IPNetVarP is like IPNetVar, but accepts a shorthand letter that can be used after a single dash.
func IPNetVarP(p *net.IPNet, name, shorthand string, value net.IPNet, usage string) {
CommandLine.VarP(newIPNetValue(value, p), name, shorthand, usage)
}
// IPNet defines a net.IPNet flag with specified name, default value, and usage string.
// The return value is the address of a net.IPNet variable that stores the value of the flag.
func (f *FlagSet) IPNet(name string, value net.IPNet, usage string) *net.IPNet {
p := new(net.IPNet)
f.IPNetVarP(p, name, "", value, usage)
return p
}
// IPNetP is like IPNet, but accepts a shorthand letter that can be used after a single dash.
func (f *FlagSet) IPNetP(name, shorthand string, value net.IPNet, usage string) *net.IPNet {
p := new(net.IPNet)
f.IPNetVarP(p, name, shorthand, value, usage)
return p
}
// IPNet defines a net.IPNet flag with specified name, default value, and usage string.
// The return value is the address of a net.IPNet variable that stores the value of the flag.
func IPNet(name string, value net.IPNet, usage string) *net.IPNet {
return CommandLine.IPNetP(name, "", value, usage)
}
// IPNetP is like IPNet, but accepts a shorthand letter that can be used after a single dash.
func IPNetP(name, shorthand string, value net.IPNet, usage string) *net.IPNet {
return CommandLine.IPNetP(name, shorthand, value, usage)
}
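
Unlike the plain IP flag above, ipNetValue parses full CIDR notation via net.ParseCIDR and stores the network rather than the literal address. A short sketch (import path and NewFlagSet constructor assumed, as before):

```Go
package main

import (
	"fmt"
	"net"

	"github.com/spf13/pflag"
)

func main() {
	fs := pflag.NewFlagSet("demo", pflag.ContinueOnError)

	// The default has to be a net.IPNet value; Set replaces it by parsing a CIDR string.
	_, def, _ := net.ParseCIDR("10.0.0.0/8")
	subnet := fs.IPNet("subnet", *def, "allowed subnet")

	if err := fs.Parse([]string{"--subnet=192.168.1.7/24"}); err != nil {
		panic(err)
	}
	// ParseCIDR masks the host bits, so the stored value is the network address.
	fmt.Println(subnet.String()) // 192.168.1.0/24
}
```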


@ -1,82 +0,0 @@
package pflag
import "fmt"
// -- string Value
type stringValue string
func newStringValue(val string, p *string) *stringValue {
*p = val
return (*stringValue)(p)
}
func (s *stringValue) Set(val string) error {
*s = stringValue(val)
return nil
}
func (s *stringValue) Type() string {
return "string"
}
func (s *stringValue) String() string { return fmt.Sprintf("%s", *s) }
func stringConv(sval string) (interface{}, error) {
return sval, nil
}
// GetString returns the string value of a flag with the given name
func (f *FlagSet) GetString(name string) (string, error) {
val, err := f.getFlagType(name, "string", stringConv)
if err != nil {
return "", err
}
return val.(string), nil
}
// StringVar defines a string flag with specified name, default value, and usage string.
// The argument p points to a string variable in which to store the value of the flag.
func (f *FlagSet) StringVar(p *string, name string, value string, usage string) {
f.VarP(newStringValue(value, p), name, "", usage)
}
// StringVarP is like StringVar, but accepts a shorthand letter that can be used after a single dash.
func (f *FlagSet) StringVarP(p *string, name, shorthand string, value string, usage string) {
f.VarP(newStringValue(value, p), name, shorthand, usage)
}
// StringVar defines a string flag with specified name, default value, and usage string.
// The argument p points to a string variable in which to store the value of the flag.
func StringVar(p *string, name string, value string, usage string) {
CommandLine.VarP(newStringValue(value, p), name, "", usage)
}
// StringVarP is like StringVar, but accepts a shorthand letter that can be used after a single dash.
func StringVarP(p *string, name, shorthand string, value string, usage string) {
CommandLine.VarP(newStringValue(value, p), name, shorthand, usage)
}
// String defines a string flag with specified name, default value, and usage string.
// The return value is the address of a string variable that stores the value of the flag.
func (f *FlagSet) String(name string, value string, usage string) *string {
p := new(string)
f.StringVarP(p, name, "", value, usage)
return p
}
// StringP is like String, but accepts a shorthand letter that can be used after a single dash.
func (f *FlagSet) StringP(name, shorthand string, value string, usage string) *string {
p := new(string)
f.StringVarP(p, name, shorthand, value, usage)
return p
}
// String defines a string flag with specified name, default value, and usage string.
// The return value is the address of a string variable that stores the value of the flag.
func String(name string, value string, usage string) *string {
return CommandLine.StringP(name, "", value, usage)
}
// StringP is like String, but accepts a shorthand letter that can be used after a single dash.
func StringP(name, shorthand string, value string, usage string) *string {
return CommandLine.StringP(name, shorthand, value, usage)
}
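
getFlagType, used by GetString and the other Get* helpers in these files, looks a flag up by name, checks its declared Type(), and converts the current textual value back into a typed one. A minimal sketch of that round trip, again assuming the github.com/spf13/pflag import path:

```Go
package main

import (
	"fmt"

	"github.com/spf13/pflag"
)

func main() {
	fs := pflag.NewFlagSet("demo", pflag.ContinueOnError)
	fs.StringP("output", "o", "yaml", "output format")

	if err := fs.Parse([]string{"-o", "json"}); err != nil {
		panic(err)
	}

	// GetString returns an error if "output" had been registered as a
	// different type; here it simply returns the parsed value.
	format, err := fs.GetString("output")
	if err != nil {
		panic(err)
	}
	fmt.Println(format) // json
}
```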


@ -1,105 +0,0 @@
package pflag
import (
"fmt"
"strings"
)
var _ = fmt.Fprint
// -- stringSlice Value
type stringSliceValue struct {
value *[]string
changed bool
}
func newStringSliceValue(val []string, p *[]string) *stringSliceValue {
ssv := new(stringSliceValue)
ssv.value = p
*ssv.value = val
return ssv
}
func (s *stringSliceValue) Set(val string) error {
v := strings.Split(val, ",")
if !s.changed {
*s.value = v
} else {
*s.value = append(*s.value, v...)
}
s.changed = true
return nil
}
func (s *stringSliceValue) Type() string {
return "stringSlice"
}
func (s *stringSliceValue) String() string { return "[" + strings.Join(*s.value, ",") + "]" }
func stringSliceConv(sval string) (interface{}, error) {
sval = strings.Trim(sval, "[]")
// An empty string would cause a slice with one (empty) string
if len(sval) == 0 {
return []string{}, nil
}
v := strings.Split(sval, ",")
return v, nil
}
// GetStringSlice returns the []string value of a flag with the given name
func (f *FlagSet) GetStringSlice(name string) ([]string, error) {
val, err := f.getFlagType(name, "stringSlice", stringSliceConv)
if err != nil {
return []string{}, err
}
return val.([]string), nil
}
// StringSliceVar defines a []string flag with specified name, default value, and usage string.
// The argument p points to a []string variable in which to store the value of the flag.
func (f *FlagSet) StringSliceVar(p *[]string, name string, value []string, usage string) {
f.VarP(newStringSliceValue(value, p), name, "", usage)
}
// StringSliceVarP is like StringSliceVar, but accepts a shorthand letter that can be used after a single dash.
func (f *FlagSet) StringSliceVarP(p *[]string, name, shorthand string, value []string, usage string) {
f.VarP(newStringSliceValue(value, p), name, shorthand, usage)
}
// StringSliceVar defines a []string flag with specified name, default value, and usage string.
// The argument p points to a []string variable in which to store the value of the flag.
func StringSliceVar(p *[]string, name string, value []string, usage string) {
CommandLine.VarP(newStringSliceValue(value, p), name, "", usage)
}
// StringSliceVarP is like StringSliceVar, but accepts a shorthand letter that can be used after a single dash.
func StringSliceVarP(p *[]string, name, shorthand string, value []string, usage string) {
CommandLine.VarP(newStringSliceValue(value, p), name, shorthand, usage)
}
// StringSlice defines a []string flag with specified name, default value, and usage string.
// The return value is the address of a []string variable that stores the value of the flag.
func (f *FlagSet) StringSlice(name string, value []string, usage string) *[]string {
p := []string{}
f.StringSliceVarP(&p, name, "", value, usage)
return &p
}
// StringSliceP is like StringSlice, but accepts a shorthand letter that can be used after a single dash.
func (f *FlagSet) StringSliceP(name, shorthand string, value []string, usage string) *[]string {
p := []string{}
f.StringSliceVarP(&p, name, shorthand, value, usage)
return &p
}
// StringSlice defines a []string flag with specified name, default value, and usage string.
// The return value is the address of a []string variable that stores the value of the flag.
func StringSlice(name string, value []string, usage string) *[]string {
return CommandLine.StringSliceP(name, "", value, usage)
}
// StringSliceP is like StringSlice, but accepts a shorthand letter that can be used after a single dash.
func StringSliceP(name, shorthand string, value []string, usage string) *[]string {
return CommandLine.StringSliceP(name, shorthand, value, usage)
}


@ -1,91 +0,0 @@
package pflag
import (
"fmt"
"strconv"
)
// -- uint Value
type uintValue uint
func newUintValue(val uint, p *uint) *uintValue {
*p = val
return (*uintValue)(p)
}
func (i *uintValue) Set(s string) error {
v, err := strconv.ParseUint(s, 0, 64)
*i = uintValue(v)
return err
}
func (i *uintValue) Type() string {
return "uint"
}
func (i *uintValue) String() string { return fmt.Sprintf("%v", *i) }
func uintConv(sval string) (interface{}, error) {
v, err := strconv.ParseUint(sval, 0, 0)
if err != nil {
return 0, err
}
return uint(v), nil
}
// GetUint returns the uint value of a flag with the given name
func (f *FlagSet) GetUint(name string) (uint, error) {
val, err := f.getFlagType(name, "uint", uintConv)
if err != nil {
return 0, err
}
return val.(uint), nil
}
// UintVar defines a uint flag with specified name, default value, and usage string.
// The argument p points to a uint variable in which to store the value of the flag.
func (f *FlagSet) UintVar(p *uint, name string, value uint, usage string) {
f.VarP(newUintValue(value, p), name, "", usage)
}
// UintVarP is like UintVar, but accepts a shorthand letter that can be used after a single dash.
func (f *FlagSet) UintVarP(p *uint, name, shorthand string, value uint, usage string) {
f.VarP(newUintValue(value, p), name, shorthand, usage)
}
// UintVar defines a uint flag with specified name, default value, and usage string.
// The argument p points to a uint variable in which to store the value of the flag.
func UintVar(p *uint, name string, value uint, usage string) {
CommandLine.VarP(newUintValue(value, p), name, "", usage)
}
// UintVarP is like UintVar, but accepts a shorthand letter that can be used after a single dash.
func UintVarP(p *uint, name, shorthand string, value uint, usage string) {
CommandLine.VarP(newUintValue(value, p), name, shorthand, usage)
}
// Uint defines a uint flag with specified name, default value, and usage string.
// The return value is the address of a uint variable that stores the value of the flag.
func (f *FlagSet) Uint(name string, value uint, usage string) *uint {
p := new(uint)
f.UintVarP(p, name, "", value, usage)
return p
}
// UintP is like Uint, but accepts a shorthand letter that can be used after a single dash.
func (f *FlagSet) UintP(name, shorthand string, value uint, usage string) *uint {
p := new(uint)
f.UintVarP(p, name, shorthand, value, usage)
return p
}
// Uint defines a uint flag with specified name, default value, and usage string.
// The return value is the address of a uint variable that stores the value of the flag.
func Uint(name string, value uint, usage string) *uint {
return CommandLine.UintP(name, "", value, usage)
}
// UintP is like Uint, but accepts a shorthand letter that can be used after a single dash.
func UintP(name, shorthand string, value uint, usage string) *uint {
return CommandLine.UintP(name, shorthand, value, usage)
}


@ -1,89 +0,0 @@
package pflag
import (
"fmt"
"strconv"
)
// -- uint16 value
type uint16Value uint16
func newUint16Value(val uint16, p *uint16) *uint16Value {
*p = val
return (*uint16Value)(p)
}
func (i *uint16Value) String() string { return fmt.Sprintf("%d", *i) }
func (i *uint16Value) Set(s string) error {
v, err := strconv.ParseUint(s, 0, 16)
*i = uint16Value(v)
return err
}
func (i *uint16Value) Type() string {
return "uint16"
}
func uint16Conv(sval string) (interface{}, error) {
v, err := strconv.ParseUint(sval, 0, 16)
if err != nil {
return 0, err
}
return uint16(v), nil
}
// GetUint16 returns the uint16 value of a flag with the given name
func (f *FlagSet) GetUint16(name string) (uint16, error) {
val, err := f.getFlagType(name, "uint16", uint16Conv)
if err != nil {
return 0, err
}
return val.(uint16), nil
}
// Uint16Var defines a uint16 flag with specified name, default value, and usage string.
// The argument p points to a uint16 variable in which to store the value of the flag.
func (f *FlagSet) Uint16Var(p *uint16, name string, value uint16, usage string) {
f.VarP(newUint16Value(value, p), name, "", usage)
}
// Uint16VarP is like Uint16Var, but accepts a shorthand letter that can be used after a single dash.
func (f *FlagSet) Uint16VarP(p *uint16, name, shorthand string, value uint16, usage string) {
f.VarP(newUint16Value(value, p), name, shorthand, usage)
}
// Uint16Var defines a uint16 flag with specified name, default value, and usage string.
// The argument p points to a uint16 variable in which to store the value of the flag.
func Uint16Var(p *uint16, name string, value uint16, usage string) {
CommandLine.VarP(newUint16Value(value, p), name, "", usage)
}
// Uint16VarP is like Uint16Var, but accepts a shorthand letter that can be used after a single dash.
func Uint16VarP(p *uint16, name, shorthand string, value uint16, usage string) {
CommandLine.VarP(newUint16Value(value, p), name, shorthand, usage)
}
// Uint16 defines a uint16 flag with specified name, default value, and usage string.
// The return value is the address of a uint16 variable that stores the value of the flag.
func (f *FlagSet) Uint16(name string, value uint16, usage string) *uint16 {
p := new(uint16)
f.Uint16VarP(p, name, "", value, usage)
return p
}
// Uint16P is like Uint16, but accepts a shorthand letter that can be used after a single dash.
func (f *FlagSet) Uint16P(name, shorthand string, value uint16, usage string) *uint16 {
p := new(uint16)
f.Uint16VarP(p, name, shorthand, value, usage)
return p
}
// Uint16 defines a uint16 flag with specified name, default value, and usage string.
// The return value is the address of a uint16 variable that stores the value of the flag.
func Uint16(name string, value uint16, usage string) *uint16 {
return CommandLine.Uint16P(name, "", value, usage)
}
// Uint16P is like Uint16, but accepts a shorthand letter that can be used after a single dash.
func Uint16P(name, shorthand string, value uint16, usage string) *uint16 {
return CommandLine.Uint16P(name, shorthand, value, usage)
}


@ -1,89 +0,0 @@
package pflag
import (
"fmt"
"strconv"
)
// -- uint32 value
type uint32Value uint32
func newUint32Value(val uint32, p *uint32) *uint32Value {
*p = val
return (*uint32Value)(p)
}
func (i *uint32Value) String() string { return fmt.Sprintf("%d", *i) }
func (i *uint32Value) Set(s string) error {
v, err := strconv.ParseUint(s, 0, 32)
*i = uint32Value(v)
return err
}
func (i *uint32Value) Type() string {
return "uint32"
}
func uint32Conv(sval string) (interface{}, error) {
v, err := strconv.ParseUint(sval, 0, 32)
if err != nil {
return 0, err
}
return uint32(v), nil
}
// GetUint32 returns the uint32 value of a flag with the given name
func (f *FlagSet) GetUint32(name string) (uint32, error) {
val, err := f.getFlagType(name, "uint32", uint32Conv)
if err != nil {
return 0, err
}
return val.(uint32), nil
}
// Uint32Var defines a uint32 flag with specified name, default value, and usage string.
// The argument p points to a uint32 variable in which to store the value of the flag.
func (f *FlagSet) Uint32Var(p *uint32, name string, value uint32, usage string) {
f.VarP(newUint32Value(value, p), name, "", usage)
}
// Uint32VarP is like Uint32Var, but accepts a shorthand letter that can be used after a single dash.
func (f *FlagSet) Uint32VarP(p *uint32, name, shorthand string, value uint32, usage string) {
f.VarP(newUint32Value(value, p), name, shorthand, usage)
}
// Uint32Var defines a uint32 flag with specified name, default value, and usage string.
// The argument p points to a uint32 variable in which to store the value of the flag.
func Uint32Var(p *uint32, name string, value uint32, usage string) {
CommandLine.VarP(newUint32Value(value, p), name, "", usage)
}
// Uint32VarP is like Uint32Var, but accepts a shorthand letter that can be used after a single dash.
func Uint32VarP(p *uint32, name, shorthand string, value uint32, usage string) {
CommandLine.VarP(newUint32Value(value, p), name, shorthand, usage)
}
// Uint32 defines a uint32 flag with specified name, default value, and usage string.
// The return value is the address of a uint32 variable that stores the value of the flag.
func (f *FlagSet) Uint32(name string, value uint32, usage string) *uint32 {
p := new(uint32)
f.Uint32VarP(p, name, "", value, usage)
return p
}
// Uint32P is like Uint32, but accepts a shorthand letter that can be used after a single dash.
func (f *FlagSet) Uint32P(name, shorthand string, value uint32, usage string) *uint32 {
p := new(uint32)
f.Uint32VarP(p, name, shorthand, value, usage)
return p
}
// Uint32 defines a uint32 flag with specified name, default value, and usage string.
// The return value is the address of a uint32 variable that stores the value of the flag.
func Uint32(name string, value uint32, usage string) *uint32 {
return CommandLine.Uint32P(name, "", value, usage)
}
// Uint32P is like Uint32, but accepts a shorthand letter that can be used after a single dash.
func Uint32P(name, shorthand string, value uint32, usage string) *uint32 {
return CommandLine.Uint32P(name, shorthand, value, usage)
}


@ -1,91 +0,0 @@
package pflag
import (
"fmt"
"strconv"
)
// -- uint64 Value
type uint64Value uint64
func newUint64Value(val uint64, p *uint64) *uint64Value {
*p = val
return (*uint64Value)(p)
}
func (i *uint64Value) Set(s string) error {
v, err := strconv.ParseUint(s, 0, 64)
*i = uint64Value(v)
return err
}
func (i *uint64Value) Type() string {
return "uint64"
}
func (i *uint64Value) String() string { return fmt.Sprintf("%v", *i) }
func uint64Conv(sval string) (interface{}, error) {
v, err := strconv.ParseUint(sval, 0, 64)
if err != nil {
return 0, err
}
return uint64(v), nil
}
// GetUint64 returns the uint64 value of a flag with the given name
func (f *FlagSet) GetUint64(name string) (uint64, error) {
val, err := f.getFlagType(name, "uint64", uint64Conv)
if err != nil {
return 0, err
}
return val.(uint64), nil
}
// Uint64Var defines a uint64 flag with specified name, default value, and usage string.
// The argument p points to a uint64 variable in which to store the value of the flag.
func (f *FlagSet) Uint64Var(p *uint64, name string, value uint64, usage string) {
f.VarP(newUint64Value(value, p), name, "", usage)
}
// Uint64VarP is like Uint64Var, but accepts a shorthand letter that can be used after a single dash.
func (f *FlagSet) Uint64VarP(p *uint64, name, shorthand string, value uint64, usage string) {
f.VarP(newUint64Value(value, p), name, shorthand, usage)
}
// Uint64Var defines a uint64 flag with specified name, default value, and usage string.
// The argument p points to a uint64 variable in which to store the value of the flag.
func Uint64Var(p *uint64, name string, value uint64, usage string) {
CommandLine.VarP(newUint64Value(value, p), name, "", usage)
}
// Uint64VarP is like Uint64Var, but accepts a shorthand letter that can be used after a single dash.
func Uint64VarP(p *uint64, name, shorthand string, value uint64, usage string) {
CommandLine.VarP(newUint64Value(value, p), name, shorthand, usage)
}
// Uint64 defines a uint64 flag with specified name, default value, and usage string.
// The return value is the address of a uint64 variable that stores the value of the flag.
func (f *FlagSet) Uint64(name string, value uint64, usage string) *uint64 {
p := new(uint64)
f.Uint64VarP(p, name, "", value, usage)
return p
}
// Uint64P is like Uint64, but accepts a shorthand letter that can be used after a single dash.
func (f *FlagSet) Uint64P(name, shorthand string, value uint64, usage string) *uint64 {
p := new(uint64)
f.Uint64VarP(p, name, shorthand, value, usage)
return p
}
// Uint64 defines a uint64 flag with specified name, default value, and usage string.
// The return value is the address of a uint64 variable that stores the value of the flag.
func Uint64(name string, value uint64, usage string) *uint64 {
return CommandLine.Uint64P(name, "", value, usage)
}
// Uint64P is like Uint64, but accepts a shorthand letter that can be used after a single dash.
func Uint64P(name, shorthand string, value uint64, usage string) *uint64 {
return CommandLine.Uint64P(name, shorthand, value, usage)
}


@ -1,91 +0,0 @@
package pflag
import (
"fmt"
"strconv"
)
// -- uint8 Value
type uint8Value uint8
func newUint8Value(val uint8, p *uint8) *uint8Value {
*p = val
return (*uint8Value)(p)
}
func (i *uint8Value) Set(s string) error {
v, err := strconv.ParseUint(s, 0, 8)
*i = uint8Value(v)
return err
}
func (i *uint8Value) Type() string {
return "uint8"
}
func (i *uint8Value) String() string { return fmt.Sprintf("%v", *i) }
func uint8Conv(sval string) (interface{}, error) {
v, err := strconv.ParseUint(sval, 0, 8)
if err != nil {
return 0, err
}
return uint8(v), nil
}
// GetUint8 returns the uint8 value of a flag with the given name
func (f *FlagSet) GetUint8(name string) (uint8, error) {
val, err := f.getFlagType(name, "uint8", uint8Conv)
if err != nil {
return 0, err
}
return val.(uint8), nil
}
// Uint8Var defines a uint8 flag with specified name, default value, and usage string.
// The argument p points to a uint8 variable in which to store the value of the flag.
func (f *FlagSet) Uint8Var(p *uint8, name string, value uint8, usage string) {
f.VarP(newUint8Value(value, p), name, "", usage)
}
// Uint8VarP is like Uint8Var, but accepts a shorthand letter that can be used after a single dash.
func (f *FlagSet) Uint8VarP(p *uint8, name, shorthand string, value uint8, usage string) {
f.VarP(newUint8Value(value, p), name, shorthand, usage)
}
// Uint8Var defines a uint8 flag with specified name, default value, and usage string.
// The argument p points to a uint8 variable in which to store the value of the flag.
func Uint8Var(p *uint8, name string, value uint8, usage string) {
CommandLine.VarP(newUint8Value(value, p), name, "", usage)
}
// Uint8VarP is like Uint8Var, but accepts a shorthand letter that can be used after a single dash.
func Uint8VarP(p *uint8, name, shorthand string, value uint8, usage string) {
CommandLine.VarP(newUint8Value(value, p), name, shorthand, usage)
}
// Uint8 defines a uint8 flag with specified name, default value, and usage string.
// The return value is the address of a uint8 variable that stores the value of the flag.
func (f *FlagSet) Uint8(name string, value uint8, usage string) *uint8 {
p := new(uint8)
f.Uint8VarP(p, name, "", value, usage)
return p
}
// Uint8P is like Uint8, but accepts a shorthand letter that can be used after a single dash.
func (f *FlagSet) Uint8P(name, shorthand string, value uint8, usage string) *uint8 {
p := new(uint8)
f.Uint8VarP(p, name, shorthand, value, usage)
return p
}
// Uint8 defines a uint8 flag with specified name, default value, and usage string.
// The return value is the address of a uint8 variable that stores the value of the flag.
func Uint8(name string, value uint8, usage string) *uint8 {
return CommandLine.Uint8P(name, "", value, usage)
}
// Uint8P is like Uint8, but accepts a shorthand letter that can be used after a single dash.
func Uint8P(name, shorthand string, value uint8, usage string) *uint8 {
return CommandLine.Uint8P(name, shorthand, value, usage)
}


@ -1,69 +0,0 @@
#!/bin/bash
set -o errexit
set -o nounset
set -o pipefail
ROOT=$(dirname "${BASH_SOURCE}")/..
# Some useful colors.
if [[ -z "${color_start-}" ]]; then
declare -r color_start="\033["
declare -r color_red="${color_start}0;31m"
declare -r color_yellow="${color_start}0;33m"
declare -r color_green="${color_start}0;32m"
declare -r color_norm="${color_start}0m"
fi
SILENT=true
function is-excluded {
for e in $EXCLUDE; do
if [[ $1 -ef ${BASH_SOURCE} ]]; then
return
fi
if [[ $1 -ef "$ROOT/hack/$e" ]]; then
return
fi
done
return 1
}
while getopts ":v" opt; do
case $opt in
v)
SILENT=false
;;
\?)
echo "Invalid flag: -$OPTARG" >&2
exit 1
;;
esac
done
if $SILENT ; then
echo "Running in the silent mode, run with -v if you want to see script logs."
fi
EXCLUDE="all.sh"
ret=0
for t in `ls $ROOT/verify/*.sh`
do
if is-excluded $t ; then
echo "Skipping $t"
continue
fi
if $SILENT ; then
echo -e "Verifying $t"
if bash "$t" &> /dev/null; then
echo -e "${color_green}SUCCESS${color_norm}"
else
echo -e "${color_red}FAILED${color_norm}"
ret=1
fi
else
bash "$t" || ret=1
fi
done
exit $ret


@ -1,19 +0,0 @@
#!/bin/bash
set -o errexit
set -o nounset
set -o pipefail
ROOT=$(dirname "${BASH_SOURCE}")/..
pushd "${ROOT}" > /dev/null
GOFMT=${GOFMT:-"gofmt"}
bad_files=$(find . -name '*.go' | xargs $GOFMT -s -l)
if [[ -n "${bad_files}" ]]; then
echo "!!! '$GOFMT' needs to be run on the following files: "
echo "${bad_files}"
exit 1
fi
# ex: ts=2 sw=2 et filetype=sh


@ -1,15 +0,0 @@
#!/bin/bash
ROOT=$(dirname "${BASH_SOURCE}")/..
GOLINT=${GOLINT:-"golint"}
pushd "${ROOT}" > /dev/null
bad_files=$($GOLINT -min_confidence=0.9 ./...)
if [[ -n "${bad_files}" ]]; then
echo "!!! '$GOLINT' problems: "
echo "${bad_files}"
exit 1
fi
popd > /dev/null
# ex: ts=2 sw=2 et filetype=sh


@ -1,188 +0,0 @@
Copyright (c) 2011-2014 - Canonical Inc.
This software is licensed under the LGPLv3, included below.
As a special exception to the GNU Lesser General Public License version 3
("LGPL3"), the copyright holders of this Library give you permission to
convey to a third party a Combined Work that links statically or dynamically
to this Library without providing any Minimal Corresponding Source or
Minimal Application Code as set out in 4d or providing the installation
information set out in section 4e, provided that you comply with the other
provisions of LGPL3 and provided that you meet, for the Application the
terms and conditions of the license(s) which apply to the Application.
Except as stated in this special exception, the provisions of LGPL3 will
continue to comply in full to this Library. If you modify this Library, you
may apply this exception to your version of this Library, but you are not
obliged to do so. If you do not wish to do so, delete this exception
statement from your version. This exception does not (and cannot) modify any
license terms which apply to the Application, with which you must still
comply.
GNU LESSER GENERAL PUBLIC LICENSE
Version 3, 29 June 2007
Copyright (C) 2007 Free Software Foundation, Inc. <http://fsf.org/>
Everyone is permitted to copy and distribute verbatim copies
of this license document, but changing it is not allowed.
This version of the GNU Lesser General Public License incorporates
the terms and conditions of version 3 of the GNU General Public
License, supplemented by the additional permissions listed below.
0. Additional Definitions.
As used herein, "this License" refers to version 3 of the GNU Lesser
General Public License, and the "GNU GPL" refers to version 3 of the GNU
General Public License.
"The Library" refers to a covered work governed by this License,
other than an Application or a Combined Work as defined below.
An "Application" is any work that makes use of an interface provided
by the Library, but which is not otherwise based on the Library.
Defining a subclass of a class defined by the Library is deemed a mode
of using an interface provided by the Library.
A "Combined Work" is a work produced by combining or linking an
Application with the Library. The particular version of the Library
with which the Combined Work was made is also called the "Linked
Version".
The "Minimal Corresponding Source" for a Combined Work means the
Corresponding Source for the Combined Work, excluding any source code
for portions of the Combined Work that, considered in isolation, are
based on the Application, and not on the Linked Version.
The "Corresponding Application Code" for a Combined Work means the
object code and/or source code for the Application, including any data
and utility programs needed for reproducing the Combined Work from the
Application, but excluding the System Libraries of the Combined Work.
1. Exception to Section 3 of the GNU GPL.
You may convey a covered work under sections 3 and 4 of this License
without being bound by section 3 of the GNU GPL.
2. Conveying Modified Versions.
If you modify a copy of the Library, and, in your modifications, a
facility refers to a function or data to be supplied by an Application
that uses the facility (other than as an argument passed when the
facility is invoked), then you may convey a copy of the modified
version:
a) under this License, provided that you make a good faith effort to
ensure that, in the event an Application does not supply the
function or data, the facility still operates, and performs
whatever part of its purpose remains meaningful, or
b) under the GNU GPL, with none of the additional permissions of
this License applicable to that copy.
3. Object Code Incorporating Material from Library Header Files.
The object code form of an Application may incorporate material from
a header file that is part of the Library. You may convey such object
code under terms of your choice, provided that, if the incorporated
material is not limited to numerical parameters, data structure
layouts and accessors, or small macros, inline functions and templates
(ten or fewer lines in length), you do both of the following:
a) Give prominent notice with each copy of the object code that the
Library is used in it and that the Library and its use are
covered by this License.
b) Accompany the object code with a copy of the GNU GPL and this license
document.
4. Combined Works.
You may convey a Combined Work under terms of your choice that,
taken together, effectively do not restrict modification of the
portions of the Library contained in the Combined Work and reverse
engineering for debugging such modifications, if you also do each of
the following:
a) Give prominent notice with each copy of the Combined Work that
the Library is used in it and that the Library and its use are
covered by this License.
b) Accompany the Combined Work with a copy of the GNU GPL and this license
document.
c) For a Combined Work that displays copyright notices during
execution, include the copyright notice for the Library among
these notices, as well as a reference directing the user to the
copies of the GNU GPL and this license document.
d) Do one of the following:
0) Convey the Minimal Corresponding Source under the terms of this
License, and the Corresponding Application Code in a form
suitable for, and under terms that permit, the user to
recombine or relink the Application with a modified version of
the Linked Version to produce a modified Combined Work, in the
manner specified by section 6 of the GNU GPL for conveying
Corresponding Source.
1) Use a suitable shared library mechanism for linking with the
Library. A suitable mechanism is one that (a) uses at run time
a copy of the Library already present on the user's computer
system, and (b) will operate properly with a modified version
of the Library that is interface-compatible with the Linked
Version.
e) Provide Installation Information, but only if you would otherwise
be required to provide such information under section 6 of the
GNU GPL, and only to the extent that such information is
necessary to install and execute a modified version of the
Combined Work produced by recombining or relinking the
Application with a modified version of the Linked Version. (If
you use option 4d0, the Installation Information must accompany
the Minimal Corresponding Source and Corresponding Application
Code. If you use option 4d1, you must provide the Installation
Information in the manner specified by section 6 of the GNU GPL
for conveying Corresponding Source.)
5. Combined Libraries.
You may place library facilities that are a work based on the
Library side by side in a single library together with other library
facilities that are not Applications and are not covered by this
License, and convey such a combined library under terms of your
choice, if you do both of the following:
a) Accompany the combined library with a copy of the same work based
on the Library, uncombined with any other library facilities,
conveyed under the terms of this License.
b) Give prominent notice with the combined library that part of it
is a work based on the Library, and explaining where to find the
accompanying uncombined form of the same work.
6. Revised Versions of the GNU Lesser General Public License.
The Free Software Foundation may publish revised and/or new versions
of the GNU Lesser General Public License from time to time. Such new
versions will be similar in spirit to the present version, but may
differ in detail to address new problems or concerns.
Each version is given a distinguishing version number. If the
Library as you received it specifies that a certain numbered version
of the GNU Lesser General Public License "or any later version"
applies to it, you have the option of following the terms and
conditions either of that published version or of any later version
published by the Free Software Foundation. If the Library as you
received it does not specify a version number of the GNU Lesser
General Public License, you may choose any version of the GNU Lesser
General Public License ever published by the Free Software Foundation.
If the Library as you received it specifies that a proxy can decide
whether future versions of the GNU Lesser General Public License shall
apply, that proxy's public statement of acceptance of any version is
permanent authorization for you to choose that version for the
Library.


@ -1,31 +0,0 @@
The following files were ported to Go from C files of libyaml, and thus
are still covered by their original copyright and license:
apic.go
emitterc.go
parserc.go
readerc.go
scannerc.go
writerc.go
yamlh.go
yamlprivateh.go
Copyright (c) 2006 Kirill Simonov
Permission is hereby granted, free of charge, to any person obtaining a copy of
this software and associated documentation files (the "Software"), to deal in
the Software without restriction, including without limitation the rights to
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
of the Software, and to permit persons to whom the Software is furnished to do
so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.


@ -1,131 +0,0 @@
# YAML support for the Go language
Introduction
------------
The yaml package enables Go programs to comfortably encode and decode YAML
values. It was developed within [Canonical](https://www.canonical.com) as
part of the [juju](https://juju.ubuntu.com) project, and is based on a
pure Go port of the well-known [libyaml](http://pyyaml.org/wiki/LibYAML)
C library to parse and generate YAML data quickly and reliably.
Compatibility
-------------
The yaml package supports most of YAML 1.1 and 1.2, including support for
anchors, tags, map merging, etc. Multi-document unmarshalling is not yet
implemented, and base-60 floats from YAML 1.1 are purposefully not
supported since they're a poor design and are gone in YAML 1.2.
Installation and usage
----------------------
The import path for the package is *gopkg.in/yaml.v2*.
To install it, run:
go get gopkg.in/yaml.v2
API documentation
-----------------
If opened in a browser, the import path itself leads to the API documentation:
* [https://gopkg.in/yaml.v2](https://gopkg.in/yaml.v2)
API stability
-------------
The package API for yaml v2 will remain stable as described in [gopkg.in](https://gopkg.in).
License
-------
The yaml package is licensed under the LGPL with an exception that allows it to be linked statically. Please see the LICENSE file for details.
Example
-------
```Go
package main
import (
"fmt"
"log"
"gopkg.in/yaml.v2"
)
var data = `
a: Easy!
b:
c: 2
d: [3, 4]
`
type T struct {
A string
B struct {
RenamedC int `yaml:"c"`
D []int `yaml:",flow"`
}
}
func main() {
t := T{}
err := yaml.Unmarshal([]byte(data), &t)
if err != nil {
log.Fatalf("error: %v", err)
}
fmt.Printf("--- t:\n%v\n\n", t)
d, err := yaml.Marshal(&t)
if err != nil {
log.Fatalf("error: %v", err)
}
fmt.Printf("--- t dump:\n%s\n\n", string(d))
m := make(map[interface{}]interface{})
err = yaml.Unmarshal([]byte(data), &m)
if err != nil {
log.Fatalf("error: %v", err)
}
fmt.Printf("--- m:\n%v\n\n", m)
d, err = yaml.Marshal(&m)
if err != nil {
log.Fatalf("error: %v", err)
}
fmt.Printf("--- m dump:\n%s\n\n", string(d))
}
```
This example will generate the following output:
```
--- t:
{Easy! {2 [3 4]}}
--- t dump:
a: Easy!
b:
c: 2
d: [3, 4]
--- m:
map[a:Easy! b:map[c:2 d:[3 4]]]
--- m dump:
a: Easy!
b:
c: 2
d:
- 3
- 4
```


@ -1,742 +0,0 @@
package yaml
import (
"io"
"os"
)
func yaml_insert_token(parser *yaml_parser_t, pos int, token *yaml_token_t) {
//fmt.Println("yaml_insert_token", "pos:", pos, "typ:", token.typ, "head:", parser.tokens_head, "len:", len(parser.tokens))
// Check if we can move the queue at the beginning of the buffer.
if parser.tokens_head > 0 && len(parser.tokens) == cap(parser.tokens) {
if parser.tokens_head != len(parser.tokens) {
copy(parser.tokens, parser.tokens[parser.tokens_head:])
}
parser.tokens = parser.tokens[:len(parser.tokens)-parser.tokens_head]
parser.tokens_head = 0
}
parser.tokens = append(parser.tokens, *token)
if pos < 0 {
return
}
copy(parser.tokens[parser.tokens_head+pos+1:], parser.tokens[parser.tokens_head+pos:])
parser.tokens[parser.tokens_head+pos] = *token
}
// Create a new parser object.
func yaml_parser_initialize(parser *yaml_parser_t) bool {
*parser = yaml_parser_t{
raw_buffer: make([]byte, 0, input_raw_buffer_size),
buffer: make([]byte, 0, input_buffer_size),
}
return true
}
// Destroy a parser object.
func yaml_parser_delete(parser *yaml_parser_t) {
*parser = yaml_parser_t{}
}
// String read handler.
func yaml_string_read_handler(parser *yaml_parser_t, buffer []byte) (n int, err error) {
if parser.input_pos == len(parser.input) {
return 0, io.EOF
}
n = copy(buffer, parser.input[parser.input_pos:])
parser.input_pos += n
return n, nil
}
// File read handler.
func yaml_file_read_handler(parser *yaml_parser_t, buffer []byte) (n int, err error) {
return parser.input_file.Read(buffer)
}
// Set a string input.
func yaml_parser_set_input_string(parser *yaml_parser_t, input []byte) {
if parser.read_handler != nil {
panic("must set the input source only once")
}
parser.read_handler = yaml_string_read_handler
parser.input = input
parser.input_pos = 0
}
// Set a file input.
func yaml_parser_set_input_file(parser *yaml_parser_t, file *os.File) {
if parser.read_handler != nil {
panic("must set the input source only once")
}
parser.read_handler = yaml_file_read_handler
parser.input_file = file
}
// Set the source encoding.
func yaml_parser_set_encoding(parser *yaml_parser_t, encoding yaml_encoding_t) {
if parser.encoding != yaml_ANY_ENCODING {
panic("must set the encoding only once")
}
parser.encoding = encoding
}
// Create a new emitter object.
func yaml_emitter_initialize(emitter *yaml_emitter_t) bool {
*emitter = yaml_emitter_t{
buffer: make([]byte, output_buffer_size),
raw_buffer: make([]byte, 0, output_raw_buffer_size),
states: make([]yaml_emitter_state_t, 0, initial_stack_size),
events: make([]yaml_event_t, 0, initial_queue_size),
}
return true
}
// Destroy an emitter object.
func yaml_emitter_delete(emitter *yaml_emitter_t) {
*emitter = yaml_emitter_t{}
}
// String write handler.
func yaml_string_write_handler(emitter *yaml_emitter_t, buffer []byte) error {
*emitter.output_buffer = append(*emitter.output_buffer, buffer...)
return nil
}
// File write handler.
func yaml_file_write_handler(emitter *yaml_emitter_t, buffer []byte) error {
_, err := emitter.output_file.Write(buffer)
return err
}
// Set a string output.
func yaml_emitter_set_output_string(emitter *yaml_emitter_t, output_buffer *[]byte) {
if emitter.write_handler != nil {
panic("must set the output target only once")
}
emitter.write_handler = yaml_string_write_handler
emitter.output_buffer = output_buffer
}
// Set a file output.
func yaml_emitter_set_output_file(emitter *yaml_emitter_t, file io.Writer) {
if emitter.write_handler != nil {
panic("must set the output target only once")
}
emitter.write_handler = yaml_file_write_handler
emitter.output_file = file
}
// Set the output encoding.
func yaml_emitter_set_encoding(emitter *yaml_emitter_t, encoding yaml_encoding_t) {
if emitter.encoding != yaml_ANY_ENCODING {
panic("must set the output encoding only once")
}
emitter.encoding = encoding
}
// Set the canonical output style.
func yaml_emitter_set_canonical(emitter *yaml_emitter_t, canonical bool) {
emitter.canonical = canonical
}
// Set the indentation increment.
func yaml_emitter_set_indent(emitter *yaml_emitter_t, indent int) {
if indent < 2 || indent > 9 {
indent = 2
}
emitter.best_indent = indent
}
// Set the preferred line width.
func yaml_emitter_set_width(emitter *yaml_emitter_t, width int) {
if width < 0 {
width = -1
}
emitter.best_width = width
}
// Set if unescaped non-ASCII characters are allowed.
func yaml_emitter_set_unicode(emitter *yaml_emitter_t, unicode bool) {
emitter.unicode = unicode
}
// Set the preferred line break character.
func yaml_emitter_set_break(emitter *yaml_emitter_t, line_break yaml_break_t) {
emitter.line_break = line_break
}
///*
// * Destroy a token object.
// */
//
//YAML_DECLARE(void)
//yaml_token_delete(yaml_token_t *token)
//{
// assert(token); // Non-NULL token object expected.
//
// switch (token.type)
// {
// case YAML_TAG_DIRECTIVE_TOKEN:
// yaml_free(token.data.tag_directive.handle);
// yaml_free(token.data.tag_directive.prefix);
// break;
//
// case YAML_ALIAS_TOKEN:
// yaml_free(token.data.alias.value);
// break;
//
// case YAML_ANCHOR_TOKEN:
// yaml_free(token.data.anchor.value);
// break;
//
// case YAML_TAG_TOKEN:
// yaml_free(token.data.tag.handle);
// yaml_free(token.data.tag.suffix);
// break;
//
// case YAML_SCALAR_TOKEN:
// yaml_free(token.data.scalar.value);
// break;
//
// default:
// break;
// }
//
// memset(token, 0, sizeof(yaml_token_t));
//}
//
///*
// * Check if a string is a valid UTF-8 sequence.
// *
// * Check 'reader.c' for more details on UTF-8 encoding.
// */
//
//static int
//yaml_check_utf8(yaml_char_t *start, size_t length)
//{
// yaml_char_t *end = start+length;
// yaml_char_t *pointer = start;
//
// while (pointer < end) {
// unsigned char octet;
// unsigned int width;
// unsigned int value;
// size_t k;
//
// octet = pointer[0];
// width = (octet & 0x80) == 0x00 ? 1 :
// (octet & 0xE0) == 0xC0 ? 2 :
// (octet & 0xF0) == 0xE0 ? 3 :
// (octet & 0xF8) == 0xF0 ? 4 : 0;
// value = (octet & 0x80) == 0x00 ? octet & 0x7F :
// (octet & 0xE0) == 0xC0 ? octet & 0x1F :
// (octet & 0xF0) == 0xE0 ? octet & 0x0F :
// (octet & 0xF8) == 0xF0 ? octet & 0x07 : 0;
// if (!width) return 0;
// if (pointer+width > end) return 0;
// for (k = 1; k < width; k ++) {
// octet = pointer[k];
// if ((octet & 0xC0) != 0x80) return 0;
// value = (value << 6) + (octet & 0x3F);
// }
// if (!((width == 1) ||
// (width == 2 && value >= 0x80) ||
// (width == 3 && value >= 0x800) ||
// (width == 4 && value >= 0x10000))) return 0;
//
// pointer += width;
// }
//
// return 1;
//}
//
// Create STREAM-START.
func yaml_stream_start_event_initialize(event *yaml_event_t, encoding yaml_encoding_t) bool {
*event = yaml_event_t{
typ: yaml_STREAM_START_EVENT,
encoding: encoding,
}
return true
}
// Create STREAM-END.
func yaml_stream_end_event_initialize(event *yaml_event_t) bool {
*event = yaml_event_t{
typ: yaml_STREAM_END_EVENT,
}
return true
}
// Create DOCUMENT-START.
func yaml_document_start_event_initialize(event *yaml_event_t, version_directive *yaml_version_directive_t,
tag_directives []yaml_tag_directive_t, implicit bool) bool {
*event = yaml_event_t{
typ: yaml_DOCUMENT_START_EVENT,
version_directive: version_directive,
tag_directives: tag_directives,
implicit: implicit,
}
return true
}
// Create DOCUMENT-END.
func yaml_document_end_event_initialize(event *yaml_event_t, implicit bool) bool {
*event = yaml_event_t{
typ: yaml_DOCUMENT_END_EVENT,
implicit: implicit,
}
return true
}
///*
// * Create ALIAS.
// */
//
//YAML_DECLARE(int)
//yaml_alias_event_initialize(event *yaml_event_t, anchor *yaml_char_t)
//{
// mark yaml_mark_t = { 0, 0, 0 }
// anchor_copy *yaml_char_t = NULL
//
// assert(event) // Non-NULL event object is expected.
// assert(anchor) // Non-NULL anchor is expected.
//
// if (!yaml_check_utf8(anchor, strlen((char *)anchor))) return 0
//
// anchor_copy = yaml_strdup(anchor)
// if (!anchor_copy)
// return 0
//
// ALIAS_EVENT_INIT(*event, anchor_copy, mark, mark)
//
// return 1
//}
// Create SCALAR.
func yaml_scalar_event_initialize(event *yaml_event_t, anchor, tag, value []byte, plain_implicit, quoted_implicit bool, style yaml_scalar_style_t) bool {
*event = yaml_event_t{
typ: yaml_SCALAR_EVENT,
anchor: anchor,
tag: tag,
value: value,
implicit: plain_implicit,
quoted_implicit: quoted_implicit,
style: yaml_style_t(style),
}
return true
}
// Create SEQUENCE-START.
func yaml_sequence_start_event_initialize(event *yaml_event_t, anchor, tag []byte, implicit bool, style yaml_sequence_style_t) bool {
*event = yaml_event_t{
typ: yaml_SEQUENCE_START_EVENT,
anchor: anchor,
tag: tag,
implicit: implicit,
style: yaml_style_t(style),
}
return true
}
// Create SEQUENCE-END.
func yaml_sequence_end_event_initialize(event *yaml_event_t) bool {
*event = yaml_event_t{
typ: yaml_SEQUENCE_END_EVENT,
}
return true
}
// Create MAPPING-START.
func yaml_mapping_start_event_initialize(event *yaml_event_t, anchor, tag []byte, implicit bool, style yaml_mapping_style_t) bool {
*event = yaml_event_t{
typ: yaml_MAPPING_START_EVENT,
anchor: anchor,
tag: tag,
implicit: implicit,
style: yaml_style_t(style),
}
return true
}
// Create MAPPING-END.
func yaml_mapping_end_event_initialize(event *yaml_event_t) bool {
*event = yaml_event_t{
typ: yaml_MAPPING_END_EVENT,
}
return true
}
// Destroy an event object.
func yaml_event_delete(event *yaml_event_t) {
*event = yaml_event_t{}
}
///*
// * Create a document object.
// */
//
//YAML_DECLARE(int)
//yaml_document_initialize(document *yaml_document_t,
// version_directive *yaml_version_directive_t,
// tag_directives_start *yaml_tag_directive_t,
// tag_directives_end *yaml_tag_directive_t,
// start_implicit int, end_implicit int)
//{
// struct {
// error yaml_error_type_t
// } context
// struct {
// start *yaml_node_t
// end *yaml_node_t
// top *yaml_node_t
// } nodes = { NULL, NULL, NULL }
// version_directive_copy *yaml_version_directive_t = NULL
// struct {
// start *yaml_tag_directive_t
// end *yaml_tag_directive_t
// top *yaml_tag_directive_t
// } tag_directives_copy = { NULL, NULL, NULL }
// value yaml_tag_directive_t = { NULL, NULL }
// mark yaml_mark_t = { 0, 0, 0 }
//
// assert(document) // Non-NULL document object is expected.
// assert((tag_directives_start && tag_directives_end) ||
// (tag_directives_start == tag_directives_end))
// // Valid tag directives are expected.
//
// if (!STACK_INIT(&context, nodes, INITIAL_STACK_SIZE)) goto error
//
// if (version_directive) {
// version_directive_copy = yaml_malloc(sizeof(yaml_version_directive_t))
// if (!version_directive_copy) goto error
// version_directive_copy.major = version_directive.major
// version_directive_copy.minor = version_directive.minor
// }
//
// if (tag_directives_start != tag_directives_end) {
// tag_directive *yaml_tag_directive_t
// if (!STACK_INIT(&context, tag_directives_copy, INITIAL_STACK_SIZE))
// goto error
// for (tag_directive = tag_directives_start
// tag_directive != tag_directives_end; tag_directive ++) {
// assert(tag_directive.handle)
// assert(tag_directive.prefix)
// if (!yaml_check_utf8(tag_directive.handle,
// strlen((char *)tag_directive.handle)))
// goto error
// if (!yaml_check_utf8(tag_directive.prefix,
// strlen((char *)tag_directive.prefix)))
// goto error
// value.handle = yaml_strdup(tag_directive.handle)
// value.prefix = yaml_strdup(tag_directive.prefix)
// if (!value.handle || !value.prefix) goto error
// if (!PUSH(&context, tag_directives_copy, value))
// goto error
// value.handle = NULL
// value.prefix = NULL
// }
// }
//
// DOCUMENT_INIT(*document, nodes.start, nodes.end, version_directive_copy,
// tag_directives_copy.start, tag_directives_copy.top,
// start_implicit, end_implicit, mark, mark)
//
// return 1
//
//error:
// STACK_DEL(&context, nodes)
// yaml_free(version_directive_copy)
// while (!STACK_EMPTY(&context, tag_directives_copy)) {
// value yaml_tag_directive_t = POP(&context, tag_directives_copy)
// yaml_free(value.handle)
// yaml_free(value.prefix)
// }
// STACK_DEL(&context, tag_directives_copy)
// yaml_free(value.handle)
// yaml_free(value.prefix)
//
// return 0
//}
//
///*
// * Destroy a document object.
// */
//
//YAML_DECLARE(void)
//yaml_document_delete(document *yaml_document_t)
//{
// struct {
// error yaml_error_type_t
// } context
// tag_directive *yaml_tag_directive_t
//
// context.error = YAML_NO_ERROR // Eliminate a compiler warning.
//
// assert(document) // Non-NULL document object is expected.
//
// while (!STACK_EMPTY(&context, document.nodes)) {
// node yaml_node_t = POP(&context, document.nodes)
// yaml_free(node.tag)
// switch (node.type) {
// case YAML_SCALAR_NODE:
// yaml_free(node.data.scalar.value)
// break
// case YAML_SEQUENCE_NODE:
// STACK_DEL(&context, node.data.sequence.items)
// break
// case YAML_MAPPING_NODE:
// STACK_DEL(&context, node.data.mapping.pairs)
// break
// default:
// assert(0) // Should not happen.
// }
// }
// STACK_DEL(&context, document.nodes)
//
// yaml_free(document.version_directive)
// for (tag_directive = document.tag_directives.start
// tag_directive != document.tag_directives.end
// tag_directive++) {
// yaml_free(tag_directive.handle)
// yaml_free(tag_directive.prefix)
// }
// yaml_free(document.tag_directives.start)
//
// memset(document, 0, sizeof(yaml_document_t))
//}
//
///**
// * Get a document node.
// */
//
//YAML_DECLARE(yaml_node_t *)
//yaml_document_get_node(document *yaml_document_t, index int)
//{
// assert(document) // Non-NULL document object is expected.
//
// if (index > 0 && document.nodes.start + index <= document.nodes.top) {
// return document.nodes.start + index - 1
// }
// return NULL
//}
//
///**
// * Get the root object.
// */
//
//YAML_DECLARE(yaml_node_t *)
//yaml_document_get_root_node(document *yaml_document_t)
//{
// assert(document) // Non-NULL document object is expected.
//
// if (document.nodes.top != document.nodes.start) {
// return document.nodes.start
// }
// return NULL
//}
//
///*
// * Add a scalar node to a document.
// */
//
//YAML_DECLARE(int)
//yaml_document_add_scalar(document *yaml_document_t,
// tag *yaml_char_t, value *yaml_char_t, length int,
// style yaml_scalar_style_t)
//{
// struct {
// error yaml_error_type_t
// } context
// mark yaml_mark_t = { 0, 0, 0 }
// tag_copy *yaml_char_t = NULL
// value_copy *yaml_char_t = NULL
// node yaml_node_t
//
// assert(document) // Non-NULL document object is expected.
// assert(value) // Non-NULL value is expected.
//
// if (!tag) {
// tag = (yaml_char_t *)YAML_DEFAULT_SCALAR_TAG
// }
//
// if (!yaml_check_utf8(tag, strlen((char *)tag))) goto error
// tag_copy = yaml_strdup(tag)
// if (!tag_copy) goto error
//
// if (length < 0) {
// length = strlen((char *)value)
// }
//
// if (!yaml_check_utf8(value, length)) goto error
// value_copy = yaml_malloc(length+1)
// if (!value_copy) goto error
// memcpy(value_copy, value, length)
// value_copy[length] = '\0'
//
// SCALAR_NODE_INIT(node, tag_copy, value_copy, length, style, mark, mark)
// if (!PUSH(&context, document.nodes, node)) goto error
//
// return document.nodes.top - document.nodes.start
//
//error:
// yaml_free(tag_copy)
// yaml_free(value_copy)
//
// return 0
//}
//
///*
// * Add a sequence node to a document.
// */
//
//YAML_DECLARE(int)
//yaml_document_add_sequence(document *yaml_document_t,
// tag *yaml_char_t, style yaml_sequence_style_t)
//{
// struct {
// error yaml_error_type_t
// } context
// mark yaml_mark_t = { 0, 0, 0 }
// tag_copy *yaml_char_t = NULL
// struct {
// start *yaml_node_item_t
// end *yaml_node_item_t
// top *yaml_node_item_t
// } items = { NULL, NULL, NULL }
// node yaml_node_t
//
// assert(document) // Non-NULL document object is expected.
//
// if (!tag) {
// tag = (yaml_char_t *)YAML_DEFAULT_SEQUENCE_TAG
// }
//
// if (!yaml_check_utf8(tag, strlen((char *)tag))) goto error
// tag_copy = yaml_strdup(tag)
// if (!tag_copy) goto error
//
// if (!STACK_INIT(&context, items, INITIAL_STACK_SIZE)) goto error
//
// SEQUENCE_NODE_INIT(node, tag_copy, items.start, items.end,
// style, mark, mark)
// if (!PUSH(&context, document.nodes, node)) goto error
//
// return document.nodes.top - document.nodes.start
//
//error:
// STACK_DEL(&context, items)
// yaml_free(tag_copy)
//
// return 0
//}
//
///*
// * Add a mapping node to a document.
// */
//
//YAML_DECLARE(int)
//yaml_document_add_mapping(document *yaml_document_t,
// tag *yaml_char_t, style yaml_mapping_style_t)
//{
// struct {
// error yaml_error_type_t
// } context
// mark yaml_mark_t = { 0, 0, 0 }
// tag_copy *yaml_char_t = NULL
// struct {
// start *yaml_node_pair_t
// end *yaml_node_pair_t
// top *yaml_node_pair_t
// } pairs = { NULL, NULL, NULL }
// node yaml_node_t
//
// assert(document) // Non-NULL document object is expected.
//
// if (!tag) {
// tag = (yaml_char_t *)YAML_DEFAULT_MAPPING_TAG
// }
//
// if (!yaml_check_utf8(tag, strlen((char *)tag))) goto error
// tag_copy = yaml_strdup(tag)
// if (!tag_copy) goto error
//
// if (!STACK_INIT(&context, pairs, INITIAL_STACK_SIZE)) goto error
//
// MAPPING_NODE_INIT(node, tag_copy, pairs.start, pairs.end,
// style, mark, mark)
// if (!PUSH(&context, document.nodes, node)) goto error
//
// return document.nodes.top - document.nodes.start
//
//error:
// STACK_DEL(&context, pairs)
// yaml_free(tag_copy)
//
// return 0
//}
//
///*
// * Append an item to a sequence node.
// */
//
//YAML_DECLARE(int)
//yaml_document_append_sequence_item(document *yaml_document_t,
// sequence int, item int)
//{
// struct {
// error yaml_error_type_t
// } context
//
// assert(document) // Non-NULL document is required.
// assert(sequence > 0
// && document.nodes.start + sequence <= document.nodes.top)
// // Valid sequence id is required.
// assert(document.nodes.start[sequence-1].type == YAML_SEQUENCE_NODE)
// // A sequence node is required.
// assert(item > 0 && document.nodes.start + item <= document.nodes.top)
// // Valid item id is required.
//
// if (!PUSH(&context,
// document.nodes.start[sequence-1].data.sequence.items, item))
// return 0
//
// return 1
//}
//
///*
// * Append a pair of a key and a value to a mapping node.
// */
//
//YAML_DECLARE(int)
//yaml_document_append_mapping_pair(document *yaml_document_t,
// mapping int, key int, value int)
//{
// struct {
// error yaml_error_type_t
// } context
//
// pair yaml_node_pair_t
//
// assert(document) // Non-NULL document is required.
// assert(mapping > 0
// && document.nodes.start + mapping <= document.nodes.top)
// // Valid mapping id is required.
// assert(document.nodes.start[mapping-1].type == YAML_MAPPING_NODE)
// // A mapping node is required.
// assert(key > 0 && document.nodes.start + key <= document.nodes.top)
// // Valid key id is required.
// assert(value > 0 && document.nodes.start + value <= document.nodes.top)
// // Valid value id is required.
//
// pair.key = key
// pair.value = value
//
// if (!PUSH(&context,
// document.nodes.start[mapping-1].data.mapping.pairs, pair))
// return 0
//
// return 1
//}
//
//


@@ -1,683 +0,0 @@
package yaml
import (
"encoding"
"encoding/base64"
"fmt"
"math"
"reflect"
"strconv"
"time"
)
const (
documentNode = 1 << iota
mappingNode
sequenceNode
scalarNode
aliasNode
)
type node struct {
kind int
line, column int
tag string
value string
implicit bool
children []*node
anchors map[string]*node
}
// ----------------------------------------------------------------------------
// Parser, produces a node tree out of a libyaml event stream.
type parser struct {
parser yaml_parser_t
event yaml_event_t
doc *node
}
func newParser(b []byte) *parser {
p := parser{}
if !yaml_parser_initialize(&p.parser) {
panic("failed to initialize YAML emitter")
}
if len(b) == 0 {
b = []byte{'\n'}
}
yaml_parser_set_input_string(&p.parser, b)
p.skip()
if p.event.typ != yaml_STREAM_START_EVENT {
panic("expected stream start event, got " + strconv.Itoa(int(p.event.typ)))
}
p.skip()
return &p
}
func (p *parser) destroy() {
if p.event.typ != yaml_NO_EVENT {
yaml_event_delete(&p.event)
}
yaml_parser_delete(&p.parser)
}
func (p *parser) skip() {
if p.event.typ != yaml_NO_EVENT {
if p.event.typ == yaml_STREAM_END_EVENT {
failf("attempted to go past the end of stream; corrupted value?")
}
yaml_event_delete(&p.event)
}
if !yaml_parser_parse(&p.parser, &p.event) {
p.fail()
}
}
func (p *parser) fail() {
var where string
var line int
if p.parser.problem_mark.line != 0 {
line = p.parser.problem_mark.line
} else if p.parser.context_mark.line != 0 {
line = p.parser.context_mark.line
}
if line != 0 {
where = "line " + strconv.Itoa(line) + ": "
}
var msg string
if len(p.parser.problem) > 0 {
msg = p.parser.problem
} else {
msg = "unknown problem parsing YAML content"
}
failf("%s%s", where, msg)
}
func (p *parser) anchor(n *node, anchor []byte) {
if anchor != nil {
p.doc.anchors[string(anchor)] = n
}
}
func (p *parser) parse() *node {
switch p.event.typ {
case yaml_SCALAR_EVENT:
return p.scalar()
case yaml_ALIAS_EVENT:
return p.alias()
case yaml_MAPPING_START_EVENT:
return p.mapping()
case yaml_SEQUENCE_START_EVENT:
return p.sequence()
case yaml_DOCUMENT_START_EVENT:
return p.document()
case yaml_STREAM_END_EVENT:
// Happens when attempting to decode an empty buffer.
return nil
default:
panic("attempted to parse unknown event: " + strconv.Itoa(int(p.event.typ)))
}
panic("unreachable")
}
func (p *parser) node(kind int) *node {
return &node{
kind: kind,
line: p.event.start_mark.line,
column: p.event.start_mark.column,
}
}
func (p *parser) document() *node {
n := p.node(documentNode)
n.anchors = make(map[string]*node)
p.doc = n
p.skip()
n.children = append(n.children, p.parse())
if p.event.typ != yaml_DOCUMENT_END_EVENT {
panic("expected end of document event but got " + strconv.Itoa(int(p.event.typ)))
}
p.skip()
return n
}
func (p *parser) alias() *node {
n := p.node(aliasNode)
n.value = string(p.event.anchor)
p.skip()
return n
}
func (p *parser) scalar() *node {
n := p.node(scalarNode)
n.value = string(p.event.value)
n.tag = string(p.event.tag)
n.implicit = p.event.implicit
p.anchor(n, p.event.anchor)
p.skip()
return n
}
func (p *parser) sequence() *node {
n := p.node(sequenceNode)
p.anchor(n, p.event.anchor)
p.skip()
for p.event.typ != yaml_SEQUENCE_END_EVENT {
n.children = append(n.children, p.parse())
}
p.skip()
return n
}
func (p *parser) mapping() *node {
n := p.node(mappingNode)
p.anchor(n, p.event.anchor)
p.skip()
for p.event.typ != yaml_MAPPING_END_EVENT {
n.children = append(n.children, p.parse(), p.parse())
}
p.skip()
return n
}
// ----------------------------------------------------------------------------
// Decoder, unmarshals a node into a provided value.
type decoder struct {
doc *node
aliases map[string]bool
mapType reflect.Type
terrors []string
}
var (
mapItemType = reflect.TypeOf(MapItem{})
durationType = reflect.TypeOf(time.Duration(0))
defaultMapType = reflect.TypeOf(map[interface{}]interface{}{})
ifaceType = defaultMapType.Elem()
)
func newDecoder() *decoder {
d := &decoder{mapType: defaultMapType}
d.aliases = make(map[string]bool)
return d
}
func (d *decoder) terror(n *node, tag string, out reflect.Value) {
if n.tag != "" {
tag = n.tag
}
value := n.value
if tag != yaml_SEQ_TAG && tag != yaml_MAP_TAG {
if len(value) > 10 {
value = " `" + value[:7] + "...`"
} else {
value = " `" + value + "`"
}
}
d.terrors = append(d.terrors, fmt.Sprintf("line %d: cannot unmarshal %s%s into %s", n.line+1, shortTag(tag), value, out.Type()))
}
func (d *decoder) callUnmarshaler(n *node, u Unmarshaler) (good bool) {
terrlen := len(d.terrors)
err := u.UnmarshalYAML(func(v interface{}) (err error) {
defer handleErr(&err)
d.unmarshal(n, reflect.ValueOf(v))
if len(d.terrors) > terrlen {
issues := d.terrors[terrlen:]
d.terrors = d.terrors[:terrlen]
return &TypeError{issues}
}
return nil
})
if e, ok := err.(*TypeError); ok {
d.terrors = append(d.terrors, e.Errors...)
return false
}
if err != nil {
fail(err)
}
return true
}
// d.prepare initializes and dereferences pointers and calls UnmarshalYAML
// if a value is found to implement it.
// It returns the initialized and dereferenced out value, whether
// unmarshalling was already done by UnmarshalYAML, and if so whether
// its types unmarshalled appropriately.
//
// If n holds a null value, prepare returns before doing anything.
func (d *decoder) prepare(n *node, out reflect.Value) (newout reflect.Value, unmarshaled, good bool) {
if n.tag == yaml_NULL_TAG || n.kind == scalarNode && n.tag == "" && (n.value == "null" || n.value == "") {
return out, false, false
}
again := true
for again {
again = false
if out.Kind() == reflect.Ptr {
if out.IsNil() {
out.Set(reflect.New(out.Type().Elem()))
}
out = out.Elem()
again = true
}
if out.CanAddr() {
if u, ok := out.Addr().Interface().(Unmarshaler); ok {
good = d.callUnmarshaler(n, u)
return out, true, good
}
}
}
return out, false, false
}
func (d *decoder) unmarshal(n *node, out reflect.Value) (good bool) {
switch n.kind {
case documentNode:
return d.document(n, out)
case aliasNode:
return d.alias(n, out)
}
out, unmarshaled, good := d.prepare(n, out)
if unmarshaled {
return good
}
switch n.kind {
case scalarNode:
good = d.scalar(n, out)
case mappingNode:
good = d.mapping(n, out)
case sequenceNode:
good = d.sequence(n, out)
default:
panic("internal error: unknown node kind: " + strconv.Itoa(n.kind))
}
return good
}
func (d *decoder) document(n *node, out reflect.Value) (good bool) {
if len(n.children) == 1 {
d.doc = n
d.unmarshal(n.children[0], out)
return true
}
return false
}
func (d *decoder) alias(n *node, out reflect.Value) (good bool) {
an, ok := d.doc.anchors[n.value]
if !ok {
failf("unknown anchor '%s' referenced", n.value)
}
if d.aliases[n.value] {
failf("anchor '%s' value contains itself", n.value)
}
d.aliases[n.value] = true
good = d.unmarshal(an, out)
delete(d.aliases, n.value)
return good
}
var zeroValue reflect.Value
func resetMap(out reflect.Value) {
for _, k := range out.MapKeys() {
out.SetMapIndex(k, zeroValue)
}
}
func (d *decoder) scalar(n *node, out reflect.Value) (good bool) {
var tag string
var resolved interface{}
if n.tag == "" && !n.implicit {
tag = yaml_STR_TAG
resolved = n.value
} else {
tag, resolved = resolve(n.tag, n.value)
if tag == yaml_BINARY_TAG {
data, err := base64.StdEncoding.DecodeString(resolved.(string))
if err != nil {
failf("!!binary value contains invalid base64 data")
}
resolved = string(data)
}
}
if resolved == nil {
if out.Kind() == reflect.Map && !out.CanAddr() {
resetMap(out)
} else {
out.Set(reflect.Zero(out.Type()))
}
return true
}
if s, ok := resolved.(string); ok && out.CanAddr() {
if u, ok := out.Addr().Interface().(encoding.TextUnmarshaler); ok {
err := u.UnmarshalText([]byte(s))
if err != nil {
fail(err)
}
return true
}
}
switch out.Kind() {
case reflect.String:
if tag == yaml_BINARY_TAG {
out.SetString(resolved.(string))
good = true
} else if resolved != nil {
out.SetString(n.value)
good = true
}
case reflect.Interface:
if resolved == nil {
out.Set(reflect.Zero(out.Type()))
} else {
out.Set(reflect.ValueOf(resolved))
}
good = true
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
switch resolved := resolved.(type) {
case int:
if !out.OverflowInt(int64(resolved)) {
out.SetInt(int64(resolved))
good = true
}
case int64:
if !out.OverflowInt(resolved) {
out.SetInt(resolved)
good = true
}
case uint64:
if resolved <= math.MaxInt64 && !out.OverflowInt(int64(resolved)) {
out.SetInt(int64(resolved))
good = true
}
case float64:
if resolved <= math.MaxInt64 && !out.OverflowInt(int64(resolved)) {
out.SetInt(int64(resolved))
good = true
}
case string:
if out.Type() == durationType {
d, err := time.ParseDuration(resolved)
if err == nil {
out.SetInt(int64(d))
good = true
}
}
}
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
switch resolved := resolved.(type) {
case int:
if resolved >= 0 && !out.OverflowUint(uint64(resolved)) {
out.SetUint(uint64(resolved))
good = true
}
case int64:
if resolved >= 0 && !out.OverflowUint(uint64(resolved)) {
out.SetUint(uint64(resolved))
good = true
}
case uint64:
if !out.OverflowUint(uint64(resolved)) {
out.SetUint(uint64(resolved))
good = true
}
case float64:
if resolved <= math.MaxUint64 && !out.OverflowUint(uint64(resolved)) {
out.SetUint(uint64(resolved))
good = true
}
}
case reflect.Bool:
switch resolved := resolved.(type) {
case bool:
out.SetBool(resolved)
good = true
}
case reflect.Float32, reflect.Float64:
switch resolved := resolved.(type) {
case int:
out.SetFloat(float64(resolved))
good = true
case int64:
out.SetFloat(float64(resolved))
good = true
case uint64:
out.SetFloat(float64(resolved))
good = true
case float64:
out.SetFloat(resolved)
good = true
}
case reflect.Ptr:
if out.Type().Elem() == reflect.TypeOf(resolved) {
// TODO Does this make sense? When is out a Ptr except when decoding a nil value?
elem := reflect.New(out.Type().Elem())
elem.Elem().Set(reflect.ValueOf(resolved))
out.Set(elem)
good = true
}
}
if !good {
d.terror(n, tag, out)
}
return good
}
func settableValueOf(i interface{}) reflect.Value {
v := reflect.ValueOf(i)
sv := reflect.New(v.Type()).Elem()
sv.Set(v)
return sv
}
func (d *decoder) sequence(n *node, out reflect.Value) (good bool) {
l := len(n.children)
var iface reflect.Value
switch out.Kind() {
case reflect.Slice:
out.Set(reflect.MakeSlice(out.Type(), l, l))
case reflect.Interface:
// No type hints. Will have to use a generic sequence.
iface = out
out = settableValueOf(make([]interface{}, l))
default:
d.terror(n, yaml_SEQ_TAG, out)
return false
}
et := out.Type().Elem()
j := 0
for i := 0; i < l; i++ {
e := reflect.New(et).Elem()
if ok := d.unmarshal(n.children[i], e); ok {
out.Index(j).Set(e)
j++
}
}
out.Set(out.Slice(0, j))
if iface.IsValid() {
iface.Set(out)
}
return true
}
func (d *decoder) mapping(n *node, out reflect.Value) (good bool) {
switch out.Kind() {
case reflect.Struct:
return d.mappingStruct(n, out)
case reflect.Slice:
return d.mappingSlice(n, out)
case reflect.Map:
// okay
case reflect.Interface:
if d.mapType.Kind() == reflect.Map {
iface := out
out = reflect.MakeMap(d.mapType)
iface.Set(out)
} else {
slicev := reflect.New(d.mapType).Elem()
if !d.mappingSlice(n, slicev) {
return false
}
out.Set(slicev)
return true
}
default:
d.terror(n, yaml_MAP_TAG, out)
return false
}
outt := out.Type()
kt := outt.Key()
et := outt.Elem()
mapType := d.mapType
if outt.Key() == ifaceType && outt.Elem() == ifaceType {
d.mapType = outt
}
if out.IsNil() {
out.Set(reflect.MakeMap(outt))
}
l := len(n.children)
for i := 0; i < l; i += 2 {
if isMerge(n.children[i]) {
d.merge(n.children[i+1], out)
continue
}
k := reflect.New(kt).Elem()
if d.unmarshal(n.children[i], k) {
kkind := k.Kind()
if kkind == reflect.Interface {
kkind = k.Elem().Kind()
}
if kkind == reflect.Map || kkind == reflect.Slice {
failf("invalid map key: %#v", k.Interface())
}
e := reflect.New(et).Elem()
if d.unmarshal(n.children[i+1], e) {
out.SetMapIndex(k, e)
}
}
}
d.mapType = mapType
return true
}
func (d *decoder) mappingSlice(n *node, out reflect.Value) (good bool) {
outt := out.Type()
if outt.Elem() != mapItemType {
d.terror(n, yaml_MAP_TAG, out)
return false
}
mapType := d.mapType
d.mapType = outt
var slice []MapItem
var l = len(n.children)
for i := 0; i < l; i += 2 {
if isMerge(n.children[i]) {
d.merge(n.children[i+1], out)
continue
}
item := MapItem{}
k := reflect.ValueOf(&item.Key).Elem()
if d.unmarshal(n.children[i], k) {
v := reflect.ValueOf(&item.Value).Elem()
if d.unmarshal(n.children[i+1], v) {
slice = append(slice, item)
}
}
}
out.Set(reflect.ValueOf(slice))
d.mapType = mapType
return true
}
func (d *decoder) mappingStruct(n *node, out reflect.Value) (good bool) {
sinfo, err := getStructInfo(out.Type())
if err != nil {
panic(err)
}
name := settableValueOf("")
l := len(n.children)
var inlineMap reflect.Value
var elemType reflect.Type
if sinfo.InlineMap != -1 {
inlineMap = out.Field(sinfo.InlineMap)
inlineMap.Set(reflect.New(inlineMap.Type()).Elem())
elemType = inlineMap.Type().Elem()
}
for i := 0; i < l; i += 2 {
ni := n.children[i]
if isMerge(ni) {
d.merge(n.children[i+1], out)
continue
}
if !d.unmarshal(ni, name) {
continue
}
if info, ok := sinfo.FieldsMap[name.String()]; ok {
var field reflect.Value
if info.Inline == nil {
field = out.Field(info.Num)
} else {
field = out.FieldByIndex(info.Inline)
}
d.unmarshal(n.children[i+1], field)
} else if sinfo.InlineMap != -1 {
if inlineMap.IsNil() {
inlineMap.Set(reflect.MakeMap(inlineMap.Type()))
}
value := reflect.New(elemType).Elem()
d.unmarshal(n.children[i+1], value)
inlineMap.SetMapIndex(name, value)
}
}
return true
}
func failWantMap() {
failf("map merge requires map or sequence of maps as the value")
}
func (d *decoder) merge(n *node, out reflect.Value) {
switch n.kind {
case mappingNode:
d.unmarshal(n, out)
case aliasNode:
an, ok := d.doc.anchors[n.value]
if ok && an.kind != mappingNode {
failWantMap()
}
d.unmarshal(n, out)
case sequenceNode:
// Step backwards as earlier nodes take precedence.
for i := len(n.children) - 1; i >= 0; i-- {
ni := n.children[i]
if ni.kind == aliasNode {
an, ok := d.doc.anchors[ni.value]
if ok && an.kind != mappingNode {
failWantMap()
}
} else if ni.kind != mappingNode {
failWantMap()
}
d.unmarshal(ni, out)
}
default:
failWantMap()
}
}
func isMerge(n *node) bool {
return n.kind == scalarNode && n.value == "<<" && (n.implicit == true || n.tag == yaml_MERGE_TAG)
}
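For context, isMerge and merge above implement YAML's `<<` merge key. The sketch below is an editorial illustration, not part of the vendored file; the document contents are made up purely to show what that feature does when decoding with this package:
```Go
package main

import (
	"fmt"
	"log"

	"gopkg.in/yaml.v2"
)

// The "<<" key under "development" pulls in the anchored "defaults" map.
var doc = []byte(`
defaults: &defaults
  adapter: postgres
  host: localhost
development:
  <<: *defaults
  database: dev_db
`)

func main() {
	m := map[string]map[string]string{}
	if err := yaml.Unmarshal(doc, &m); err != nil {
		log.Fatalf("error: %v", err)
	}
	// Merged keys appear alongside the ones defined locally.
	fmt.Println(m["development"]["adapter"], m["development"]["database"]) // postgres dev_db
}
```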

File diff suppressed because it is too large


@@ -1,306 +0,0 @@
package yaml
import (
"encoding"
"fmt"
"reflect"
"regexp"
"sort"
"strconv"
"strings"
"time"
)
type encoder struct {
emitter yaml_emitter_t
event yaml_event_t
out []byte
flow bool
}
func newEncoder() (e *encoder) {
e = &encoder{}
e.must(yaml_emitter_initialize(&e.emitter))
yaml_emitter_set_output_string(&e.emitter, &e.out)
yaml_emitter_set_unicode(&e.emitter, true)
e.must(yaml_stream_start_event_initialize(&e.event, yaml_UTF8_ENCODING))
e.emit()
e.must(yaml_document_start_event_initialize(&e.event, nil, nil, true))
e.emit()
return e
}
func (e *encoder) finish() {
e.must(yaml_document_end_event_initialize(&e.event, true))
e.emit()
e.emitter.open_ended = false
e.must(yaml_stream_end_event_initialize(&e.event))
e.emit()
}
func (e *encoder) destroy() {
yaml_emitter_delete(&e.emitter)
}
func (e *encoder) emit() {
// This will internally delete the e.event value.
if !yaml_emitter_emit(&e.emitter, &e.event) && e.event.typ != yaml_DOCUMENT_END_EVENT && e.event.typ != yaml_STREAM_END_EVENT {
e.must(false)
}
}
func (e *encoder) must(ok bool) {
if !ok {
msg := e.emitter.problem
if msg == "" {
msg = "unknown problem generating YAML content"
}
failf("%s", msg)
}
}
func (e *encoder) marshal(tag string, in reflect.Value) {
if !in.IsValid() {
e.nilv()
return
}
iface := in.Interface()
if m, ok := iface.(Marshaler); ok {
v, err := m.MarshalYAML()
if err != nil {
fail(err)
}
if v == nil {
e.nilv()
return
}
in = reflect.ValueOf(v)
} else if m, ok := iface.(encoding.TextMarshaler); ok {
text, err := m.MarshalText()
if err != nil {
fail(err)
}
in = reflect.ValueOf(string(text))
}
switch in.Kind() {
case reflect.Interface:
if in.IsNil() {
e.nilv()
} else {
e.marshal(tag, in.Elem())
}
case reflect.Map:
e.mapv(tag, in)
case reflect.Ptr:
if in.IsNil() {
e.nilv()
} else {
e.marshal(tag, in.Elem())
}
case reflect.Struct:
e.structv(tag, in)
case reflect.Slice:
if in.Type().Elem() == mapItemType {
e.itemsv(tag, in)
} else {
e.slicev(tag, in)
}
case reflect.String:
e.stringv(tag, in)
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
if in.Type() == durationType {
e.stringv(tag, reflect.ValueOf(iface.(time.Duration).String()))
} else {
e.intv(tag, in)
}
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
e.uintv(tag, in)
case reflect.Float32, reflect.Float64:
e.floatv(tag, in)
case reflect.Bool:
e.boolv(tag, in)
default:
panic("cannot marshal type: " + in.Type().String())
}
}
func (e *encoder) mapv(tag string, in reflect.Value) {
e.mappingv(tag, func() {
keys := keyList(in.MapKeys())
sort.Sort(keys)
for _, k := range keys {
e.marshal("", k)
e.marshal("", in.MapIndex(k))
}
})
}
func (e *encoder) itemsv(tag string, in reflect.Value) {
e.mappingv(tag, func() {
slice := in.Convert(reflect.TypeOf([]MapItem{})).Interface().([]MapItem)
for _, item := range slice {
e.marshal("", reflect.ValueOf(item.Key))
e.marshal("", reflect.ValueOf(item.Value))
}
})
}
func (e *encoder) structv(tag string, in reflect.Value) {
sinfo, err := getStructInfo(in.Type())
if err != nil {
panic(err)
}
e.mappingv(tag, func() {
for _, info := range sinfo.FieldsList {
var value reflect.Value
if info.Inline == nil {
value = in.Field(info.Num)
} else {
value = in.FieldByIndex(info.Inline)
}
if info.OmitEmpty && isZero(value) {
continue
}
e.marshal("", reflect.ValueOf(info.Key))
e.flow = info.Flow
e.marshal("", value)
}
if sinfo.InlineMap >= 0 {
m := in.Field(sinfo.InlineMap)
if m.Len() > 0 {
e.flow = false
keys := keyList(m.MapKeys())
sort.Sort(keys)
for _, k := range keys {
if _, found := sinfo.FieldsMap[k.String()]; found {
panic(fmt.Sprintf("Can't have key %q in inlined map; conflicts with struct field", k.String()))
}
e.marshal("", k)
e.flow = false
e.marshal("", m.MapIndex(k))
}
}
}
})
}
func (e *encoder) mappingv(tag string, f func()) {
implicit := tag == ""
style := yaml_BLOCK_MAPPING_STYLE
if e.flow {
e.flow = false
style = yaml_FLOW_MAPPING_STYLE
}
e.must(yaml_mapping_start_event_initialize(&e.event, nil, []byte(tag), implicit, style))
e.emit()
f()
e.must(yaml_mapping_end_event_initialize(&e.event))
e.emit()
}
func (e *encoder) slicev(tag string, in reflect.Value) {
implicit := tag == ""
style := yaml_BLOCK_SEQUENCE_STYLE
if e.flow {
e.flow = false
style = yaml_FLOW_SEQUENCE_STYLE
}
e.must(yaml_sequence_start_event_initialize(&e.event, nil, []byte(tag), implicit, style))
e.emit()
n := in.Len()
for i := 0; i < n; i++ {
e.marshal("", in.Index(i))
}
e.must(yaml_sequence_end_event_initialize(&e.event))
e.emit()
}
// isBase60 returns whether s is in base 60 notation as defined in YAML 1.1.
//
// The base 60 float notation in YAML 1.1 is a terrible idea and is unsupported
// in YAML 1.2 and by this package, but these should be marshalled quoted for
// the time being for compatibility with other parsers.
func isBase60Float(s string) (result bool) {
// Fast path.
if s == "" {
return false
}
c := s[0]
if !(c == '+' || c == '-' || c >= '0' && c <= '9') || strings.IndexByte(s, ':') < 0 {
return false
}
// Do the full match.
return base60float.MatchString(s)
}
// From http://yaml.org/type/float.html, except the regular expression there
// is bogus. In practice parsers do not enforce the "\.[0-9_]*" suffix.
var base60float = regexp.MustCompile(`^[-+]?[0-9][0-9_]*(?::[0-5]?[0-9])+(?:\.[0-9_]*)?$`)
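As an editorial aside (not part of the vendored file), the quoting rule described above is easy to observe: a plain string that happens to look like a YAML 1.1 base-60 float is emitted double-quoted so other parsers will not read it as a number:
```Go
package main

import (
	"fmt"
	"log"

	"gopkg.in/yaml.v2"
)

func main() {
	// "1:20:30" would be 4830 as a YAML 1.1 base-60 float, so it is quoted.
	out, err := yaml.Marshal(map[string]string{"time": "1:20:30"})
	if err != nil {
		log.Fatalf("error: %v", err)
	}
	fmt.Print(string(out)) // time: "1:20:30"
}
```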
func (e *encoder) stringv(tag string, in reflect.Value) {
var style yaml_scalar_style_t
s := in.String()
rtag, rs := resolve("", s)
if rtag == yaml_BINARY_TAG {
if tag == "" || tag == yaml_STR_TAG {
tag = rtag
s = rs.(string)
} else if tag == yaml_BINARY_TAG {
failf("explicitly tagged !!binary data must be base64-encoded")
} else {
failf("cannot marshal invalid UTF-8 data as %s", shortTag(tag))
}
}
if tag == "" && (rtag != yaml_STR_TAG || isBase60Float(s)) {
style = yaml_DOUBLE_QUOTED_SCALAR_STYLE
} else if strings.Contains(s, "\n") {
style = yaml_LITERAL_SCALAR_STYLE
} else {
style = yaml_PLAIN_SCALAR_STYLE
}
e.emitScalar(s, "", tag, style)
}
func (e *encoder) boolv(tag string, in reflect.Value) {
var s string
if in.Bool() {
s = "true"
} else {
s = "false"
}
e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE)
}
func (e *encoder) intv(tag string, in reflect.Value) {
s := strconv.FormatInt(in.Int(), 10)
e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE)
}
func (e *encoder) uintv(tag string, in reflect.Value) {
s := strconv.FormatUint(in.Uint(), 10)
e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE)
}
func (e *encoder) floatv(tag string, in reflect.Value) {
// FIXME: Handle 64 bits here.
s := strconv.FormatFloat(float64(in.Float()), 'g', -1, 32)
switch s {
case "+Inf":
s = ".inf"
case "-Inf":
s = "-.inf"
case "NaN":
s = ".nan"
}
e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE)
}
func (e *encoder) nilv() {
e.emitScalar("null", "", "", yaml_PLAIN_SCALAR_STYLE)
}
func (e *encoder) emitScalar(value, anchor, tag string, style yaml_scalar_style_t) {
implicit := tag == ""
e.must(yaml_scalar_event_initialize(&e.event, []byte(anchor), []byte(tag), []byte(value), implicit, implicit, style))
e.emit()
}

File diff suppressed because it is too large


@@ -1,391 +0,0 @@
package yaml
import (
"io"
)
// Set the reader error and return false.
func yaml_parser_set_reader_error(parser *yaml_parser_t, problem string, offset int, value int) bool {
parser.error = yaml_READER_ERROR
parser.problem = problem
parser.problem_offset = offset
parser.problem_value = value
return false
}
// Byte order marks.
const (
bom_UTF8 = "\xef\xbb\xbf"
bom_UTF16LE = "\xff\xfe"
bom_UTF16BE = "\xfe\xff"
)
// Determine the input stream encoding by checking the BOM symbol. If no BOM is
// found, the UTF-8 encoding is assumed. Return true on success, false on failure.
func yaml_parser_determine_encoding(parser *yaml_parser_t) bool {
// Ensure that we had enough bytes in the raw buffer.
for !parser.eof && len(parser.raw_buffer)-parser.raw_buffer_pos < 3 {
if !yaml_parser_update_raw_buffer(parser) {
return false
}
}
// Determine the encoding.
buf := parser.raw_buffer
pos := parser.raw_buffer_pos
avail := len(buf) - pos
if avail >= 2 && buf[pos] == bom_UTF16LE[0] && buf[pos+1] == bom_UTF16LE[1] {
parser.encoding = yaml_UTF16LE_ENCODING
parser.raw_buffer_pos += 2
parser.offset += 2
} else if avail >= 2 && buf[pos] == bom_UTF16BE[0] && buf[pos+1] == bom_UTF16BE[1] {
parser.encoding = yaml_UTF16BE_ENCODING
parser.raw_buffer_pos += 2
parser.offset += 2
} else if avail >= 3 && buf[pos] == bom_UTF8[0] && buf[pos+1] == bom_UTF8[1] && buf[pos+2] == bom_UTF8[2] {
parser.encoding = yaml_UTF8_ENCODING
parser.raw_buffer_pos += 3
parser.offset += 3
} else {
parser.encoding = yaml_UTF8_ENCODING
}
return true
}
// Update the raw buffer.
func yaml_parser_update_raw_buffer(parser *yaml_parser_t) bool {
size_read := 0
// Return if the raw buffer is full.
if parser.raw_buffer_pos == 0 && len(parser.raw_buffer) == cap(parser.raw_buffer) {
return true
}
// Return on EOF.
if parser.eof {
return true
}
// Move the remaining bytes in the raw buffer to the beginning.
if parser.raw_buffer_pos > 0 && parser.raw_buffer_pos < len(parser.raw_buffer) {
copy(parser.raw_buffer, parser.raw_buffer[parser.raw_buffer_pos:])
}
parser.raw_buffer = parser.raw_buffer[:len(parser.raw_buffer)-parser.raw_buffer_pos]
parser.raw_buffer_pos = 0
// Call the read handler to fill the buffer.
size_read, err := parser.read_handler(parser, parser.raw_buffer[len(parser.raw_buffer):cap(parser.raw_buffer)])
parser.raw_buffer = parser.raw_buffer[:len(parser.raw_buffer)+size_read]
if err == io.EOF {
parser.eof = true
} else if err != nil {
return yaml_parser_set_reader_error(parser, "input error: "+err.Error(), parser.offset, -1)
}
return true
}
// Ensure that the buffer contains at least `length` characters.
// Return true on success, false on failure.
//
// The length is supposed to be significantly less than the buffer size.
func yaml_parser_update_buffer(parser *yaml_parser_t, length int) bool {
if parser.read_handler == nil {
panic("read handler must be set")
}
// If the EOF flag is set and the raw buffer is empty, do nothing.
if parser.eof && parser.raw_buffer_pos == len(parser.raw_buffer) {
return true
}
// Return if the buffer contains enough characters.
if parser.unread >= length {
return true
}
// Determine the input encoding if it is not known yet.
if parser.encoding == yaml_ANY_ENCODING {
if !yaml_parser_determine_encoding(parser) {
return false
}
}
// Move the unread characters to the beginning of the buffer.
buffer_len := len(parser.buffer)
if parser.buffer_pos > 0 && parser.buffer_pos < buffer_len {
copy(parser.buffer, parser.buffer[parser.buffer_pos:])
buffer_len -= parser.buffer_pos
parser.buffer_pos = 0
} else if parser.buffer_pos == buffer_len {
buffer_len = 0
parser.buffer_pos = 0
}
// Open the whole buffer for writing, and cut it before returning.
parser.buffer = parser.buffer[:cap(parser.buffer)]
// Fill the buffer until it has enough characters.
first := true
for parser.unread < length {
// Fill the raw buffer if necessary.
if !first || parser.raw_buffer_pos == len(parser.raw_buffer) {
if !yaml_parser_update_raw_buffer(parser) {
parser.buffer = parser.buffer[:buffer_len]
return false
}
}
first = false
// Decode the raw buffer.
inner:
for parser.raw_buffer_pos != len(parser.raw_buffer) {
var value rune
var width int
raw_unread := len(parser.raw_buffer) - parser.raw_buffer_pos
// Decode the next character.
switch parser.encoding {
case yaml_UTF8_ENCODING:
// Decode a UTF-8 character. Check RFC 3629
// (http://www.ietf.org/rfc/rfc3629.txt) for more details.
//
// The following table (taken from the RFC) is used for
// decoding.
//
// Char. number range | UTF-8 octet sequence
// (hexadecimal) | (binary)
// --------------------+------------------------------------
// 0000 0000-0000 007F | 0xxxxxxx
// 0000 0080-0000 07FF | 110xxxxx 10xxxxxx
// 0000 0800-0000 FFFF | 1110xxxx 10xxxxxx 10xxxxxx
// 0001 0000-0010 FFFF | 11110xxx 10xxxxxx 10xxxxxx 10xxxxxx
//
// Additionally, the characters in the range 0xD800-0xDFFF
// are prohibited as they are reserved for use with UTF-16
// surrogate pairs.
// Determine the length of the UTF-8 sequence.
octet := parser.raw_buffer[parser.raw_buffer_pos]
switch {
case octet&0x80 == 0x00:
width = 1
case octet&0xE0 == 0xC0:
width = 2
case octet&0xF0 == 0xE0:
width = 3
case octet&0xF8 == 0xF0:
width = 4
default:
// The leading octet is invalid.
return yaml_parser_set_reader_error(parser,
"invalid leading UTF-8 octet",
parser.offset, int(octet))
}
// Check if the raw buffer contains an incomplete character.
if width > raw_unread {
if parser.eof {
return yaml_parser_set_reader_error(parser,
"incomplete UTF-8 octet sequence",
parser.offset, -1)
}
break inner
}
// Decode the leading octet.
switch {
case octet&0x80 == 0x00:
value = rune(octet & 0x7F)
case octet&0xE0 == 0xC0:
value = rune(octet & 0x1F)
case octet&0xF0 == 0xE0:
value = rune(octet & 0x0F)
case octet&0xF8 == 0xF0:
value = rune(octet & 0x07)
default:
value = 0
}
// Check and decode the trailing octets.
for k := 1; k < width; k++ {
octet = parser.raw_buffer[parser.raw_buffer_pos+k]
// Check if the octet is valid.
if (octet & 0xC0) != 0x80 {
return yaml_parser_set_reader_error(parser,
"invalid trailing UTF-8 octet",
parser.offset+k, int(octet))
}
// Decode the octet.
value = (value << 6) + rune(octet&0x3F)
}
// Check the length of the sequence against the value.
switch {
case width == 1:
case width == 2 && value >= 0x80:
case width == 3 && value >= 0x800:
case width == 4 && value >= 0x10000:
default:
return yaml_parser_set_reader_error(parser,
"invalid length of a UTF-8 sequence",
parser.offset, -1)
}
// Check the range of the value.
if value >= 0xD800 && value <= 0xDFFF || value > 0x10FFFF {
return yaml_parser_set_reader_error(parser,
"invalid Unicode character",
parser.offset, int(value))
}
case yaml_UTF16LE_ENCODING, yaml_UTF16BE_ENCODING:
var low, high int
if parser.encoding == yaml_UTF16LE_ENCODING {
low, high = 0, 1
} else {
high, low = 1, 0
}
// The UTF-16 encoding is not as simple as one might
// naively think. Check RFC 2781
// (http://www.ietf.org/rfc/rfc2781.txt).
//
// Normally, two subsequent bytes describe a Unicode
// character. However a special technique (called a
// surrogate pair) is used for specifying character
// values larger than 0xFFFF.
//
// A surrogate pair consists of two pseudo-characters:
// high surrogate area (0xD800-0xDBFF)
// low surrogate area (0xDC00-0xDFFF)
//
// The following formulas are used for decoding
// and encoding characters using surrogate pairs:
//
// U = U' + 0x10000 (0x01 00 00 <= U <= 0x10 FF FF)
// U' = yyyyyyyyyyxxxxxxxxxx (0 <= U' <= 0x0F FF FF)
// W1 = 110110yyyyyyyyyy
// W2 = 110111xxxxxxxxxx
//
// where U is the character value, W1 is the high surrogate
// area, W2 is the low surrogate area.
// Check for incomplete UTF-16 character.
if raw_unread < 2 {
if parser.eof {
return yaml_parser_set_reader_error(parser,
"incomplete UTF-16 character",
parser.offset, -1)
}
break inner
}
// Get the character.
value = rune(parser.raw_buffer[parser.raw_buffer_pos+low]) +
(rune(parser.raw_buffer[parser.raw_buffer_pos+high]) << 8)
// Check for unexpected low surrogate area.
if value&0xFC00 == 0xDC00 {
return yaml_parser_set_reader_error(parser,
"unexpected low surrogate area",
parser.offset, int(value))
}
// Check for a high surrogate area.
if value&0xFC00 == 0xD800 {
width = 4
// Check for incomplete surrogate pair.
if raw_unread < 4 {
if parser.eof {
return yaml_parser_set_reader_error(parser,
"incomplete UTF-16 surrogate pair",
parser.offset, -1)
}
break inner
}
// Get the next character.
value2 := rune(parser.raw_buffer[parser.raw_buffer_pos+low+2]) +
(rune(parser.raw_buffer[parser.raw_buffer_pos+high+2]) << 8)
// Check for a low surrogate area.
if value2&0xFC00 != 0xDC00 {
return yaml_parser_set_reader_error(parser,
"expected low surrogate area",
parser.offset+2, int(value2))
}
// Generate the value of the surrogate pair.
value = 0x10000 + ((value & 0x3FF) << 10) + (value2 & 0x3FF)
} else {
width = 2
}
default:
panic("impossible")
}
// Check if the character is in the allowed range:
// #x9 | #xA | #xD | [#x20-#x7E] (8 bit)
// | #x85 | [#xA0-#xD7FF] | [#xE000-#xFFFD] (16 bit)
// | [#x10000-#x10FFFF] (32 bit)
switch {
case value == 0x09:
case value == 0x0A:
case value == 0x0D:
case value >= 0x20 && value <= 0x7E:
case value == 0x85:
case value >= 0xA0 && value <= 0xD7FF:
case value >= 0xE000 && value <= 0xFFFD:
case value >= 0x10000 && value <= 0x10FFFF:
default:
return yaml_parser_set_reader_error(parser,
"control characters are not allowed",
parser.offset, int(value))
}
// Move the raw pointers.
parser.raw_buffer_pos += width
parser.offset += width
// Finally put the character into the buffer.
if value <= 0x7F {
// 0000 0000-0000 007F . 0xxxxxxx
parser.buffer[buffer_len+0] = byte(value)
} else if value <= 0x7FF {
// 0000 0080-0000 07FF . 110xxxxx 10xxxxxx
parser.buffer[buffer_len+0] = byte(0xC0 + (value >> 6))
parser.buffer[buffer_len+1] = byte(0x80 + (value & 0x3F))
} else if value <= 0xFFFF {
// 0000 0800-0000 FFFF . 1110xxxx 10xxxxxx 10xxxxxx
parser.buffer[buffer_len+0] = byte(0xE0 + (value >> 12))
parser.buffer[buffer_len+1] = byte(0x80 + ((value >> 6) & 0x3F))
parser.buffer[buffer_len+2] = byte(0x80 + (value & 0x3F))
} else {
// 0001 0000-0010 FFFF . 11110xxx 10xxxxxx 10xxxxxx 10xxxxxx
parser.buffer[buffer_len+0] = byte(0xF0 + (value >> 18))
parser.buffer[buffer_len+1] = byte(0x80 + ((value >> 12) & 0x3F))
parser.buffer[buffer_len+2] = byte(0x80 + ((value >> 6) & 0x3F))
parser.buffer[buffer_len+3] = byte(0x80 + (value & 0x3F))
}
buffer_len += width
parser.unread++
}
// On EOF, put NUL into the buffer and return.
if parser.eof {
parser.buffer[buffer_len] = 0
buffer_len++
parser.unread++
break
}
}
parser.buffer = parser.buffer[:buffer_len]
return true
}


@@ -1,203 +0,0 @@
package yaml
import (
"encoding/base64"
"math"
"strconv"
"strings"
"unicode/utf8"
)
type resolveMapItem struct {
value interface{}
tag string
}
var resolveTable = make([]byte, 256)
var resolveMap = make(map[string]resolveMapItem)
func init() {
t := resolveTable
t[int('+')] = 'S' // Sign
t[int('-')] = 'S'
for _, c := range "0123456789" {
t[int(c)] = 'D' // Digit
}
for _, c := range "yYnNtTfFoO~" {
t[int(c)] = 'M' // In map
}
t[int('.')] = '.' // Float (potentially in map)
var resolveMapList = []struct {
v interface{}
tag string
l []string
}{
{true, yaml_BOOL_TAG, []string{"y", "Y", "yes", "Yes", "YES"}},
{true, yaml_BOOL_TAG, []string{"true", "True", "TRUE"}},
{true, yaml_BOOL_TAG, []string{"on", "On", "ON"}},
{false, yaml_BOOL_TAG, []string{"n", "N", "no", "No", "NO"}},
{false, yaml_BOOL_TAG, []string{"false", "False", "FALSE"}},
{false, yaml_BOOL_TAG, []string{"off", "Off", "OFF"}},
{nil, yaml_NULL_TAG, []string{"", "~", "null", "Null", "NULL"}},
{math.NaN(), yaml_FLOAT_TAG, []string{".nan", ".NaN", ".NAN"}},
{math.Inf(+1), yaml_FLOAT_TAG, []string{".inf", ".Inf", ".INF"}},
{math.Inf(+1), yaml_FLOAT_TAG, []string{"+.inf", "+.Inf", "+.INF"}},
{math.Inf(-1), yaml_FLOAT_TAG, []string{"-.inf", "-.Inf", "-.INF"}},
{"<<", yaml_MERGE_TAG, []string{"<<"}},
}
m := resolveMap
for _, item := range resolveMapList {
for _, s := range item.l {
m[s] = resolveMapItem{item.v, item.tag}
}
}
}
const longTagPrefix = "tag:yaml.org,2002:"
func shortTag(tag string) string {
// TODO This can easily be made faster and produce less garbage.
if strings.HasPrefix(tag, longTagPrefix) {
return "!!" + tag[len(longTagPrefix):]
}
return tag
}
func longTag(tag string) string {
if strings.HasPrefix(tag, "!!") {
return longTagPrefix + tag[2:]
}
return tag
}
func resolvableTag(tag string) bool {
switch tag {
case "", yaml_STR_TAG, yaml_BOOL_TAG, yaml_INT_TAG, yaml_FLOAT_TAG, yaml_NULL_TAG:
return true
}
return false
}
func resolve(tag string, in string) (rtag string, out interface{}) {
if !resolvableTag(tag) {
return tag, in
}
defer func() {
switch tag {
case "", rtag, yaml_STR_TAG, yaml_BINARY_TAG:
return
}
failf("cannot decode %s `%s` as a %s", shortTag(rtag), in, shortTag(tag))
}()
// Any data is accepted as a !!str or !!binary.
// Otherwise, the prefix is enough of a hint about what it might be.
hint := byte('N')
if in != "" {
hint = resolveTable[in[0]]
}
if hint != 0 && tag != yaml_STR_TAG && tag != yaml_BINARY_TAG {
// Handle things we can lookup in a map.
if item, ok := resolveMap[in]; ok {
return item.tag, item.value
}
// Base 60 floats are a bad idea, were dropped in YAML 1.2, and
// are purposefully unsupported here. They're still quoted on
// the way out for compatibility with other parsers, though.
switch hint {
case 'M':
// We've already checked the map above.
case '.':
// Not in the map, so maybe a normal float.
floatv, err := strconv.ParseFloat(in, 64)
if err == nil {
return yaml_FLOAT_TAG, floatv
}
case 'D', 'S':
// Int, float, or timestamp.
plain := strings.Replace(in, "_", "", -1)
intv, err := strconv.ParseInt(plain, 0, 64)
if err == nil {
if intv == int64(int(intv)) {
return yaml_INT_TAG, int(intv)
} else {
return yaml_INT_TAG, intv
}
}
uintv, err := strconv.ParseUint(plain, 0, 64)
if err == nil {
return yaml_INT_TAG, uintv
}
floatv, err := strconv.ParseFloat(plain, 64)
if err == nil {
return yaml_FLOAT_TAG, floatv
}
if strings.HasPrefix(plain, "0b") {
intv, err := strconv.ParseInt(plain[2:], 2, 64)
if err == nil {
if intv == int64(int(intv)) {
return yaml_INT_TAG, int(intv)
} else {
return yaml_INT_TAG, intv
}
}
uintv, err := strconv.ParseUint(plain[2:], 2, 64)
if err == nil {
return yaml_INT_TAG, uintv
}
} else if strings.HasPrefix(plain, "-0b") {
intv, err := strconv.ParseInt(plain[3:], 2, 64)
if err == nil {
if intv == int64(int(intv)) {
return yaml_INT_TAG, -int(intv)
} else {
return yaml_INT_TAG, -intv
}
}
}
// XXX Handle timestamps here.
default:
panic("resolveTable item not yet handled: " + string(rune(hint)) + " (with " + in + ")")
}
}
if tag == yaml_BINARY_TAG {
return yaml_BINARY_TAG, in
}
if utf8.ValidString(in) {
return yaml_STR_TAG, in
}
return yaml_BINARY_TAG, encodeBase64(in)
}
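To illustrate the resolution rules above (an editorial sketch, not part of the vendored file; the input document is made up), untagged plain scalars pick up Go types according to the table and parsers in this file:
```Go
package main

import (
	"fmt"
	"log"

	"gopkg.in/yaml.v2"
)

// "yes" resolves to a bool, "0x1F" to an int, ".inf" to a float64.
var doc = []byte("a: yes\nb: 0x1F\nc: .inf\n")

func main() {
	m := map[string]interface{}{}
	if err := yaml.Unmarshal(doc, &m); err != nil {
		log.Fatalf("error: %v", err)
	}
	fmt.Printf("%T %v | %T %v | %T %v\n", m["a"], m["a"], m["b"], m["b"], m["c"], m["c"])
	// bool true | int 31 | float64 +Inf
}
```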
// encodeBase64 encodes s as base64 that is broken up into multiple lines
// as appropriate for the resulting length.
func encodeBase64(s string) string {
const lineLen = 70
encLen := base64.StdEncoding.EncodedLen(len(s))
lines := encLen/lineLen + 1
buf := make([]byte, encLen*2+lines)
in := buf[0:encLen]
out := buf[encLen:]
base64.StdEncoding.Encode(in, []byte(s))
k := 0
for i := 0; i < len(in); i += lineLen {
j := i + lineLen
if j > len(in) {
j = len(in)
}
k += copy(out[k:], in[i:j])
if lines > 1 {
out[k] = '\n'
k++
}
}
return string(out[:k])
}

File diff suppressed because it is too large


@@ -1,104 +0,0 @@
package yaml
import (
"reflect"
"unicode"
)
type keyList []reflect.Value
func (l keyList) Len() int { return len(l) }
func (l keyList) Swap(i, j int) { l[i], l[j] = l[j], l[i] }
func (l keyList) Less(i, j int) bool {
a := l[i]
b := l[j]
ak := a.Kind()
bk := b.Kind()
for (ak == reflect.Interface || ak == reflect.Ptr) && !a.IsNil() {
a = a.Elem()
ak = a.Kind()
}
for (bk == reflect.Interface || bk == reflect.Ptr) && !b.IsNil() {
b = b.Elem()
bk = b.Kind()
}
af, aok := keyFloat(a)
bf, bok := keyFloat(b)
if aok && bok {
if af != bf {
return af < bf
}
if ak != bk {
return ak < bk
}
return numLess(a, b)
}
if ak != reflect.String || bk != reflect.String {
return ak < bk
}
ar, br := []rune(a.String()), []rune(b.String())
for i := 0; i < len(ar) && i < len(br); i++ {
if ar[i] == br[i] {
continue
}
al := unicode.IsLetter(ar[i])
bl := unicode.IsLetter(br[i])
if al && bl {
return ar[i] < br[i]
}
if al || bl {
return bl
}
var ai, bi int
var an, bn int64
for ai = i; ai < len(ar) && unicode.IsDigit(ar[ai]); ai++ {
an = an*10 + int64(ar[ai]-'0')
}
for bi = i; bi < len(br) && unicode.IsDigit(br[bi]); bi++ {
bn = bn*10 + int64(br[bi]-'0')
}
if an != bn {
return an < bn
}
if ai != bi {
return ai < bi
}
return ar[i] < br[i]
}
return len(ar) < len(br)
}
// keyFloat returns a float value for v if it is a number or bool,
// and reports whether it is one.
func keyFloat(v reflect.Value) (f float64, ok bool) {
switch v.Kind() {
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
return float64(v.Int()), true
case reflect.Float32, reflect.Float64:
return v.Float(), true
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
return float64(v.Uint()), true
case reflect.Bool:
if v.Bool() {
return 1, true
}
return 0, true
}
return 0, false
}
// numLess returns whether a < b.
// a and b must necessarily have the same kind.
func numLess(a, b reflect.Value) bool {
switch a.Kind() {
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
return a.Int() < b.Int()
case reflect.Float32, reflect.Float64:
return a.Float() < b.Float()
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
return a.Uint() < b.Uint()
case reflect.Bool:
return !a.Bool() && b.Bool()
}
panic("not a number")
}
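To make the ordering concrete, here is a hypothetical in-package example (it relies on the unexported keyList type above, so it would only compile inside package yaml); note that the digit-aware branch sorts item2 before item10, and bools/numbers sort ahead of strings:

```go
package yaml

import (
	"fmt"
	"reflect"
	"sort"
)

// Sketch of how the comparator behaves; keyList, Len, Swap and Less are the
// definitions shown above.
func ExampleKeyListOrdering() {
	keys := keyList{
		reflect.ValueOf("item10"),
		reflect.ValueOf("item2"),
		reflect.ValueOf(true),
	}
	sort.Sort(keys)
	for _, k := range keys {
		fmt.Println(k.Interface())
	}
	// Output:
	// true
	// item2
	// item10
}
```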


@@ -1,89 +0,0 @@
package yaml
// Set the writer error and return false.
func yaml_emitter_set_writer_error(emitter *yaml_emitter_t, problem string) bool {
emitter.error = yaml_WRITER_ERROR
emitter.problem = problem
return false
}
// Flush the output buffer.
func yaml_emitter_flush(emitter *yaml_emitter_t) bool {
if emitter.write_handler == nil {
panic("write handler not set")
}
// Check if the buffer is empty.
if emitter.buffer_pos == 0 {
return true
}
// If the output encoding is UTF-8, we don't need to recode the buffer.
if emitter.encoding == yaml_UTF8_ENCODING {
if err := emitter.write_handler(emitter, emitter.buffer[:emitter.buffer_pos]); err != nil {
return yaml_emitter_set_writer_error(emitter, "write error: "+err.Error())
}
emitter.buffer_pos = 0
return true
}
// Recode the buffer into the raw buffer.
var low, high int
if emitter.encoding == yaml_UTF16LE_ENCODING {
low, high = 0, 1
} else {
high, low = 1, 0
}
pos := 0
for pos < emitter.buffer_pos {
// See the "reader.c" code for more details on UTF-8 encoding. Note
// that we assume that the buffer contains a valid UTF-8 sequence.
// Read the next UTF-8 character.
octet := emitter.buffer[pos]
var w int
var value rune
switch {
case octet&0x80 == 0x00:
w, value = 1, rune(octet&0x7F)
case octet&0xE0 == 0xC0:
w, value = 2, rune(octet&0x1F)
case octet&0xF0 == 0xE0:
w, value = 3, rune(octet&0x0F)
case octet&0xF8 == 0xF0:
w, value = 4, rune(octet&0x07)
}
for k := 1; k < w; k++ {
octet = emitter.buffer[pos+k]
value = (value << 6) + (rune(octet) & 0x3F)
}
pos += w
// Write the character.
if value < 0x10000 {
var b [2]byte
b[high] = byte(value >> 8)
b[low] = byte(value & 0xFF)
emitter.raw_buffer = append(emitter.raw_buffer, b[0], b[1])
} else {
// Write the character using a surrogate pair (check "reader.c").
var b [4]byte
value -= 0x10000
b[high] = byte(0xD8 + (value >> 18))
b[low] = byte((value >> 10) & 0xFF)
b[high+2] = byte(0xDC + ((value >> 8) & 0xFF))
b[low+2] = byte(value & 0xFF)
emitter.raw_buffer = append(emitter.raw_buffer, b[0], b[1], b[2], b[3])
}
}
// Write the raw buffer.
if err := emitter.write_handler(emitter, emitter.raw_buffer); err != nil {
return yaml_emitter_set_writer_error(emitter, "write error: "+err.Error())
}
emitter.buffer_pos = 0
emitter.raw_buffer = emitter.raw_buffer[:0]
return true
}
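The surrogate-pair branch above can be sanity-checked against the standard library. A standalone sketch (not part of the diff) that performs the same split for a rune outside the Basic Multilingual Plane:

```go
package main

import (
	"fmt"
	"unicode/utf16"
)

func main() {
	r := rune(0x1F600) // a rune above 0xFFFF needs a UTF-16 surrogate pair

	// Same arithmetic as the emitter: drop 0x10000, then spread the remaining
	// 20 bits over a high surrogate (0xD800 base) and a low surrogate (0xDC00 base).
	v := r - 0x10000
	hi := 0xD800 + (v >> 10)
	lo := 0xDC00 + (v & 0x3FF)

	libHi, libLo := utf16.EncodeRune(r)
	fmt.Printf("%#x %#x\n", hi, lo)       // 0xd83d 0xde00
	fmt.Printf("%#x %#x\n", libHi, libLo) // 0xd83d 0xde00
}
```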


@@ -1,346 +0,0 @@
// Package yaml implements YAML support for the Go language.
//
// Source code and other details for the project are available at GitHub:
//
// https://github.com/go-yaml/yaml
//
package yaml
import (
"errors"
"fmt"
"reflect"
"strings"
"sync"
)
// MapSlice encodes and decodes as a YAML map.
// The order of keys is preserved when encoding and decoding.
type MapSlice []MapItem
// MapItem is an item in a MapSlice.
type MapItem struct {
Key, Value interface{}
}
// The Unmarshaler interface may be implemented by types to customize their
// behavior when being unmarshaled from a YAML document. The UnmarshalYAML
// method receives a function that may be called to unmarshal the original
// YAML value into a field or variable. It is safe to call the unmarshal
// function parameter more than once if necessary.
type Unmarshaler interface {
UnmarshalYAML(unmarshal func(interface{}) error) error
}
// The Marshaler interface may be implemented by types to customize their
// behavior when being marshaled into a YAML document. The returned value
// is marshaled in place of the original value implementing Marshaler.
//
// If an error is returned by MarshalYAML, the marshaling procedure stops
// and returns with the provided error.
type Marshaler interface {
MarshalYAML() (interface{}, error)
}
// Unmarshal decodes the first document found within the in byte slice
// and assigns decoded values into the out value.
//
// Maps and pointers (to a struct, string, int, etc) are accepted as out
// values. If an internal pointer within a struct is not initialized,
// the yaml package will initialize it if necessary for unmarshalling
// the provided data. The out parameter must not be nil.
//
// The type of the decoded values should be compatible with the respective
// values in out. If one or more values cannot be decoded due to a type
// mismatches, decoding continues partially until the end of the YAML
// content, and a *yaml.TypeError is returned with details for all
// missed values.
//
// Struct fields are only unmarshalled if they are exported (have an
// upper case first letter), and are unmarshalled using the field name
// lowercased as the default key. Custom keys may be defined via the
// "yaml" name in the field tag: the content preceding the first comma
// is used as the key, and the following comma-separated options are
// used to tweak the marshalling process (see Marshal).
// Conflicting names result in a runtime error.
//
// For example:
//
// type T struct {
// F int `yaml:"a,omitempty"`
// B int
// }
// var t T
// yaml.Unmarshal([]byte("a: 1\nb: 2"), &t)
//
// See the documentation of Marshal for the format of tags and a list of
// supported tag options.
//
func Unmarshal(in []byte, out interface{}) (err error) {
defer handleErr(&err)
d := newDecoder()
p := newParser(in)
defer p.destroy()
node := p.parse()
if node != nil {
v := reflect.ValueOf(out)
if v.Kind() == reflect.Ptr && !v.IsNil() {
v = v.Elem()
}
d.unmarshal(node, v)
}
if len(d.terrors) > 0 {
return &TypeError{d.terrors}
}
return nil
}
// Marshal serializes the value provided into a YAML document. The structure
// of the generated document will reflect the structure of the value itself.
// Maps and pointers (to struct, string, int, etc) are accepted as the in value.
//
// Struct fields are only unmarshalled if they are exported (have an upper case
// first letter), and are unmarshalled using the field name lowercased as the
// default key. Custom keys may be defined via the "yaml" name in the field
// tag: the content preceding the first comma is used as the key, and the
// following comma-separated options are used to tweak the marshalling process.
// Conflicting names result in a runtime error.
//
// The field tag format accepted is:
//
// `(...) yaml:"[<key>][,<flag1>[,<flag2>]]" (...)`
//
// The following flags are currently supported:
//
// omitempty Only include the field if it's not set to the zero
// value for the type or to empty slices or maps.
// Does not apply to zero valued structs.
//
// flow Marshal using a flow style (useful for structs,
// sequences and maps).
//
// inline Inline the field, which must be a struct or a map,
// causing all of its fields or keys to be processed as if
// they were part of the outer struct. For maps, keys must
// not conflict with the yaml keys of other struct fields.
//
// In addition, if the key is "-", the field is ignored.
//
// For example:
//
// type T struct {
// F int "a,omitempty"
// B int
// }
// yaml.Marshal(&T{B: 2}) // Returns "b: 2\n"
// yaml.Marshal(&T{F: 1}} // Returns "a: 1\nb: 0\n"
//
func Marshal(in interface{}) (out []byte, err error) {
defer handleErr(&err)
e := newEncoder()
defer e.destroy()
e.marshal("", reflect.ValueOf(in))
e.finish()
out = e.out
return
}
func handleErr(err *error) {
if v := recover(); v != nil {
if e, ok := v.(yamlError); ok {
*err = e.err
} else {
panic(v)
}
}
}
type yamlError struct {
err error
}
func fail(err error) {
panic(yamlError{err})
}
func failf(format string, args ...interface{}) {
panic(yamlError{fmt.Errorf("yaml: "+format, args...)})
}
// A TypeError is returned by Unmarshal when one or more fields in
// the YAML document cannot be properly decoded into the requested
// types. When this error is returned, the value is still
// unmarshaled partially.
type TypeError struct {
Errors []string
}
func (e *TypeError) Error() string {
return fmt.Sprintf("yaml: unmarshal errors:\n %s", strings.Join(e.Errors, "\n "))
}
// --------------------------------------------------------------------------
// Maintain a mapping of keys to structure field indexes
// The code in this section was copied from mgo/bson.
// structInfo holds details for the serialization of fields of
// a given struct.
type structInfo struct {
FieldsMap map[string]fieldInfo
FieldsList []fieldInfo
// InlineMap is the number of the field in the struct that
// contains an ,inline map, or -1 if there's none.
InlineMap int
}
type fieldInfo struct {
Key string
Num int
OmitEmpty bool
Flow bool
// Inline holds the field index if the field is part of an inlined struct.
Inline []int
}
var structMap = make(map[reflect.Type]*structInfo)
var fieldMapMutex sync.RWMutex
func getStructInfo(st reflect.Type) (*structInfo, error) {
fieldMapMutex.RLock()
sinfo, found := structMap[st]
fieldMapMutex.RUnlock()
if found {
return sinfo, nil
}
n := st.NumField()
fieldsMap := make(map[string]fieldInfo)
fieldsList := make([]fieldInfo, 0, n)
inlineMap := -1
for i := 0; i != n; i++ {
field := st.Field(i)
if field.PkgPath != "" {
continue // Private field
}
info := fieldInfo{Num: i}
tag := field.Tag.Get("yaml")
if tag == "" && strings.Index(string(field.Tag), ":") < 0 {
tag = string(field.Tag)
}
if tag == "-" {
continue
}
inline := false
fields := strings.Split(tag, ",")
if len(fields) > 1 {
for _, flag := range fields[1:] {
switch flag {
case "omitempty":
info.OmitEmpty = true
case "flow":
info.Flow = true
case "inline":
inline = true
default:
return nil, errors.New(fmt.Sprintf("Unsupported flag %q in tag %q of type %s", flag, tag, st))
}
}
tag = fields[0]
}
if inline {
switch field.Type.Kind() {
case reflect.Map:
if inlineMap >= 0 {
return nil, errors.New("Multiple ,inline maps in struct " + st.String())
}
if field.Type.Key() != reflect.TypeOf("") {
return nil, errors.New("Option ,inline needs a map with string keys in struct " + st.String())
}
inlineMap = info.Num
case reflect.Struct:
sinfo, err := getStructInfo(field.Type)
if err != nil {
return nil, err
}
for _, finfo := range sinfo.FieldsList {
if _, found := fieldsMap[finfo.Key]; found {
msg := "Duplicated key '" + finfo.Key + "' in struct " + st.String()
return nil, errors.New(msg)
}
if finfo.Inline == nil {
finfo.Inline = []int{i, finfo.Num}
} else {
finfo.Inline = append([]int{i}, finfo.Inline...)
}
fieldsMap[finfo.Key] = finfo
fieldsList = append(fieldsList, finfo)
}
default:
//return nil, errors.New("Option ,inline needs a struct value or map field")
return nil, errors.New("Option ,inline needs a struct value field")
}
continue
}
if tag != "" {
info.Key = tag
} else {
info.Key = strings.ToLower(field.Name)
}
if _, found = fieldsMap[info.Key]; found {
msg := "Duplicated key '" + info.Key + "' in struct " + st.String()
return nil, errors.New(msg)
}
fieldsList = append(fieldsList, info)
fieldsMap[info.Key] = info
}
sinfo = &structInfo{fieldsMap, fieldsList, inlineMap}
fieldMapMutex.Lock()
structMap[st] = sinfo
fieldMapMutex.Unlock()
return sinfo, nil
}
func isZero(v reflect.Value) bool {
switch v.Kind() {
case reflect.String:
return len(v.String()) == 0
case reflect.Interface, reflect.Ptr:
return v.IsNil()
case reflect.Slice:
return v.Len() == 0
case reflect.Map:
return v.Len() == 0
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
return v.Int() == 0
case reflect.Float32, reflect.Float64:
return v.Float() == 0
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
return v.Uint() == 0
case reflect.Bool:
return !v.Bool()
case reflect.Struct:
vt := v.Type()
for i := v.NumField() - 1; i >= 0; i-- {
if vt.Field(i).PkgPath != "" {
continue // Private field
}
if !isZero(v.Field(i)) {
return false
}
}
return true
}
return false
}


@@ -1,716 +0,0 @@
package yaml
import (
"io"
)
// The version directive data.
type yaml_version_directive_t struct {
major int8 // The major version number.
minor int8 // The minor version number.
}
// The tag directive data.
type yaml_tag_directive_t struct {
handle []byte // The tag handle.
prefix []byte // The tag prefix.
}
type yaml_encoding_t int
// The stream encoding.
const (
// Let the parser choose the encoding.
yaml_ANY_ENCODING yaml_encoding_t = iota
yaml_UTF8_ENCODING // The default UTF-8 encoding.
yaml_UTF16LE_ENCODING // The UTF-16-LE encoding with BOM.
yaml_UTF16BE_ENCODING // The UTF-16-BE encoding with BOM.
)
type yaml_break_t int
// Line break types.
const (
// Let the parser choose the break type.
yaml_ANY_BREAK yaml_break_t = iota
yaml_CR_BREAK // Use CR for line breaks (Mac style).
yaml_LN_BREAK // Use LN for line breaks (Unix style).
yaml_CRLN_BREAK // Use CR LN for line breaks (DOS style).
)
type yaml_error_type_t int
// Many bad things could happen with the parser and emitter.
const (
// No error is produced.
yaml_NO_ERROR yaml_error_type_t = iota
yaml_MEMORY_ERROR // Cannot allocate or reallocate a block of memory.
yaml_READER_ERROR // Cannot read or decode the input stream.
yaml_SCANNER_ERROR // Cannot scan the input stream.
yaml_PARSER_ERROR // Cannot parse the input stream.
yaml_COMPOSER_ERROR // Cannot compose a YAML document.
yaml_WRITER_ERROR // Cannot write to the output stream.
yaml_EMITTER_ERROR // Cannot emit a YAML stream.
)
// The pointer position.
type yaml_mark_t struct {
index int // The position index.
line int // The position line.
column int // The position column.
}
// Node Styles
type yaml_style_t int8
type yaml_scalar_style_t yaml_style_t
// Scalar styles.
const (
// Let the emitter choose the style.
yaml_ANY_SCALAR_STYLE yaml_scalar_style_t = iota
yaml_PLAIN_SCALAR_STYLE // The plain scalar style.
yaml_SINGLE_QUOTED_SCALAR_STYLE // The single-quoted scalar style.
yaml_DOUBLE_QUOTED_SCALAR_STYLE // The double-quoted scalar style.
yaml_LITERAL_SCALAR_STYLE // The literal scalar style.
yaml_FOLDED_SCALAR_STYLE // The folded scalar style.
)
type yaml_sequence_style_t yaml_style_t
// Sequence styles.
const (
// Let the emitter choose the style.
yaml_ANY_SEQUENCE_STYLE yaml_sequence_style_t = iota
yaml_BLOCK_SEQUENCE_STYLE // The block sequence style.
yaml_FLOW_SEQUENCE_STYLE // The flow sequence style.
)
type yaml_mapping_style_t yaml_style_t
// Mapping styles.
const (
// Let the emitter choose the style.
yaml_ANY_MAPPING_STYLE yaml_mapping_style_t = iota
yaml_BLOCK_MAPPING_STYLE // The block mapping style.
yaml_FLOW_MAPPING_STYLE // The flow mapping style.
)
// Tokens
type yaml_token_type_t int
// Token types.
const (
// An empty token.
yaml_NO_TOKEN yaml_token_type_t = iota
yaml_STREAM_START_TOKEN // A STREAM-START token.
yaml_STREAM_END_TOKEN // A STREAM-END token.
yaml_VERSION_DIRECTIVE_TOKEN // A VERSION-DIRECTIVE token.
yaml_TAG_DIRECTIVE_TOKEN // A TAG-DIRECTIVE token.
yaml_DOCUMENT_START_TOKEN // A DOCUMENT-START token.
yaml_DOCUMENT_END_TOKEN // A DOCUMENT-END token.
yaml_BLOCK_SEQUENCE_START_TOKEN // A BLOCK-SEQUENCE-START token.
yaml_BLOCK_MAPPING_START_TOKEN // A BLOCK-SEQUENCE-END token.
yaml_BLOCK_END_TOKEN // A BLOCK-END token.
yaml_FLOW_SEQUENCE_START_TOKEN // A FLOW-SEQUENCE-START token.
yaml_FLOW_SEQUENCE_END_TOKEN // A FLOW-SEQUENCE-END token.
yaml_FLOW_MAPPING_START_TOKEN // A FLOW-MAPPING-START token.
yaml_FLOW_MAPPING_END_TOKEN // A FLOW-MAPPING-END token.
yaml_BLOCK_ENTRY_TOKEN // A BLOCK-ENTRY token.
yaml_FLOW_ENTRY_TOKEN // A FLOW-ENTRY token.
yaml_KEY_TOKEN // A KEY token.
yaml_VALUE_TOKEN // A VALUE token.
yaml_ALIAS_TOKEN // An ALIAS token.
yaml_ANCHOR_TOKEN // An ANCHOR token.
yaml_TAG_TOKEN // A TAG token.
yaml_SCALAR_TOKEN // A SCALAR token.
)
func (tt yaml_token_type_t) String() string {
switch tt {
case yaml_NO_TOKEN:
return "yaml_NO_TOKEN"
case yaml_STREAM_START_TOKEN:
return "yaml_STREAM_START_TOKEN"
case yaml_STREAM_END_TOKEN:
return "yaml_STREAM_END_TOKEN"
case yaml_VERSION_DIRECTIVE_TOKEN:
return "yaml_VERSION_DIRECTIVE_TOKEN"
case yaml_TAG_DIRECTIVE_TOKEN:
return "yaml_TAG_DIRECTIVE_TOKEN"
case yaml_DOCUMENT_START_TOKEN:
return "yaml_DOCUMENT_START_TOKEN"
case yaml_DOCUMENT_END_TOKEN:
return "yaml_DOCUMENT_END_TOKEN"
case yaml_BLOCK_SEQUENCE_START_TOKEN:
return "yaml_BLOCK_SEQUENCE_START_TOKEN"
case yaml_BLOCK_MAPPING_START_TOKEN:
return "yaml_BLOCK_MAPPING_START_TOKEN"
case yaml_BLOCK_END_TOKEN:
return "yaml_BLOCK_END_TOKEN"
case yaml_FLOW_SEQUENCE_START_TOKEN:
return "yaml_FLOW_SEQUENCE_START_TOKEN"
case yaml_FLOW_SEQUENCE_END_TOKEN:
return "yaml_FLOW_SEQUENCE_END_TOKEN"
case yaml_FLOW_MAPPING_START_TOKEN:
return "yaml_FLOW_MAPPING_START_TOKEN"
case yaml_FLOW_MAPPING_END_TOKEN:
return "yaml_FLOW_MAPPING_END_TOKEN"
case yaml_BLOCK_ENTRY_TOKEN:
return "yaml_BLOCK_ENTRY_TOKEN"
case yaml_FLOW_ENTRY_TOKEN:
return "yaml_FLOW_ENTRY_TOKEN"
case yaml_KEY_TOKEN:
return "yaml_KEY_TOKEN"
case yaml_VALUE_TOKEN:
return "yaml_VALUE_TOKEN"
case yaml_ALIAS_TOKEN:
return "yaml_ALIAS_TOKEN"
case yaml_ANCHOR_TOKEN:
return "yaml_ANCHOR_TOKEN"
case yaml_TAG_TOKEN:
return "yaml_TAG_TOKEN"
case yaml_SCALAR_TOKEN:
return "yaml_SCALAR_TOKEN"
}
return "<unknown token>"
}
// The token structure.
type yaml_token_t struct {
// The token type.
typ yaml_token_type_t
// The start/end of the token.
start_mark, end_mark yaml_mark_t
// The stream encoding (for yaml_STREAM_START_TOKEN).
encoding yaml_encoding_t
// The alias/anchor/scalar value or tag/tag directive handle
// (for yaml_ALIAS_TOKEN, yaml_ANCHOR_TOKEN, yaml_SCALAR_TOKEN, yaml_TAG_TOKEN, yaml_TAG_DIRECTIVE_TOKEN).
value []byte
// The tag suffix (for yaml_TAG_TOKEN).
suffix []byte
// The tag directive prefix (for yaml_TAG_DIRECTIVE_TOKEN).
prefix []byte
// The scalar style (for yaml_SCALAR_TOKEN).
style yaml_scalar_style_t
// The version directive major/minor (for yaml_VERSION_DIRECTIVE_TOKEN).
major, minor int8
}
// Events
type yaml_event_type_t int8
// Event types.
const (
// An empty event.
yaml_NO_EVENT yaml_event_type_t = iota
yaml_STREAM_START_EVENT // A STREAM-START event.
yaml_STREAM_END_EVENT // A STREAM-END event.
yaml_DOCUMENT_START_EVENT // A DOCUMENT-START event.
yaml_DOCUMENT_END_EVENT // A DOCUMENT-END event.
yaml_ALIAS_EVENT // An ALIAS event.
yaml_SCALAR_EVENT // A SCALAR event.
yaml_SEQUENCE_START_EVENT // A SEQUENCE-START event.
yaml_SEQUENCE_END_EVENT // A SEQUENCE-END event.
yaml_MAPPING_START_EVENT // A MAPPING-START event.
yaml_MAPPING_END_EVENT // A MAPPING-END event.
)
// The event structure.
type yaml_event_t struct {
// The event type.
typ yaml_event_type_t
// The start and end of the event.
start_mark, end_mark yaml_mark_t
// The document encoding (for yaml_STREAM_START_EVENT).
encoding yaml_encoding_t
// The version directive (for yaml_DOCUMENT_START_EVENT).
version_directive *yaml_version_directive_t
// The list of tag directives (for yaml_DOCUMENT_START_EVENT).
tag_directives []yaml_tag_directive_t
// The anchor (for yaml_SCALAR_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT, yaml_ALIAS_EVENT).
anchor []byte
// The tag (for yaml_SCALAR_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT).
tag []byte
// The scalar value (for yaml_SCALAR_EVENT).
value []byte
// Is the document start/end indicator implicit, or the tag optional?
// (for yaml_DOCUMENT_START_EVENT, yaml_DOCUMENT_END_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT, yaml_SCALAR_EVENT).
implicit bool
// Is the tag optional for any non-plain style? (for yaml_SCALAR_EVENT).
quoted_implicit bool
// The style (for yaml_SCALAR_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT).
style yaml_style_t
}
func (e *yaml_event_t) scalar_style() yaml_scalar_style_t { return yaml_scalar_style_t(e.style) }
func (e *yaml_event_t) sequence_style() yaml_sequence_style_t { return yaml_sequence_style_t(e.style) }
func (e *yaml_event_t) mapping_style() yaml_mapping_style_t { return yaml_mapping_style_t(e.style) }
// Nodes
const (
yaml_NULL_TAG = "tag:yaml.org,2002:null" // The tag !!null with the only possible value: null.
yaml_BOOL_TAG = "tag:yaml.org,2002:bool" // The tag !!bool with the values: true and false.
yaml_STR_TAG = "tag:yaml.org,2002:str" // The tag !!str for string values.
yaml_INT_TAG = "tag:yaml.org,2002:int" // The tag !!int for integer values.
yaml_FLOAT_TAG = "tag:yaml.org,2002:float" // The tag !!float for float values.
yaml_TIMESTAMP_TAG = "tag:yaml.org,2002:timestamp" // The tag !!timestamp for date and time values.
yaml_SEQ_TAG = "tag:yaml.org,2002:seq" // The tag !!seq is used to denote sequences.
yaml_MAP_TAG = "tag:yaml.org,2002:map" // The tag !!map is used to denote mapping.
// Not in original libyaml.
yaml_BINARY_TAG = "tag:yaml.org,2002:binary"
yaml_MERGE_TAG = "tag:yaml.org,2002:merge"
yaml_DEFAULT_SCALAR_TAG = yaml_STR_TAG // The default scalar tag is !!str.
yaml_DEFAULT_SEQUENCE_TAG = yaml_SEQ_TAG // The default sequence tag is !!seq.
yaml_DEFAULT_MAPPING_TAG = yaml_MAP_TAG // The default mapping tag is !!map.
)
type yaml_node_type_t int
// Node types.
const (
// An empty node.
yaml_NO_NODE yaml_node_type_t = iota
yaml_SCALAR_NODE // A scalar node.
yaml_SEQUENCE_NODE // A sequence node.
yaml_MAPPING_NODE // A mapping node.
)
// An element of a sequence node.
type yaml_node_item_t int
// An element of a mapping node.
type yaml_node_pair_t struct {
key int // The key of the element.
value int // The value of the element.
}
// The node structure.
type yaml_node_t struct {
typ yaml_node_type_t // The node type.
tag []byte // The node tag.
// The node data.
// The scalar parameters (for yaml_SCALAR_NODE).
scalar struct {
value []byte // The scalar value.
length int // The length of the scalar value.
style yaml_scalar_style_t // The scalar style.
}
// The sequence parameters (for YAML_SEQUENCE_NODE).
sequence struct {
items_data []yaml_node_item_t // The stack of sequence items.
style yaml_sequence_style_t // The sequence style.
}
// The mapping parameters (for yaml_MAPPING_NODE).
mapping struct {
pairs_data []yaml_node_pair_t // The stack of mapping pairs (key, value).
pairs_start *yaml_node_pair_t // The beginning of the stack.
pairs_end *yaml_node_pair_t // The end of the stack.
pairs_top *yaml_node_pair_t // The top of the stack.
style yaml_mapping_style_t // The mapping style.
}
start_mark yaml_mark_t // The beginning of the node.
end_mark yaml_mark_t // The end of the node.
}
// The document structure.
type yaml_document_t struct {
// The document nodes.
nodes []yaml_node_t
// The version directive.
version_directive *yaml_version_directive_t
// The list of tag directives.
tag_directives_data []yaml_tag_directive_t
tag_directives_start int // The beginning of the tag directives list.
tag_directives_end int // The end of the tag directives list.
start_implicit int // Is the document start indicator implicit?
end_implicit int // Is the document end indicator implicit?
// The start/end of the document.
start_mark, end_mark yaml_mark_t
}
// The prototype of a read handler.
//
// The read handler is called when the parser needs to read more bytes from the
// source. The handler should write not more than size bytes to the buffer.
// The number of written bytes should be set to the size_read variable.
//
// [in,out] data A pointer to an application data specified by
// yaml_parser_set_input().
// [out] buffer The buffer to write the data from the source.
// [in] size The size of the buffer.
// [out] size_read The actual number of bytes read from the source.
//
// On success, the handler should return 1. If the handler failed,
// the returned value should be 0. On EOF, the handler should set the
// size_read to 0 and return 1.
type yaml_read_handler_t func(parser *yaml_parser_t, buffer []byte) (n int, err error)
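As a hypothetical illustration of that contract (assuming in-package access to yaml_parser_t; the name reader_read_handler is made up for this sketch), an io.Reader can be adapted directly, returning (0, io.EOF) at end of input and (n, nil) otherwise:

```go
// Hypothetical in-package sketch of a read handler backed by parser.input_file.
func reader_read_handler(parser *yaml_parser_t, buffer []byte) (n int, err error) {
	return parser.input_file.Read(buffer)
}
```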
// This structure holds information about a potential simple key.
type yaml_simple_key_t struct {
possible bool // Is a simple key possible?
required bool // Is a simple key required?
token_number int // The number of the token.
mark yaml_mark_t // The position mark.
}
// The states of the parser.
type yaml_parser_state_t int
const (
yaml_PARSE_STREAM_START_STATE yaml_parser_state_t = iota
yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE // Expect the beginning of an implicit document.
yaml_PARSE_DOCUMENT_START_STATE // Expect DOCUMENT-START.
yaml_PARSE_DOCUMENT_CONTENT_STATE // Expect the content of a document.
yaml_PARSE_DOCUMENT_END_STATE // Expect DOCUMENT-END.
yaml_PARSE_BLOCK_NODE_STATE // Expect a block node.
yaml_PARSE_BLOCK_NODE_OR_INDENTLESS_SEQUENCE_STATE // Expect a block node or indentless sequence.
yaml_PARSE_FLOW_NODE_STATE // Expect a flow node.
yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE // Expect the first entry of a block sequence.
yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE // Expect an entry of a block sequence.
yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE // Expect an entry of an indentless sequence.
yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE // Expect the first key of a block mapping.
yaml_PARSE_BLOCK_MAPPING_KEY_STATE // Expect a block mapping key.
yaml_PARSE_BLOCK_MAPPING_VALUE_STATE // Expect a block mapping value.
yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE // Expect the first entry of a flow sequence.
yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE // Expect an entry of a flow sequence.
yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE // Expect a key of an ordered mapping.
yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE // Expect a value of an ordered mapping.
yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE // Expect the end of an ordered mapping entry.
yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE // Expect the first key of a flow mapping.
yaml_PARSE_FLOW_MAPPING_KEY_STATE // Expect a key of a flow mapping.
yaml_PARSE_FLOW_MAPPING_VALUE_STATE // Expect a value of a flow mapping.
yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE // Expect an empty value of a flow mapping.
yaml_PARSE_END_STATE // Expect nothing.
)
func (ps yaml_parser_state_t) String() string {
switch ps {
case yaml_PARSE_STREAM_START_STATE:
return "yaml_PARSE_STREAM_START_STATE"
case yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE:
return "yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE"
case yaml_PARSE_DOCUMENT_START_STATE:
return "yaml_PARSE_DOCUMENT_START_STATE"
case yaml_PARSE_DOCUMENT_CONTENT_STATE:
return "yaml_PARSE_DOCUMENT_CONTENT_STATE"
case yaml_PARSE_DOCUMENT_END_STATE:
return "yaml_PARSE_DOCUMENT_END_STATE"
case yaml_PARSE_BLOCK_NODE_STATE:
return "yaml_PARSE_BLOCK_NODE_STATE"
case yaml_PARSE_BLOCK_NODE_OR_INDENTLESS_SEQUENCE_STATE:
return "yaml_PARSE_BLOCK_NODE_OR_INDENTLESS_SEQUENCE_STATE"
case yaml_PARSE_FLOW_NODE_STATE:
return "yaml_PARSE_FLOW_NODE_STATE"
case yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE:
return "yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE"
case yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE:
return "yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE"
case yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE:
return "yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE"
case yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE:
return "yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE"
case yaml_PARSE_BLOCK_MAPPING_KEY_STATE:
return "yaml_PARSE_BLOCK_MAPPING_KEY_STATE"
case yaml_PARSE_BLOCK_MAPPING_VALUE_STATE:
return "yaml_PARSE_BLOCK_MAPPING_VALUE_STATE"
case yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE:
return "yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE"
case yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE:
return "yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE"
case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE:
return "yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE"
case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE:
return "yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE"
case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE:
return "yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE"
case yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE:
return "yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE"
case yaml_PARSE_FLOW_MAPPING_KEY_STATE:
return "yaml_PARSE_FLOW_MAPPING_KEY_STATE"
case yaml_PARSE_FLOW_MAPPING_VALUE_STATE:
return "yaml_PARSE_FLOW_MAPPING_VALUE_STATE"
case yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE:
return "yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE"
case yaml_PARSE_END_STATE:
return "yaml_PARSE_END_STATE"
}
return "<unknown parser state>"
}
// This structure holds aliases data.
type yaml_alias_data_t struct {
anchor []byte // The anchor.
index int // The node id.
mark yaml_mark_t // The anchor mark.
}
// The parser structure.
//
// All members are internal. Manage the structure using the
// yaml_parser_ family of functions.
type yaml_parser_t struct {
// Error handling
error yaml_error_type_t // Error type.
problem string // Error description.
// The byte about which the problem occurred.
problem_offset int
problem_value int
problem_mark yaml_mark_t
// The error context.
context string
context_mark yaml_mark_t
// Reader stuff
read_handler yaml_read_handler_t // Read handler.
input_file io.Reader // File input data.
input []byte // String input data.
input_pos int
eof bool // EOF flag
buffer []byte // The working buffer.
buffer_pos int // The current position of the buffer.
unread int // The number of unread characters in the buffer.
raw_buffer []byte // The raw buffer.
raw_buffer_pos int // The current position of the buffer.
encoding yaml_encoding_t // The input encoding.
offset int // The offset of the current position (in bytes).
mark yaml_mark_t // The mark of the current position.
// Scanner stuff
stream_start_produced bool // Have we started to scan the input stream?
stream_end_produced bool // Have we reached the end of the input stream?
flow_level int // The number of unclosed '[' and '{' indicators.
tokens []yaml_token_t // The tokens queue.
tokens_head int // The head of the tokens queue.
tokens_parsed int // The number of tokens fetched from the queue.
token_available bool // Does the tokens queue contain a token ready for dequeueing.
indent int // The current indentation level.
indents []int // The indentation levels stack.
simple_key_allowed bool // May a simple key occur at the current position?
simple_keys []yaml_simple_key_t // The stack of simple keys.
// Parser stuff
state yaml_parser_state_t // The current parser state.
states []yaml_parser_state_t // The parser states stack.
marks []yaml_mark_t // The stack of marks.
tag_directives []yaml_tag_directive_t // The list of TAG directives.
// Dumper stuff
aliases []yaml_alias_data_t // The alias data.
document *yaml_document_t // The currently parsed document.
}
// Emitter Definitions
// The prototype of a write handler.
//
// The write handler is called when the emitter needs to flush the accumulated
// characters to the output. The handler should write @a size bytes of the
// @a buffer to the output.
//
// @param[in,out] data A pointer to an application data specified by
// yaml_emitter_set_output().
// @param[in] buffer The buffer with bytes to be written.
// @param[in] size The size of the buffer.
//
// @returns On success, the handler should return @c 1. If the handler failed,
// the returned value should be @c 0.
//
type yaml_write_handler_t func(emitter *yaml_emitter_t, buffer []byte) error
type yaml_emitter_state_t int
// The emitter states.
const (
// Expect STREAM-START.
yaml_EMIT_STREAM_START_STATE yaml_emitter_state_t = iota
yaml_EMIT_FIRST_DOCUMENT_START_STATE // Expect the first DOCUMENT-START or STREAM-END.
yaml_EMIT_DOCUMENT_START_STATE // Expect DOCUMENT-START or STREAM-END.
yaml_EMIT_DOCUMENT_CONTENT_STATE // Expect the content of a document.
yaml_EMIT_DOCUMENT_END_STATE // Expect DOCUMENT-END.
yaml_EMIT_FLOW_SEQUENCE_FIRST_ITEM_STATE // Expect the first item of a flow sequence.
yaml_EMIT_FLOW_SEQUENCE_ITEM_STATE // Expect an item of a flow sequence.
yaml_EMIT_FLOW_MAPPING_FIRST_KEY_STATE // Expect the first key of a flow mapping.
yaml_EMIT_FLOW_MAPPING_KEY_STATE // Expect a key of a flow mapping.
yaml_EMIT_FLOW_MAPPING_SIMPLE_VALUE_STATE // Expect a value for a simple key of a flow mapping.
yaml_EMIT_FLOW_MAPPING_VALUE_STATE // Expect a value of a flow mapping.
yaml_EMIT_BLOCK_SEQUENCE_FIRST_ITEM_STATE // Expect the first item of a block sequence.
yaml_EMIT_BLOCK_SEQUENCE_ITEM_STATE // Expect an item of a block sequence.
yaml_EMIT_BLOCK_MAPPING_FIRST_KEY_STATE // Expect the first key of a block mapping.
yaml_EMIT_BLOCK_MAPPING_KEY_STATE // Expect the key of a block mapping.
yaml_EMIT_BLOCK_MAPPING_SIMPLE_VALUE_STATE // Expect a value for a simple key of a block mapping.
yaml_EMIT_BLOCK_MAPPING_VALUE_STATE // Expect a value of a block mapping.
yaml_EMIT_END_STATE // Expect nothing.
)
// The emitter structure.
//
// All members are internal. Manage the structure using the @c yaml_emitter_
// family of functions.
type yaml_emitter_t struct {
// Error handling
error yaml_error_type_t // Error type.
problem string // Error description.
// Writer stuff
write_handler yaml_write_handler_t // Write handler.
output_buffer *[]byte // String output data.
output_file io.Writer // File output data.
buffer []byte // The working buffer.
buffer_pos int // The current position of the buffer.
raw_buffer []byte // The raw buffer.
raw_buffer_pos int // The current position of the buffer.
encoding yaml_encoding_t // The stream encoding.
// Emitter stuff
canonical bool // If the output is in the canonical style?
best_indent int // The number of indentation spaces.
best_width int // The preferred width of the output lines.
unicode bool // Allow unescaped non-ASCII characters?
line_break yaml_break_t // The preferred line break.
state yaml_emitter_state_t // The current emitter state.
states []yaml_emitter_state_t // The stack of states.
events []yaml_event_t // The event queue.
events_head int // The head of the event queue.
indents []int // The stack of indentation levels.
tag_directives []yaml_tag_directive_t // The list of tag directives.
indent int // The current indentation level.
flow_level int // The current flow level.
root_context bool // Is it the document root context?
sequence_context bool // Is it a sequence context?
mapping_context bool // Is it a mapping context?
simple_key_context bool // Is it a simple mapping key context?
line int // The current line.
column int // The current column.
whitespace bool // If the last character was a whitespace?
indention bool // If the last character was an indentation character (' ', '-', '?', ':')?
open_ended bool // If an explicit document end is required?
// Anchor analysis.
anchor_data struct {
anchor []byte // The anchor value.
alias bool // Is it an alias?
}
// Tag analysis.
tag_data struct {
handle []byte // The tag handle.
suffix []byte // The tag suffix.
}
// Scalar analysis.
scalar_data struct {
value []byte // The scalar value.
multiline bool // Does the scalar contain line breaks?
flow_plain_allowed bool // Can the scalar be expressed in the flow plain style?
block_plain_allowed bool // Can the scalar be expressed in the block plain style?
single_quoted_allowed bool // Can the scalar be expressed in the single quoted style?
block_allowed bool // Can the scalar be expressed in the literal or folded styles?
style yaml_scalar_style_t // The output style.
}
// Dumper stuff
opened bool // If the stream was already opened?
closed bool // If the stream was already closed?
// The information associated with the document nodes.
anchors *struct {
references int // The number of references.
anchor int // The anchor id.
serialized bool // If the node has been emitted?
}
last_anchor_id int // The last assigned anchor id.
document *yaml_document_t // The currently emitted document.
}


@@ -1,173 +0,0 @@
package yaml
const (
// The size of the input raw buffer.
input_raw_buffer_size = 512
// The size of the input buffer.
// It should be possible to decode the whole raw buffer.
input_buffer_size = input_raw_buffer_size * 3
// The size of the output buffer.
output_buffer_size = 128
// The size of the output raw buffer.
// It should be possible to encode the whole output buffer.
output_raw_buffer_size = (output_buffer_size*2 + 2)
// The size of other stacks and queues.
initial_stack_size = 16
initial_queue_size = 16
initial_string_size = 16
)
// Check if the character at the specified position is an alphabetical
// character, a digit, '_', or '-'.
func is_alpha(b []byte, i int) bool {
return b[i] >= '0' && b[i] <= '9' || b[i] >= 'A' && b[i] <= 'Z' || b[i] >= 'a' && b[i] <= 'z' || b[i] == '_' || b[i] == '-'
}
// Check if the character at the specified position is a digit.
func is_digit(b []byte, i int) bool {
return b[i] >= '0' && b[i] <= '9'
}
// Get the value of a digit.
func as_digit(b []byte, i int) int {
return int(b[i]) - '0'
}
// Check if the character at the specified position is a hex-digit.
func is_hex(b []byte, i int) bool {
return b[i] >= '0' && b[i] <= '9' || b[i] >= 'A' && b[i] <= 'F' || b[i] >= 'a' && b[i] <= 'f'
}
// Get the value of a hex-digit.
func as_hex(b []byte, i int) int {
bi := b[i]
if bi >= 'A' && bi <= 'F' {
return int(bi) - 'A' + 10
}
if bi >= 'a' && bi <= 'f' {
return int(bi) - 'a' + 10
}
return int(bi) - '0'
}
// Check if the character is ASCII.
func is_ascii(b []byte, i int) bool {
return b[i] <= 0x7F
}
// Check if the character at the start of the buffer can be printed unescaped.
func is_printable(b []byte, i int) bool {
return ((b[i] == 0x0A) || // . == #x0A
(b[i] >= 0x20 && b[i] <= 0x7E) || // #x20 <= . <= #x7E
(b[i] == 0xC2 && b[i+1] >= 0xA0) || // #0xA0 <= . <= #xD7FF
(b[i] > 0xC2 && b[i] < 0xED) ||
(b[i] == 0xED && b[i+1] < 0xA0) ||
(b[i] == 0xEE) ||
(b[i] == 0xEF && // #xE000 <= . <= #xFFFD
!(b[i+1] == 0xBB && b[i+2] == 0xBF) && // && . != #xFEFF
!(b[i+1] == 0xBF && (b[i+2] == 0xBE || b[i+2] == 0xBF))))
}
// Check if the character at the specified position is NUL.
func is_z(b []byte, i int) bool {
return b[i] == 0x00
}
// Check if the beginning of the buffer is a BOM.
func is_bom(b []byte, i int) bool {
return b[0] == 0xEF && b[1] == 0xBB && b[2] == 0xBF
}
// Check if the character at the specified position is space.
func is_space(b []byte, i int) bool {
return b[i] == ' '
}
// Check if the character at the specified position is tab.
func is_tab(b []byte, i int) bool {
return b[i] == '\t'
}
// Check if the character at the specified position is blank (space or tab).
func is_blank(b []byte, i int) bool {
//return is_space(b, i) || is_tab(b, i)
return b[i] == ' ' || b[i] == '\t'
}
// Check if the character at the specified position is a line break.
func is_break(b []byte, i int) bool {
return (b[i] == '\r' || // CR (#xD)
b[i] == '\n' || // LF (#xA)
b[i] == 0xC2 && b[i+1] == 0x85 || // NEL (#x85)
b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA8 || // LS (#x2028)
b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA9) // PS (#x2029)
}
func is_crlf(b []byte, i int) bool {
return b[i] == '\r' && b[i+1] == '\n'
}
// Check if the character is a line break or NUL.
func is_breakz(b []byte, i int) bool {
//return is_break(b, i) || is_z(b, i)
return ( // is_break:
b[i] == '\r' || // CR (#xD)
b[i] == '\n' || // LF (#xA)
b[i] == 0xC2 && b[i+1] == 0x85 || // NEL (#x85)
b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA8 || // LS (#x2028)
b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA9 || // PS (#x2029)
// is_z:
b[i] == 0)
}
// Check if the character is a line break, space, or NUL.
func is_spacez(b []byte, i int) bool {
//return is_space(b, i) || is_breakz(b, i)
return ( // is_space:
b[i] == ' ' ||
// is_breakz:
b[i] == '\r' || // CR (#xD)
b[i] == '\n' || // LF (#xA)
b[i] == 0xC2 && b[i+1] == 0x85 || // NEL (#x85)
b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA8 || // LS (#x2028)
b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA9 || // PS (#x2029)
b[i] == 0)
}
// Check if the character is a line break, space, tab, or NUL.
func is_blankz(b []byte, i int) bool {
//return is_blank(b, i) || is_breakz(b, i)
return ( // is_blank:
b[i] == ' ' || b[i] == '\t' ||
// is_breakz:
b[i] == '\r' || // CR (#xD)
b[i] == '\n' || // LF (#xA)
b[i] == 0xC2 && b[i+1] == 0x85 || // NEL (#x85)
b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA8 || // LS (#x2028)
b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA9 || // PS (#x2029)
b[i] == 0)
}
// Determine the width of the character.
func width(b byte) int {
// Don't replace these by a switch without first
// confirming that it is being inlined.
if b&0x80 == 0x00 {
return 1
}
if b&0xE0 == 0xC0 {
return 2
}
if b&0xF0 == 0xE0 {
return 3
}
if b&0xF8 == 0xF0 {
return 4
}
return 0
}
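As a quick cross-check (a standalone sketch, not part of the file above), the leading-byte masks used by width agree with the encoded lengths reported by unicode/utf8:

```go
package main

import (
	"fmt"
	"unicode/utf8"
)

// leadWidth repeats the mask logic of width() above for illustration.
func leadWidth(b byte) int {
	switch {
	case b&0x80 == 0x00:
		return 1
	case b&0xE0 == 0xC0:
		return 2
	case b&0xF0 == 0xE0:
		return 3
	case b&0xF8 == 0xF0:
		return 4
	}
	return 0
}

func main() {
	for _, r := range []rune{'a', 'é', '€', '🙂'} {
		b := []byte(string(r))
		fmt.Println(leadWidth(b[0]), utf8.RuneLen(r)) // 1 1, 2 2, 3 3, 4 4
	}
}
```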

LICENSE (new file, 19 lines)

@@ -0,0 +1,19 @@
Copyright (c) 2017 Mike Farah
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.

Makefile (new file, 114 lines)

@@ -0,0 +1,114 @@
MAKEFLAGS += --warn-undefined-variables
SHELL := /bin/bash
.SHELLFLAGS := -o pipefail -euc
.DEFAULT_GOAL := install
include Makefile.variables
.PHONY: help
help:
@echo 'Management commands for cicdtest:'
@echo
@echo 'Usage:'
@echo ' ## Develop / Test Commands'
@echo ' make build Build yq binary.'
@echo ' make install Install yq.'
@echo ' make xcompile Build cross-compiled binaries of yq.'
@echo ' make vendor Install dependencies to vendor directory.'
@echo ' make format Run code formatter.'
@echo ' make check Run static code analysis (lint).'
@echo ' make secure Run gosec.'
@echo ' make test Run tests on project.'
@echo ' make cover Run tests and capture code coverage metrics on project.'
@echo ' make clean Clean the directory tree of produced artifacts.'
@echo
@echo ' ## Utility Commands'
@echo ' make setup Configures Minishift/Docker directory mounts.'
@echo
.PHONY: clean
clean:
@rm -rf bin build cover *.out
## prefix before other make targets to run in your local dev environment
local: | quiet
@$(eval DOCKRUN= )
@mkdir -p tmp
@touch tmp/dev_image_id
quiet: # this is silly but shuts up 'Nothing to be done for `local`'
@:
prepare: tmp/dev_image_id
tmp/dev_image_id: Dockerfile.dev scripts/devtools.sh
@mkdir -p tmp
@docker rmi -f ${DEV_IMAGE} > /dev/null 2>&1 || true
@docker build -t ${DEV_IMAGE} -f Dockerfile.dev .
@docker inspect -f "{{ .ID }}" ${DEV_IMAGE} > tmp/dev_image_id
# ----------------------------------------------
# build
.PHONY: build
build: build/dev
.PHONY: build/dev
build/dev: test *.go
@mkdir -p bin/
${DOCKRUN} go build --ldflags "$(LDFLAGS)"
${DOCKRUN} bash ./scripts/acceptance.sh
## Compile the project for multiple OS and Architectures.
xcompile: check
@rm -rf build/
@mkdir -p build
${DOCKRUN} bash ./scripts/xcompile.sh
@find build -type d -exec chmod 755 {} \; || :
@find build -type f -exec chmod 755 {} \; || :
.PHONY: install
install: build
${DOCKRUN} go install
# Each of the fetch should be an entry within vendor.json; not currently included within project
.PHONY: vendor
vendor: tmp/dev_image_id
${DOCKRUN} go mod vendor
# ----------------------------------------------
# develop and test
.PHONY: format
format: vendor
${DOCKRUN} bash ./scripts/format.sh
.PHONY: check
check: format
${DOCKRUN} bash ./scripts/check.sh
.PHONY: secure
secure:
${DOCKRUN} bash ./scripts/secure.sh
.PHONY: test
test: check
${DOCKRUN} bash ./scripts/test.sh
.PHONY: cover
cover: check
@rm -rf cover/
@mkdir -p cover
${DOCKRUN} bash ./scripts/coverage.sh
@find cover -type d -exec chmod 755 {} \; || :
@find cover -type f -exec chmod 644 {} \; || :
.PHONY: release
release: xcompile
${DOCKRUN} bash ./scripts/publish.sh
# ----------------------------------------------
# utilities
.PHONY: setup
setup:
@bash ./scripts/setup.sh

Makefile.variables (new file, 38 lines)

@@ -0,0 +1,38 @@
export PROJECT = yq
IMPORT_PATH := github.com/mikefarah/${PROJECT}
export GIT_COMMIT = $(shell git rev-parse --short HEAD)
export GIT_DIRTY = $(shell test -n "$$(git status --porcelain)" && echo "+CHANGES" || true)
export GIT_DESCRIBE = $(shell git describe --tags --always)
LDFLAGS :=
LDFLAGS += -X main.GitCommit=${GIT_COMMIT}${GIT_DIRTY}
LDFLAGS += -X main.GitDescribe=${GIT_DESCRIBE}
GITHUB_TOKEN ?=
# Windows environment?
CYG_CHECK := $(shell hash cygpath 2>/dev/null && echo 1)
ifeq ($(CYG_CHECK),1)
VBOX_CHECK := $(shell hash VBoxManage 2>/dev/null && echo 1)
# Docker Toolbox (pre-Windows 10)
ifeq ($(VBOX_CHECK),1)
ROOT := /${PROJECT}
else
# Docker Windows
ROOT := $(shell cygpath -m -a "$(shell pwd)")
endif
else
# all non-windows environments
ROOT := $(shell pwd)
endif
DEV_IMAGE := ${PROJECT}_dev
DOCKRUN := docker run --rm \
-e LDFLAGS="${LDFLAGS}" \
-e GITHUB_TOKEN="${GITHUB_TOKEN}" \
-v ${ROOT}/vendor:/go/src \
-v ${ROOT}:/${PROJECT}/src/${IMPORT_PATH} \
-w /${PROJECT}/src/${IMPORT_PATH} \
${DEV_IMAGE}

README.md (355 lines changed)

@@ -1,176 +1,211 @@
# yaml
yaml is a lightweight and flexible command-line YAML processor
The aim of the project is to be the [jq](https://github.com/stedolan/jq) or sed of yaml files.
# yq
![Build](https://github.com/mikefarah/yq/workflows/Build/badge.svg) ![Docker Pulls](https://img.shields.io/docker/pulls/mikefarah/yq.svg) ![Github Releases (by Release)](https://img.shields.io/github/downloads/mikefarah/yq/total.svg) ![Go Report](https://goreportcard.com/badge/github.com/mikefarah/yq)
a lightweight and portable command-line YAML processor. `yq` uses [jq](https://github.com/stedolan/jq) like syntax but works with yaml files as well as json. It doesn't yet support everything `jq` does - but it does support the most common operations and functions, and more is being added continuously.
yq is written in go - so you can download a dependency free binary for your platform and you are good to go! If you prefer there are a variety of package managers that can be used as well as docker, all listed below.
## V4 released!
V4 is now officially released, it's quite different from V3 (sorry for the migration), however it is much more similar to ```jq```, using a similar expression syntax and therefore supports much more complex functionality!
If you've been using v3 and want/need to upgrade, checkout the [upgrade guide](https://mikefarah.gitbook.io/yq/v/v4.x/upgrading-from-v3).
Support for v3 will cease August 2021, until then, critical bug and security fixes will still get applied if required.
## Install
[Download latest binary](https://github.com/mikefarah/yaml/releases/latest) or alternatively:
### [Download the latest binary](https://github.com/mikefarah/yq/releases/latest)
### wget
Use wget to download the pre-compiled binaries:
#### Compressed via tar.gz
```bash
wget https://github.com/mikefarah/yq/releases/download/${VERSION}/${BINARY}.tar.gz -O - |\
tar xz && mv ${BINARY} /usr/bin/yq
```
go get github.com/mikefarah/yaml
#### Plain binary
```bash
wget https://github.com/mikefarah/yq/releases/download/${VERSION}/${BINARY} -O /usr/bin/yq &&\
chmod +x /usr/bin/yq
```
For instance, VERSION=v4.2.0 and BINARY=yq_linux_amd64
### MacOS / Linux via Homebrew:
Using [Homebrew](https://brew.sh/)
```
brew install yq
```
or, for the (deprecated) v3 version:
```
brew install yq@3
```
Note that for v3, as it is a versioned brew it will not add the `yq` command to your path automatically. Please follow the instructions given by brew upon installation.
### Linux via snap:
```
snap install yq
```
or, for the (deprecated) v3 version:
```
snap install yq --channel=v3/stable
```
#### Snap notes
`yq` installs with [_strict confinement_](https://docs.snapcraft.io/snap-confinement/6233) in snap, this means it doesn't have direct access to root files. To read root files you can:
```
sudo cat /etc/myfile | yq e '.a.path' -
```
And to write to a root file you can either use [sponge](https://linux.die.net/man/1/sponge):
```
sudo cat /etc/myfile | yq e '.a.path = "value"' - | sudo sponge /etc/myfile
```
or write to a temporary file:
```
sudo cat /etc/myfile | yq e '.a.path = "value"' | sudo tee /etc/myfile.tmp
sudo mv /etc/myfile.tmp /etc/myfile
rm /etc/myfile.tmp
```
### Run with Docker
#### Oneshot use:
```bash
docker run --rm -v "${PWD}":/workdir mikefarah/yq <command> [flags] [expression ]FILE...
```
#### Run commands interactively:
```bash
docker run --rm -it -v "${PWD}":/workdir --entrypoint sh mikefarah/yq
```
It can be useful to have a bash function to avoid typing the whole docker command:
```bash
yq() {
docker run --rm -i -v "${PWD}":/workdir mikefarah/yq "$@"
}
```
### Go Get:
```
GO111MODULE=on go get github.com/mikefarah/yq/v4
```
## Community Supported Installation methods
As these are supported by the community :heart: - however, they may be out of date with the officially supported releases.
# Webi
```
webi yq
```
See [webi](https://webinstall.dev/)
Supported by @adithyasunil26 (https://github.com/webinstall/webi-installers/tree/master/yq)
### Windows:
[![Chocolatey](https://img.shields.io/chocolatey/v/yq.svg)](https://chocolatey.org/packages/yq)
[![Chocolatey](https://img.shields.io/chocolatey/dt/yq.svg)](https://chocolatey.org/packages/yq)
```
choco install yq
```
Supported by @chillum (https://chocolatey.org/packages/yq)
### Mac:
Using [MacPorts](https://www.macports.org/)
```
sudo port selfupdate
sudo port install yq
```
Supported by @herbygillot (https://ports.macports.org/maintainer/github/herbygillot)
### Alpine Linux
- Enable edge/community repo by adding ```$MIRROR/alpine/edge/community``` to ```/etc/apk/repositories```
- Update database index with ```apk update```
- Install yq with ```apk add yq```
Supported by Tuan Hoang
https://pkgs.alpinelinux.org/package/edge/community/x86/yq
### On Ubuntu 16.04 or higher from Debian package:
```sh
sudo apt-key adv --keyserver keyserver.ubuntu.com --recv-keys CC86BB64
sudo add-apt-repository ppa:rmescandon/yq
sudo apt update
sudo apt install yq -y
```
Supported by @rmescandon (https://launchpad.net/~rmescandon/+archive/ubuntu/yq)
## Features
- Written in portable go, so you can download a lovely dependency free binary
- Deep read a yaml file with a given path
- Update a yaml file given a path
- Update a yaml file given a script file
- Convert from json to yaml
- Convert from yaml to json
- [Detailed documentation with many examples](https://mikefarah.gitbook.io/yq/)
- Uses similar syntax as `jq` but works with YAML and JSON files
- Fully supports multi document yaml files
- Colorized yaml output
- [Deeply traverse yaml](https://mikefarah.gitbook.io/yq/operators/traverse-read)
- [Sort yaml by keys](https://mikefarah.gitbook.io/yq/operators/sort-keys)
- Manipulate yaml [comments](https://mikefarah.gitbook.io/yq/operators/comment-operators), [styling](https://mikefarah.gitbook.io/yq/operators/style), [tags](https://mikefarah.gitbook.io/yq/operators/tag) and [anchors and aliases](https://mikefarah.gitbook.io/yq/operators/anchor-and-alias-operators).
- [Update yaml inplace](https://mikefarah.gitbook.io/yq/v/v4.x/commands/evaluate#flags)
- [Complex expressions to select and update](https://mikefarah.gitbook.io/yq/operators/select#select-and-update-matching-values-in-map)
- Keeps yaml formatting and comments when updating (though there are issues with whitespace)
- [Convert to/from json to yaml](https://mikefarah.gitbook.io/yq/v/v4.x/usage/convert)
- [Pipe data in by using '-'](https://mikefarah.gitbook.io/yq/v/v4.x/commands/evaluate)
- [General shell completion scripts (bash/zsh/fish/powershell)](https://mikefarah.gitbook.io/yq/v/v4.x/commands/shell-completion)
- [Reduce](https://mikefarah.gitbook.io/yq/operators/reduce) to merge multiple files or sum an array or other fancy things.
## [Usage](https://mikefarah.gitbook.io/yq/)
Check out the [documentation](https://mikefarah.gitbook.io/yq/) for more detailed and advanced usage.
## Read examples
```
yaml r <yaml file> <path>
Usage:
yq [flags]
yq [command]
Available Commands:
eval Apply expression to each document in each yaml file given in sequence
eval-all Loads _all_ yaml documents of _all_ yaml files and runs expression once
help Help about any command
shell-completion Generate completion script
Flags:
-C, --colors force print with colors
-e, --exit-status set exit status if there are no matches or null or false is returned
-h, --help help for yq
-I, --indent int sets indent level for output (default 2)
-i, --inplace update the yaml file inplace of first yaml file given.
-M, --no-colors force print with no colors
-N, --no-doc Don't print document separators (---)
-n, --null-input Don't read input, simply evaluate the expression given. Useful for creating yaml docs from scratch.
-P, --prettyPrint pretty print, shorthand for '... style = ""'
-j, --tojson output as json. Set indent to 0 to print json in one line.
-v, --verbose verbose mode
-V, --version Print version information and quit
Use "yq [command] --help" for more information about a command.
```
### Basic Simple Example:
Given a sample.yaml file of:
```yaml
b:
c: 2
```
then
```bash
yaml r sample.yaml b.c
```
will output the value of '2'.
### Reading from STDIN
Given a sample.yaml file of:
```bash
cat sample.yaml | yaml r - b.c
```
will output the value of '2'.
### Splat
Given a sample.yaml file of:
```yaml
---
bob:
item1:
cats: bananas
item2:
cats: apples
```
then
```bash
yaml r sample.yaml bob.*.cats
```
will output
```yaml
- bananas
- apples
```
### Handling '.' in the yaml key
Given a sample.yaml file of:
```yaml
b.x:
c: 2
```
then
```bash
yaml r sample.yaml \"b.x\".c
```
will output the value of '2'.
### Arrays
You can give an index to access a specific element:
e.g.: given a sample file of
```yaml
b:
e:
- name: fred
value: 3
- name: sam
value: 4
```
then
```
yaml r sample.yaml b.e[1].name
```
will output 'sam'
### Array Splat
e.g.: given a sample file of
```yaml
b:
e:
- name: fred
value: 3
- name: sam
value: 4
```
then
```
yaml r sample.yaml b.e[*].name
```
will output:
```
- fred
- sam
```
## Update examples
### Update to stdout
Given a sample.yaml file of:
```yaml
b:
c: 2
```
then
```bash
yaml w sample.yaml b.c cat
```
will output:
```yaml
b:
c: cat
```
### Updating yaml in-place
Given a sample.yaml file of:
```yaml
b:
c: 2
```
then
```bash
yaml w -i sample.yaml b.c cat
```
will update the sample.yaml file so that the value of 'c' is cat.
### Updating multiple values with a script
Given a sample.yaml file of:
```yaml
b:
c: 2
e:
- name: Billy Bob
```
and a script update_instructions.yaml of:
```yaml
b.c: 3
b.e[0].name: Howdy Partner
```
then
```bash
yaml w -s update_instructions.yaml sample.yaml
yq e '.a.b | length' f1.yml f2.yml
```
will output:
```yaml
b:
c: 3
e:
- name: Howdy Partner
```
## Converting to and from json
### Yaml2json
To convert output to json, use the --tojson (or -j) flag. This can be used with any command.
### json2yaml
To read in json, use the --fromjson (or -J) flag. This can be used with any command.
## Known Issues / Missing Features
- `yq` attempts to preserve comment positions and whitespace as much as possible, but it does not handle all scenarios (see https://github.com/go-yaml/yaml/tree/v3 for details)

action.yml (new file, 13 lines)

@@ -0,0 +1,13 @@
name: 'yq - portable yaml processor'
description: 'create, read, update, delete, merge, validate and do more with yaml'
icon: command
color: gray-dark
inputs:
cmd:
description: 'The Command which should be run'
required: true
runs:
using: 'docker'
image: 'github-action/Dockerfile'
args:
- ${{ inputs.cmd }}

cmd/commands_test.go (new file, 83 lines)

@@ -0,0 +1,83 @@
package cmd
// import (
// 	"strings"
// 	"testing"

// 	"github.com/mikefarah/yq/v3/test"
// 	"github.com/spf13/cobra"
// )

// func getRootCommand() *cobra.Command {
// 	return New()
// }

// func TestRootCmd(t *testing.T) {
// 	cmd := getRootCommand()
// 	result := test.RunCmd(cmd, "")
// 	if result.Error != nil {
// 		t.Error(result.Error)
// 	}

// 	if !strings.Contains(result.Output, "Usage:") {
// 		t.Error("Expected usage message to be printed out, but the usage message was not found.")
// 	}
// }

// func TestRootCmd_Help(t *testing.T) {
// 	cmd := getRootCommand()
// 	result := test.RunCmd(cmd, "--help")
// 	if result.Error != nil {
// 		t.Error(result.Error)
// 	}

// 	if !strings.Contains(result.Output, "yq is a lightweight and portable command-line YAML processor. It aims to be the jq or sed of yaml files.") {
// 		t.Error("Expected usage message to be printed out, but the usage message was not found.")
// 	}
// }

// func TestRootCmd_VerboseLong(t *testing.T) {
// 	cmd := getRootCommand()
// 	result := test.RunCmd(cmd, "--verbose")
// 	if result.Error != nil {
// 		t.Error(result.Error)
// 	}

// 	if !verbose {
// 		t.Error("Expected verbose to be true")
// 	}
// }

// func TestRootCmd_VerboseShort(t *testing.T) {
// 	cmd := getRootCommand()
// 	result := test.RunCmd(cmd, "-v")
// 	if result.Error != nil {
// 		t.Error(result.Error)
// 	}

// 	if !verbose {
// 		t.Error("Expected verbose to be true")
// 	}
// }

// func TestRootCmd_VersionShort(t *testing.T) {
// 	cmd := getRootCommand()
// 	result := test.RunCmd(cmd, "-V")
// 	if result.Error != nil {
// 		t.Error(result.Error)
// 	}

// 	if !strings.Contains(result.Output, "yq version") {
// 		t.Error("expected version message to be printed out, but the message was not found.")
// 	}
// }

// func TestRootCmd_VersionLong(t *testing.T) {
// 	cmd := getRootCommand()
// 	result := test.RunCmd(cmd, "--version")
// 	if result.Error != nil {
// 		t.Error(result.Error)
// 	}

// 	if !strings.Contains(result.Output, "yq version") {
// 		t.Error("expected version message to be printed out, but the message was not found.")
// 	}
// }

19
cmd/constant.go Normal file

@ -0,0 +1,19 @@
package cmd
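// Shared package-level settings for the yq commands; most of these are assumed to back command-line flags.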
var unwrapScalar = true
var writeInplace = false
var outputToJSON = false
var exitStatus = false
var forceColor = false
var forceNoColor = false
var colorsEnabled = false
var indent = 2
var noDocSeparators = false
var nullInput = false
var verbose = false
var version = false
var prettyPrint = false
var completedSuccessfully = false

104
cmd/evaluate_all_command.go Normal file

@ -0,0 +1,104 @@
package cmd
import (
	"errors"
	"fmt"
	"os"

	"github.com/mikefarah/yq/v4/pkg/yqlib"
	"github.com/spf13/cobra"
)

func createEvaluateAllCommand() *cobra.Command {
	var cmdEvalAll = &cobra.Command{
		Use:     "eval-all [expression] [yaml_file1]...",
		Aliases: []string{"ea"},
		Short:   "Loads _all_ yaml documents of _all_ yaml files and runs expression once",
		Example: `
# merges f2.yml into f1.yml (inplace)
yq eval-all --inplace 'select(fileIndex == 0) * select(fileIndex == 1)' f1.yml f2.yml
# use '-' as a filename to read from STDIN
cat file2.yml | yq ea '.a.b' file1.yml - file3.yml
`,
		Long: "Evaluate All:\nUseful when you need to run an expression across several yaml documents or files. Consumes more memory than eval",
		RunE: evaluateAll,
	}
	return cmdEvalAll
}

func evaluateAll(cmd *cobra.Command, args []string) error {
	cmd.SilenceUsage = true

	// 0 args, read std in
	// 1 arg, null input, process expression
	// 1 arg, read file in sequence
	// 2+ args, [0] = expression, file the rest

	var err error
	stat, _ := os.Stdin.Stat()
	pipingStdIn := (stat.Mode() & os.ModeCharDevice) == 0

	out := cmd.OutOrStdout()

	fileInfo, _ := os.Stdout.Stat()
	if forceColor || (!forceNoColor && (fileInfo.Mode()&os.ModeCharDevice) != 0) {
		colorsEnabled = true
	}

	firstFileIndex := -1
	if !nullInput && len(args) == 1 {
		firstFileIndex = 0
	} else if len(args) > 1 {
		firstFileIndex = 1
	}

	if writeInplace && (firstFileIndex == -1) {
		return fmt.Errorf("Write inplace flag only applicable when giving an expression and at least one file")
	}

	if writeInplace {
		// only use colors if its forced
		colorsEnabled = forceColor
		writeInPlaceHandler := yqlib.NewWriteInPlaceHandler(args[firstFileIndex])
		out, err = writeInPlaceHandler.CreateTempFile()
		if err != nil {
			return err
		}
		// need to indirectly call the function so that completedSuccessfully is
		// passed when we finish execution as opposed to now
		defer func() { writeInPlaceHandler.FinishWriteInPlace(completedSuccessfully) }()
	}

	if nullInput && len(args) > 1 {
		return errors.New("Cannot pass files in when using null-input flag")
	}

	printer := yqlib.NewPrinter(out, outputToJSON, unwrapScalar, colorsEnabled, indent, !noDocSeparators)

	allAtOnceEvaluator := yqlib.NewAllAtOnceEvaluator()
	switch len(args) {
	case 0:
		if pipingStdIn {
			err = allAtOnceEvaluator.EvaluateFiles(processExpression(""), []string{"-"}, printer)
		} else {
			cmd.Println(cmd.UsageString())
			return nil
		}
	case 1:
		if nullInput {
			err = yqlib.NewStreamEvaluator().EvaluateNew(processExpression(args[0]), printer)
		} else {
			err = allAtOnceEvaluator.EvaluateFiles(processExpression(""), []string{args[0]}, printer)
		}
	default:
		err = allAtOnceEvaluator.EvaluateFiles(processExpression(args[0]), args[1:], printer)
	}
	completedSuccessfully = err == nil

	if err == nil && exitStatus && !printer.PrintedAnything() {
		return errors.New("no matches found")
	}

	return err
}
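As a sketch of how the argument handling above plays out (file names are illustrative):
```bash
# no arguments: evaluate STDIN with an empty expression
cat f1.yml | yq ea

# one argument: treated as a file (or as an expression when the null-input flag is set)
yq ea f1.yml

# two or more arguments: the first is the expression, the rest are files
yq ea '.a' f1.yml f2.yml
```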

Some files were not shown because too many files have changed in this diff.