Script 'mail_helper' called by obssrc
Hello community,
here is the log from the commit of package yq for openSUSE:Factory checked in at 2023-03-31 21:16:02
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
Comparing /work/SRC/openSUSE:Factory/yq (Old)
and /work/SRC/openSUSE:Factory/.yq.new.31432 (New)
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
Package is "yq"
Fri Mar 31 21:16:02 2023 rev:9 rq:1076429 version:4.33.2
Changes:
--------
--- /work/SRC/openSUSE:Factory/yq/yq.changes 2023-03-28 17:52:06.787770334 +0200
+++ /work/SRC/openSUSE:Factory/.yq.new.31432/yq.changes 2023-03-31 21:16:10.726569401 +0200
@@ -1,0 +2,11 @@
+Fri Mar 31 08:42:46 UTC 2023 - Dirk Müller
+
+- update to 4.33.2:
+ * Add ``--nul-output|-0`` flag to separate element with NUL
+ character (#1550) Thanks @vaab!
+ * Add removable-media interface plug declaration to the snap
+ packaging (#1618) Thanks @brlin-tw!
+ * Scalar output now handled in csv, tsv and property files
+ * Bumped dependency versions
+
+-------------------------------------------------------------------
Old:
----
yq-4.33.1.tar.gz
New:
----
yq-4.33.2.tar.gz
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
Other differences:
------------------
++++++ yq.spec ++++++
--- /var/tmp/diff_new_pack.KMyS95/_old 2023-03-31 21:16:11.742574249 +0200
+++ /var/tmp/diff_new_pack.KMyS95/_new 2023-03-31 21:16:11.762574344 +0200
@@ -20,7 +20,7 @@
%global import_path %{provider_prefix}
Name: yq
-Version: 4.33.1
+Version: 4.33.2
Release: 0
Summary: A portable command-line YAML processor
License: MIT
++++++ vendor.tar.gz ++++++
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/vendor/github.com/alecthomas/participle/v2/COPYING new/vendor/github.com/alecthomas/participle/v2/COPYING
--- old/vendor/github.com/alecthomas/participle/v2/COPYING 2023-03-27 21:29:51.000000000 +0200
+++ new/vendor/github.com/alecthomas/participle/v2/COPYING 2023-03-31 09:53:37.000000000 +0200
@@ -1,4 +1,4 @@
-Copyright (C) 2017 Alec Thomas
+Copyright (C) 2017-2022 Alec Thomas
Permission is hereby granted, free of charge, to any person obtaining a copy of
this software and associated documentation files (the "Software"), to deal in
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/vendor/github.com/alecthomas/participle/v2/lexer/api.go new/vendor/github.com/alecthomas/participle/v2/lexer/api.go
--- old/vendor/github.com/alecthomas/participle/v2/lexer/api.go 2023-03-27 21:29:51.000000000 +0200
+++ new/vendor/github.com/alecthomas/participle/v2/lexer/api.go 2023-03-31 09:53:37.000000000 +0200
@@ -70,7 +70,7 @@
//
// eg.
//
-// lex = lexer.Must(lexer.Build(`Symbol = "symbol" .`))
+// lex = lexer.Must(lexer.Build(`Symbol = "symbol" .`))
func Must(def Definition, err error) Definition {
if err != nil {
panic(err)
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/vendor/github.com/alecthomas/participle/v2/lexer/codegen.go new/vendor/github.com/alecthomas/participle/v2/lexer/codegen.go
--- old/vendor/github.com/alecthomas/participle/v2/lexer/codegen.go 2023-03-27 21:29:51.000000000 +0200
+++ new/vendor/github.com/alecthomas/participle/v2/lexer/codegen.go 1970-01-01 01:00:00.000000000 +0100
@@ -1,439 +0,0 @@
-package lexer
-
-import (
- "fmt"
- "io"
- "regexp"
- "regexp/syntax"
- "sort"
- "text/template"
- "unicode/utf8"
-)
-
-var codegenBackrefRe = regexp.MustCompile(`(\\+)(\d)`)
-
-var codegenTemplate *template.Template = template.Must(template.New("lexgen").Funcs(template.FuncMap{
- "IsPush": func(r Rule) string {
- if p, ok := r.Action.(ActionPush); ok {
- return p.State
- }
- return ""
- },
- "IsPop": func(r Rule) bool {
- _, ok := r.Action.(ActionPop)
- return ok
- },
- "IsReturn": func(r Rule) bool {
- return r == ReturnRule
- },
- "OrderRules": orderRules,
- "HaveBackrefs": func(def *StatefulDefinition, state string) bool {
- for _, rule := range def.Rules()[state] {
- if codegenBackrefRe.MatchString(rule.Pattern) {
- return true
- }
- }
- return false
- },
-}).Parse(`
-// Code generated by Participle. DO NOT EDIT.
-package {{.Package}}
-
-import (
- "io"
- "strings"
- "unicode/utf8"
- "regexp/syntax"
-
- "github.com/alecthomas/participle/v2"
- "github.com/alecthomas/participle/v2/lexer"
-)
-
-var _ syntax.Op
-
-var Lexer lexer.Definition = definitionImpl{}
-
-type definitionImpl struct {}
-
-func (definitionImpl) Symbols() map[string]lexer.TokenType {
- return map[string]lexer.TokenType{
-{{- range $sym, $rn := .Def.Symbols}}
- "{{$sym}}": {{$rn}},
-{{- end}}
- }
-}
-
-func (definitionImpl) LexString(filename string, s string) (lexer.Lexer, error) {
- return &lexerImpl{
- s: s,
- pos: lexer.Position{
- Filename: filename,
- Line: 1,
- Column: 1,
- },
- states: []lexerState{lexerState{name: "Root"}},
- }, nil
-}
-
-func (d definitionImpl) LexBytes(filename string, b []byte) (lexer.Lexer, error) {
- return d.LexString(filename, string(b))
-}
-
-func (d definitionImpl) Lex(filename string, r io.Reader) (lexer.Lexer, error) {
- s := &strings.Builder{}
- _, err := io.Copy(s, r)
- if err != nil {
- return nil, err
- }
- return d.LexString(filename, s.String())
-}
-
-type lexerState struct {
- name string
- groups []string
-}
-
-type lexerImpl struct {
- s string
- p int
- pos lexer.Position
- states []lexerState
-}
-
-func (l *lexerImpl) Next() (lexer.Token, error) {
- if l.p == len(l.s) {
- return lexer.EOFToken(l.pos), nil
- }
- var (
- state = l.states[len(l.states)-1]
- groups []int
- sym lexer.TokenType
- )
- switch state.name {
-{{- range $state := .Def.Rules|OrderRules}}
- case "{{$state.Name}}":
-{{- range $i, $rule := $state.Rules}}
- {{- if $i}} else {{end -}}
-{{- if .Pattern -}}
- if match := match{{.Name}}(l.s, l.p); match[1] != 0 {
- sym = {{index $.Def.Symbols .Name}}
- groups = match[:]
-{{- else if .|IsReturn -}}
- if true {
-{{- end}}
-{{- if .|IsPush}}
- l.states = append(l.states, lexerState{name: "{{.|IsPush}}"{{if HaveBackrefs $.Def $state.Name}}, groups: l.sgroups(groups){{end}}})
-{{- else if (or (.|IsPop) (.|IsReturn))}}
- l.states = l.states[:len(l.states)-1]
-{{- if .|IsReturn}}
- return l.Next()
-{{- end}}
-{{- else if not .Action}}
-{{- else}}
- Unsupported action {{.Action}}
-{{- end}}
- }
-{{- end}}
-{{- end}}
- }
- if groups == nil {
- sample := []rune(l.s[l.p:])
- if len(sample) > 16 {
- sample = append(sample[:16], []rune("...")...)
- }
- return lexer.Token{}, participle.Errorf(l.pos, "invalid input text %q", sample)
- }
- pos := l.pos
- span := l.s[groups[0]:groups[1]]
- l.p = groups[1]
- l.pos.Advance(span)
- return lexer.Token{
- Type: sym,
- Value: span,
- Pos: pos,
- }, nil
-}
-
-func (l *lexerImpl) sgroups(match []int) []string {
- sgroups := make([]string, len(match)/2)
- for i := 0; i < len(match)-1; i += 2 {
- sgroups[i/2] = l.s[l.p+match[i]:l.p+match[i+1]]
- }
- return sgroups
-}
-
-`))
-
-// ExperimentalGenerateLexer generates Go code implementing the given stateful lexer.
-//
-// The generated code should in general by around 10x faster and produce zero garbage per token.
-//
-// NOTE: This is an experimental interface and subject to change.
-func ExperimentalGenerateLexer(w io.Writer, pkg string, def *StatefulDefinition) error {
- type ctx struct {
- Package string
- Def *StatefulDefinition
- }
- rules := def.Rules()
- err := codegenTemplate.Execute(w, ctx{pkg, def})
- if err != nil {
- return err
- }
- seen := map[string]bool{} // Rules can be duplicated by Include().
- for _, rules := range orderRules(rules) {
- for _, rule := range rules.Rules {
- if rule.Name == "" {
- panic(rule)
- }
- if seen[rule.Name] {
- continue
- }
- seen[rule.Name] = true
- fmt.Fprintf(w, "\n")
- err := generateRegexMatch(w, rule.Name, rule.Pattern)
- if err != nil {
- return err
- }
- }
- }
- return nil
-}
-
-type orderedRule struct {
- Name string
- Rules []Rule
-}
-
-func orderRules(rules Rules) []orderedRule {
- orderedRules := []orderedRule{}
- for name, rules := range rules {
- orderedRules = append(orderedRules, orderedRule{
- Name: name,
- Rules: rules,
- })
- }
- sort.Slice(orderedRules, func(i, j int) bool {
- return orderedRules[i].Name < orderedRules[j].Name
- })
- return orderedRules
-}
-
-func generateRegexMatch(w io.Writer, name, pattern string) error {
- re, err := syntax.Parse(pattern, syntax.Perl)
- if err != nil {
- return err
- }
- ids := map[string]int{}
- idn := 0
- reid := func(re *syntax.Regexp) int {
- key := re.Op.String() + ":" + re.String()
- id, ok := ids[key]
- if ok {
- return id
- }
- id = idn
- idn++
- ids[key] = id
- return id
- }
- exists := func(re *syntax.Regexp) bool {
- key := re.Op.String() + ":" + re.String()
- _, ok := ids[key]
- return ok
- }
- re = re.Simplify()
- fmt.Fprintf(w, "// %s\n", re)
- fmt.Fprintf(w, "func match%s(s string, p int) (groups [%d]int) {\n", name, 2*re.MaxCap()+2)
- flattened := flatten(re)
-
- // Fast-path a single literal.
- if len(flattened) == 1 && re.Op == syntax.OpLiteral {
- n := utf8.RuneCountInString(string(re.Rune))
- if n == 1 {
- fmt.Fprintf(w, "if p < len(s) && s[p] == %q {\n", re.Rune[0])
- } else {
- fmt.Fprintf(w, "if p+%d < len(s) && s[p:p+%d] == %q {\n", n, n, string(re.Rune))
- }
- fmt.Fprintf(w, "groups[0] = p\n")
- fmt.Fprintf(w, "groups[1] = p + %d\n", n)
- fmt.Fprintf(w, "}\n")
- fmt.Fprintf(w, "return\n")
- fmt.Fprintf(w, "}\n")
- return nil
- }
- for _, re := range flattened {
- if exists(re) {
- continue
- }
- fmt.Fprintf(w, "// %s (%s)\n", re, re.Op)
- fmt.Fprintf(w, "l%d := func(s string, p int) int {\n", reid(re))
- if re.Flags&syntax.NonGreedy != 0 {
- panic("non-greedy match not supported: " + re.String())
- }
- switch re.Op {
- case syntax.OpNoMatch: // matches no strings
- fmt.Fprintf(w, "return p\n")
-
- case syntax.OpEmptyMatch: // matches empty string
- fmt.Fprintf(w, "if len(s) == 0 { return p }\n")
- fmt.Fprintf(w, "return -1\n")
-
- case syntax.OpLiteral: // matches Runes sequence
- n := utf8.RuneCountInString(string(re.Rune))
- if n == 1 {
- fmt.Fprintf(w, "if p < len(s) && s[p] == %q { return p+1 }\n", re.Rune[0])
- } else {
- fmt.Fprintf(w, "if p+%d < len(s) && s[p:p+%d] == %q { return p+%d }\n", n, n, string(re.Rune), n)
- }
- fmt.Fprintf(w, "return -1\n")
-
- case syntax.OpCharClass: // matches Runes interpreted as range pair list
- fmt.Fprintf(w, "if len(s) <= p { return -1 }\n")
- needDecode := false
- for i := 0; i < len(re.Rune); i += 2 {
- l, r := re.Rune[i], re.Rune[i+1]
- ln, rn := utf8.RuneLen(l), utf8.RuneLen(r)
- if ln != 1 || rn != 1 {
- needDecode = true
- break
- }
- }
- if needDecode {
- fmt.Fprintf(w, "var (rn rune; n int)\n")
- decodeRune(w, "p", "rn", "n")
- } else {
- fmt.Fprintf(w, "rn := s[p]\n")
- }
- fmt.Fprintf(w, "switch {\n")
- for i := 0; i < len(re.Rune); i += 2 {
- l, r := re.Rune[i], re.Rune[i+1]
- ln, rn := utf8.RuneLen(l), utf8.RuneLen(r)
- if ln == 1 && rn == 1 {
- if l == r {
- fmt.Fprintf(w, "case rn == %q: return p+1\n", l)
- } else {
- fmt.Fprintf(w, "case rn >= %q && rn <= %q: return p+1\n", l, r)
- }
- } else {
- if l == r {
- fmt.Fprintf(w, "case rn == %q: return p+n\n", l)
- } else {
- fmt.Fprintf(w, "case rn >= %q && rn <= %q: return p+n\n", l, r)
- }
- }
- }
- fmt.Fprintf(w, "}\n")
- fmt.Fprintf(w, "return -1\n")
-
- case syntax.OpAnyCharNotNL: // matches any character except newline
- fmt.Fprintf(w, "var (rn rune; n int)\n")
- decodeRune(w, "p", "rn", "n")
- fmt.Fprintf(w, "if len(s) <= p+n || rn == '\\n' { return -1 }\n")
- fmt.Fprintf(w, "return p+n\n")
-
- case syntax.OpAnyChar: // matches any character
- fmt.Fprintf(w, "var n int\n")
- fmt.Fprintf(w, "if s[p] < utf8.RuneSelf {\n")
- fmt.Fprintf(w, " n = 1\n")
- fmt.Fprintf(w, "} else {\n")
- fmt.Fprintf(w, " _, n = utf8.DecodeRuneInString(s[p:])\n")
- fmt.Fprintf(w, "}\n")
- fmt.Fprintf(w, "if len(s) <= p+n { return -1 }\n")
- fmt.Fprintf(w, "return p+n\n")
-
- case syntax.OpWordBoundary, syntax.OpNoWordBoundary,
- syntax.OpBeginText, syntax.OpEndText,
- syntax.OpBeginLine, syntax.OpEndLine:
- fmt.Fprintf(w, "var l, u rune = -1, -1\n")
- fmt.Fprintf(w, "if p == 0 {\n")
- decodeRune(w, "0", "u", "_")
- fmt.Fprintf(w, "} else if p == len(s) {\n")
- fmt.Fprintf(w, " l, _ = utf8.DecodeLastRuneInString(s)\n")
- fmt.Fprintf(w, "} else {\n")
- fmt.Fprintf(w, " var ln int\n")
- decodeRune(w, "p", "l", "ln")
- fmt.Fprintf(w, " if p+ln <= len(s) {\n")
- decodeRune(w, "p+ln", "u", "_")
- fmt.Fprintf(w, " }\n")
- fmt.Fprintf(w, "}\n")
- fmt.Fprintf(w, "op := syntax.EmptyOpContext(l, u)\n")
- lut := map[syntax.Op]string{
- syntax.OpWordBoundary: "EmptyWordBoundary",
- syntax.OpNoWordBoundary: "EmptyNoWordBoundary",
- syntax.OpBeginText: "EmptyBeginText",
- syntax.OpEndText: "EmptyEndText",
- syntax.OpBeginLine: "EmptyBeginLine",
- syntax.OpEndLine: "EmptyEndLine",
- }
- fmt.Fprintf(w, "if op & syntax.%s != 0 { return p }\n", lut[re.Op])
- fmt.Fprintf(w, "return -1\n")
-
- case syntax.OpCapture: // capturing subexpression with index Cap, optional name Name
- fmt.Fprintf(w, "np := l%d(s, p)\n", reid(re.Sub0[0]))
- fmt.Fprintf(w, "if np != -1 {\n")
- fmt.Fprintf(w, " groups[%d] = p\n", re.Cap*2)
- fmt.Fprintf(w, " groups[%d] = np\n", re.Cap*2+1)
- fmt.Fprintf(w, "}\n")
- fmt.Fprintf(w, "return np")
-
- case syntax.OpStar: // matches Sub[0] zero or more times
- fmt.Fprintf(w, "for len(s) > p {\n")
- fmt.Fprintf(w, "if np := l%d(s, p); np == -1 { return p } else { p = np }\n", reid(re.Sub0[0]))
- fmt.Fprintf(w, "}\n")
- fmt.Fprintf(w, "return p\n")
-
- case syntax.OpPlus: // matches Sub[0] one or more times
- fmt.Fprintf(w, "if p = l%d(s, p); p == -1 { return -1 }\n", reid(re.Sub0[0]))
- fmt.Fprintf(w, "for len(s) > p {\n")
- fmt.Fprintf(w, "if np := l%d(s, p); np == -1 { return p } else { p = np }\n", reid(re.Sub0[0]))
- fmt.Fprintf(w, "}\n")
- fmt.Fprintf(w, "return p\n")
-
- case syntax.OpQuest: // matches Sub[0] zero or one times
- fmt.Fprintf(w, "if np := l%d(s, p); np != -1 { return np }\n", reid(re.Sub0[0]))
- fmt.Fprintf(w, "return p\n")
-
- case syntax.OpRepeat: // matches Sub[0] at least Min times, at most Max (Max == -1 is no limit)
- panic("??")
-
- case syntax.OpConcat: // matches concatenation of Subs
- for _, sub := range re.Sub {
- fmt.Fprintf(w, "if p = l%d(s, p); p == -1 { return -1 }\n", reid(sub))
- }
- fmt.Fprintf(w, "return p\n")
-
- case syntax.OpAlternate: // matches alternation of Subs
- for _, sub := range re.Sub {
- fmt.Fprintf(w, "if np := l%d(s, p); np != -1 { return np }\n", reid(sub))
- }
- fmt.Fprintf(w, "return -1\n")
- }
- fmt.Fprintf(w, "}\n")
- }
- fmt.Fprintf(w, "np := l%d(s, p)\n", reid(re))
- fmt.Fprintf(w, "if np == -1 {\n")
- fmt.Fprintf(w, " return\n")
- fmt.Fprintf(w, "}\n")
- fmt.Fprintf(w, "groups[0] = p\n")
- fmt.Fprintf(w, "groups[1] = np\n")
- fmt.Fprintf(w, "return\n")
- fmt.Fprintf(w, "}\n")
- return nil
-}
-
-// This exists because of https://github.com/golang/go/issues/31666
-func decodeRune(w io.Writer, offset string, rn string, n string) {
- fmt.Fprintf(w, "if s[%s] < utf8.RuneSelf {\n", offset)
- fmt.Fprintf(w, " %s, %s = rune(s[%s]), 1\n", rn, n, offset)
- fmt.Fprintf(w, "} else {\n")
- fmt.Fprintf(w, " %s, %s = utf8.DecodeRuneInString(s[%s:])\n", rn, n, offset)
- fmt.Fprintf(w, "}\n")
-}
-
-func flatten(re *syntax.Regexp) (out []*syntax.Regexp) {
- for _, sub := range re.Sub {
- out = append(out, flatten(sub)...)
- }
- out = append(out, re)
- return
-}
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/vendor/github.com/alecthomas/participle/v2/lexer/peek.go new/vendor/github.com/alecthomas/participle/v2/lexer/peek.go
--- old/vendor/github.com/alecthomas/participle/v2/lexer/peek.go 2023-03-27 21:29:51.000000000 +0200
+++ new/vendor/github.com/alecthomas/participle/v2/lexer/peek.go 2023-03-31 09:53:37.000000000 +0200
@@ -2,16 +2,23 @@
// PeekingLexer supports arbitrary lookahead as well as cloning.
type PeekingLexer struct {
- rawCursor RawCursor
- cursor int
- eof Token
- tokens []Token
- elide map[TokenType]bool
+ Checkpoint
+ tokens []Token
+ elide map[TokenType]bool
}
// RawCursor index in the token stream.
type RawCursor int
+// Checkpoint wraps the mutable state of the PeekingLexer.
+//
+// Copying and restoring just this state is a bit faster than copying the entire PeekingLexer.
+type Checkpoint struct {
+ rawCursor RawCursor // The raw position of the next possibly elided token
+ nextCursor RawCursor // The raw position of the next non-elided token
+ cursor int // Index of the next non-elided token among other non-elided tokens
+}
+
// Upgrade a Lexer to a PeekingLexer with arbitrary lookahead.
//
// "elide" is a slice of token types to elide from processing.
@@ -27,12 +34,12 @@
if err != nil {
return r, err
}
+ r.tokens = append(r.tokens, t)
if t.EOF() {
- r.eof = t
break
}
- r.tokens = append(r.tokens, t)
}
+ r.advanceToNonElided()
return r, nil
}
@@ -42,39 +49,48 @@
}
// Cursor position in tokens, excluding elided tokens.
-func (p *PeekingLexer) Cursor() int {
- return p.cursor
+func (c Checkpoint) Cursor() int {
+ return c.cursor
}
// RawCursor position in tokens, including elided tokens.
-func (p *PeekingLexer) RawCursor() RawCursor {
- return p.rawCursor
+func (c Checkpoint) RawCursor() RawCursor {
+ return c.rawCursor
}
// Next consumes and returns the next token.
-func (p *PeekingLexer) Next() Token {
- for int(p.rawCursor) < len(p.tokens) {
- t := p.tokens[p.rawCursor]
- p.rawCursor++
- if p.elide[t.Type] {
- continue
- }
- p.cursor++
+func (p *PeekingLexer) Next() *Token {
+ t := &p.tokens[p.nextCursor]
+ if t.EOF() {
return t
}
- return p.eof
+ p.nextCursor++
+ p.rawCursor = p.nextCursor
+ p.cursor++
+ p.advanceToNonElided()
+ return t
+}
+
+// Peek ahead at the next non-elided token.
+func (p *PeekingLexer) Peek() *Token {
+ return &p.tokens[p.nextCursor]
+}
+
+// RawPeek peeks ahead at the next raw token.
+//
+// Unlike Peek, this will include elided tokens.
+func (p *PeekingLexer) RawPeek() *Token {
+ return &p.tokens[p.rawCursor]
}
-// Peek ahead at the next token.
-func (p *PeekingLexer) Peek() Token {
- for i := int(p.rawCursor); i < len(p.tokens); i++ {
- t := p.tokens[i]
- if p.elide[t.Type] {
- continue
+// advanceToNonElided advances nextCursor to the closest non-elided token
+func (p *PeekingLexer) advanceToNonElided() {
+ for ; ; p.nextCursor++ {
+ t := &p.tokens[p.nextCursor]
+ if t.EOF() || !p.elide[t.Type] {
+ return
}
- return t
}
- return p.eof
}
// PeekAny peeks forward over elided and non-elided tokens.
@@ -85,42 +101,33 @@
// The returned RawCursor position is the location of the returned token.
// Use FastForward to move the internal cursors forward.
func (p *PeekingLexer) PeekAny(match func(Token) bool) (t Token, rawCursor RawCursor) {
- tokenCount := RawCursor(len(p.tokens))
- for i := p.rawCursor; i < tokenCount; i++ {
+ for i := p.rawCursor; ; i++ {
t = p.tokens[i]
- if match(t) || !p.elide[t.Type] {
+ if t.EOF() || match(t) || !p.elide[t.Type] {
return t, i
}
}
- return p.eof, tokenCount
}
// FastForward the internal cursors to this RawCursor position.
func (p *PeekingLexer) FastForward(rawCursor RawCursor) {
- tokenCount := RawCursor(len(p.tokens))
- for ; p.rawCursor <= rawCursor && p.rawCursor < tokenCount; p.rawCursor++ {
- t := p.tokens[p.rawCursor]
- if p.elide[t.Type] {
- continue
+ for ; p.rawCursor <= rawCursor; p.rawCursor++ {
+ t := &p.tokens[p.rawCursor]
+ if t.EOF() {
+ break
+ }
+ if !p.elide[t.Type] {
+ p.cursor++
}
- p.cursor++
}
+ p.nextCursor = p.rawCursor
+ p.advanceToNonElided()
}
-// RawPeek peeks ahead at the next raw token.
-//
-// Unlike Peek, this will include elided tokens.
-func (p *PeekingLexer) RawPeek() Token {
- if int(p.rawCursor) < len(p.tokens) {
- return p.tokens[p.rawCursor]
- }
- return p.eof
+func (p *PeekingLexer) MakeCheckpoint() Checkpoint {
+ return p.Checkpoint
}
-// Clone creates a clone of this PeekingLexer at its current token.
-//
-// The parent and clone are completely independent.
-func (p *PeekingLexer) Clone() *PeekingLexer {
- clone := *p
- return &clone
+func (p *PeekingLexer) LoadCheckpoint(checkpoint Checkpoint) {
+ p.Checkpoint = checkpoint
}
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/vendor/github.com/alecthomas/participle/v2/lexer/simple.go new/vendor/github.com/alecthomas/participle/v2/lexer/simple.go
--- old/vendor/github.com/alecthomas/participle/v2/lexer/simple.go 2023-03-27 21:29:51.000000000 +0200
+++ new/vendor/github.com/alecthomas/participle/v2/lexer/simple.go 2023-03-31 09:53:37.000000000 +0200
@@ -9,8 +9,8 @@
// MustSimple creates a new Stateful lexer with only a single root state.
//
// It panics if there is an error.
-func MustSimple(rules []SimpleRule, options ...Option) *StatefulDefinition {
- def, err := NewSimple(rules, options...)
+func MustSimple(rules []SimpleRule) *StatefulDefinition {
+ def, err := NewSimple(rules)
if err != nil {
panic(err)
}
@@ -18,10 +18,10 @@
}
// NewSimple creates a new Stateful lexer with only a single root state.
-func NewSimple(rules []SimpleRule, options ...Option) (*StatefulDefinition, error) {
+func NewSimple(rules []SimpleRule) (*StatefulDefinition, error) {
fullRules := make([]Rule, len(rules))
for i, rule := range rules {
fullRules[i] = Rule{Name: rule.Name, Pattern: rule.Pattern}
}
- return New(Rules{"Root": fullRules}, options...)
+ return New(Rules{"Root": fullRules})
}
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/vendor/github.com/alecthomas/participle/v2/lexer/stateful.go new/vendor/github.com/alecthomas/participle/v2/lexer/stateful.go
--- old/vendor/github.com/alecthomas/participle/v2/lexer/stateful.go 2023-03-27 21:29:51.000000000 +0200
+++ new/vendor/github.com/alecthomas/participle/v2/lexer/stateful.go 2023-03-31 09:53:37.000000000 +0200
@@ -1,6 +1,7 @@
package lexer
import (
+ "encoding/json"
"errors"
"fmt"
"io"
@@ -16,14 +17,101 @@
backrefReplace = regexp.MustCompile(`(\\+)(\d)`)
)
-// Option for modifying how the Lexer works.
-type Option func(d *StatefulDefinition)
-
// A Rule matching input and possibly changing state.
type Rule struct {
- Name string
- Pattern string
- Action Action
+ Name string `json:"name"`
+ Pattern string `json:"pattern"`
+ Action Action `json:"action"`
+}
+
+var _ json.Marshaler = &Rule{}
+var _ json.Unmarshaler = &Rule{}
+
+type jsonRule struct {
+ Name string `json:"name,omitempty"`
+ Pattern string `json:"pattern,omitempty"`
+ Action json.RawMessage `json:"action,omitempty"`
+}
+
+func (r *Rule) UnmarshalJSON(data []byte) error {
+ jrule := jsonRule{}
+ err := json.Unmarshal(data, &jrule)
+ if err != nil {
+ return err
+ }
+ r.Name = jrule.Name
+ r.Pattern = jrule.Pattern
+ jaction := struct {
+ Kind string `json:"kind"`
+ }{}
+ if jrule.Action == nil {
+ return nil
+ }
+ err = json.Unmarshal(jrule.Action, &jaction)
+ if err != nil {
+ return fmt.Errorf("could not unmarshal action %q: %w", string(jrule.Action), err)
+ }
+ var action Action
+ switch jaction.Kind {
+ case "push":
+ actual := ActionPush{}
+ if err := json.Unmarshal(jrule.Action, &actual); err != nil {
+ return err
+ }
+ action = actual
+ case "pop":
+ actual := ActionPop{}
+ if err := json.Unmarshal(jrule.Action, &actual); err != nil {
+ return err
+ }
+ action = actual
+ case "include":
+ actual := include{}
+ if err := json.Unmarshal(jrule.Action, &actual); err != nil {
+ return err
+ }
+ action = actual
+ case "":
+ default:
+ return fmt.Errorf("unknown action %q", jaction.Kind)
+ }
+ r.Action = action
+ return nil
+}
+
+func (r *Rule) MarshalJSON() ([]byte, error) {
+ jrule := jsonRule{
+ Name: r.Name,
+ Pattern: r.Pattern,
+ }
+ if r.Action != nil {
+ actionData, err := json.Marshal(r.Action)
+ if err != nil {
+ return nil, fmt.Errorf("failed to map action: %w", err)
+ }
+ jaction := map[string]interface{}{}
+ err = json.Unmarshal(actionData, &jaction)
+ if err != nil {
+ return nil, fmt.Errorf("failed to map action: %w", err)
+ }
+ switch r.Action.(type) {
+ case nil:
+ case ActionPop:
+ jaction["kind"] = "pop"
+ case ActionPush:
+ jaction["kind"] = "push"
+ case include:
+ jaction["kind"] = "include"
+ default:
+ return nil, fmt.Errorf("unsupported action %T", r.Action)
+ }
+ actionJSON, err := json.Marshal(jaction)
+ if err != nil {
+ return nil, err
+ }
+ jrule.Action = actionJSON
+ }
+ return json.Marshal(&jrule)
}
// Rules grouped by name.
@@ -52,19 +140,8 @@
applyRules(state string, rule int, rules compiledRules) error
}
-// InitialState overrides the default initial state of "Root".
-func InitialState(state string) Option {
- return func(d *StatefulDefinition) {
- d.initialState = state
- }
-}
-
-// MatchLongest causes the Lexer to continue checking rules past the first match.
-// If any subsequent rule has a longer match, it will be used instead.
-func MatchLongest() Option {
- return func(d *StatefulDefinition) {
- d.matchLongest = true
- }
+type validatingRule interface {
+ validate(rules Rules) error
}
// ActionPop pops to the previous state when the Rule matches.
@@ -92,7 +169,9 @@
func Return() Rule { return ReturnRule }
// ActionPush pushes the current state and switches to "State" when the Rule matches.
-type ActionPush struct{ State string }
+type ActionPush struct {
+ State string `json:"state"`
+}
func (p ActionPush) applyAction(lexer *StatefulLexer, groups []string) error {
if groups[0] == "" {
@@ -102,6 +181,13 @@
return nil
}
+func (p ActionPush) validate(rules Rules) error {
+ if _, ok := rules[p.State]; !ok {
+ return fmt.Errorf("push to unknown state %q", p.State)
+ }
+ return nil
+}
+
// Push to the given state.
//
// The target state will then be the set of rules used for matching
@@ -110,16 +196,18 @@
return ActionPush{state}
}
-type include struct{ state string }
+type include struct {
+ State string `json:"state"`
+}
func (i include) applyAction(lexer *StatefulLexer, groups []string) error {
panic("should not be called")
}
func (i include) applyRules(state string, rule int, rules compiledRules) error {
- includedRules, ok := rules[i.state]
+ includedRules, ok := rules[i.State]
if !ok {
- return fmt.Errorf("invalid include state %q", i.state)
+ return fmt.Errorf("invalid include state %q", i.State)
}
clone := make([]compiledRule, len(includedRules))
copy(clone, includedRules)
@@ -138,13 +226,12 @@
symbols map[string]TokenType
// Map of key->*regexp.Regexp
backrefCache sync.Map
- initialState string
matchLongest bool
}
// MustStateful creates a new stateful lexer and panics if it is incorrect.
-func MustStateful(rules Rules, options ...Option) *StatefulDefinition {
- def, err := New(rules, options...)
+func MustStateful(rules Rules) *StatefulDefinition {
+ def, err := New(rules)
if err != nil {
panic(err)
}
@@ -152,10 +239,15 @@
}
// New constructs a new stateful lexer from rules.
-func New(rules Rules, options ...Option) (*StatefulDefinition, error) {
+func New(rules Rules) (*StatefulDefinition, error) {
compiled := compiledRules{}
for key, set := range rules {
for i, rule := range set {
+ if validate, ok := rule.Action.(validatingRule); ok {
+ if err := validate.validate(rules); err != nil {
+ return nil, fmt.Errorf("invalid action for rule %q: %w", rule.Name, err)
+ }
+ }
pattern := "^(?:" + rule.Pattern + ")"
var (
re *regexp.Regexp
@@ -208,16 +300,16 @@
}
}
d := &StatefulDefinition{
- initialState: "Root",
- rules: compiled,
- symbols: symbols,
- }
- for _, option := range options {
- option(d)
+ rules: compiled,
+ symbols: symbols,
}
return d, nil
}
+func (d *StatefulDefinition) MarshalJSON() ([]byte, error) {
+ return json.Marshal(d.rules)
+}
+
// Rules returns the user-provided Rules used to construct the lexer.
func (d *StatefulDefinition) Rules() Rules {
out := Rules{}
@@ -234,7 +326,7 @@
return &StatefulLexer{
def: d,
data: s,
- stack: []lexerState{{name: d.initialState}},
+ stack: []lexerState{{name: "Root"}},
pos: Position{
Filename: filename,
Line: 1,
@@ -256,6 +348,7 @@
return d.symbols
}
+// lexerState stored when switching states in the lexer.
type lexerState struct {
name string
groups []string
@@ -345,12 +438,15 @@
if candidate.RE != nil {
return candidate.RE, nil
}
-
// We don't have a compiled RE. This means there are back-references
// that need to be substituted first.
- parent := l.stack[len(l.stack)-1]
- key := candidate.Pattern + "\000" + strings.Join(parent.groups, "\000")
- cached, ok := l.def.backrefCache.Load(key)
+ return BackrefRegex(&l.def.backrefCache, candidate.Pattern, l.stack[len(l.stack)-1].groups)
+}
+
+// BackrefRegex returns a compiled regular expression with backreferences replaced by groups.
+func BackrefRegex(backrefCache *sync.Map, input string, groups []string) (*regexp.Regexp, error) {
+ key := input + "\000" + strings.Join(groups, "\000")
+ cached, ok := backrefCache.Load(key)
if ok {
return cached.(*regexp.Regexp), nil
}
@@ -359,19 +455,19 @@
re *regexp.Regexp
err error
)
- pattern := backrefReplace.ReplaceAllStringFunc(candidate.Pattern, func(s string) string {
+ pattern := backrefReplace.ReplaceAllStringFunc(input, func(s string) string {
var rematch = backrefReplace.FindStringSubmatch(s)
n, nerr := strconv.ParseInt(rematch[2], 10, 64)
if nerr != nil {
err = nerr
return s
}
- if len(parent.groups) == 0 || int(n) >= len(parent.groups) {
- err = fmt.Errorf("invalid group %d from parent with %d groups", n, len(parent.groups))
+ if len(groups) == 0 || int(n) >= len(groups) {
+ err = fmt.Errorf("invalid group %d from parent with %d groups", n, len(groups))
return s
}
// concatenate the leading \\\\ which are already escaped to the quoted match.
- return rematch[1][:len(rematch[1])-1] + regexp.QuoteMeta(parent.groups[n])
+ return rematch[1][:len(rematch[1])-1] + regexp.QuoteMeta(groups[n])
})
if err == nil {
re, err = regexp.Compile("^(?:" + pattern + ")")
@@ -379,6 +475,6 @@
if err != nil {
return nil, fmt.Errorf("invalid backref expansion: %q: %s", pattern, err)
}
- l.def.backrefCache.Store(key, re)
+ backrefCache.Store(key, re)
return re, nil
}
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/vendor/github.com/goccy/go-yaml/ast/ast.go new/vendor/github.com/goccy/go-yaml/ast/ast.go
--- old/vendor/github.com/goccy/go-yaml/ast/ast.go 2023-03-27 21:29:51.000000000 +0200
+++ new/vendor/github.com/goccy/go-yaml/ast/ast.go 2023-03-31 09:53:37.000000000 +0200
@@ -1570,8 +1570,9 @@
diffLength := len(splittedValues[0]) - len(trimmedFirstValue)
if len(splittedValues) > 1 && value.Type() == StringType || value.Type() == LiteralType {
// If multi-line string, the space characters for indent have already been added, so delete them.
+ prefix := space + " "
for i := 1; i < len(splittedValues); i++ {
- splittedValues[i] = strings.TrimLeft(splittedValues[i], " ")
+ splittedValues[i] = strings.TrimPrefix(splittedValues[i], prefix)
}
}
newValues := []string{trimmedFirstValue}
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/vendor/github.com/goccy/go-yaml/token/token.go new/vendor/github.com/goccy/go-yaml/token/token.go
--- old/vendor/github.com/goccy/go-yaml/token/token.go 2023-03-27 21:29:51.000000000 +0200
+++ new/vendor/github.com/goccy/go-yaml/token/token.go 2023-03-31 09:53:37.000000000 +0200
@@ -283,6 +283,28 @@
"False",
"FALSE",
}
+ // For compatibility with other YAML 1.1 parsers
+ // Note that we use these solely for encoding the bool value with quotes.
+ // go-yaml should not treat these as reserved keywords at parsing time.
+ // as go-yaml is supposed to be compliant only with YAML 1.2.
+ reservedLegacyBoolKeywords = []string{
+ "y",
+ "Y",
+ "yes",
+ "Yes",
+ "YES",
+ "n",
+ "N",
+ "no",
+ "No",
+ "NO",
+ "on",
+ "On",
+ "ON",
+ "off",
+ "Off",
+ "OFF",
+ }
reservedInfKeywords = []string{
".inf",
".Inf",
@@ -297,6 +319,11 @@
".NAN",
}
reservedKeywordMap = map[string]func(string, string, *Position) *Token{}
+ // reservedEncKeywordMap contains is the keyword map used at encoding time.
+ // This is supposed to be a superset of reservedKeywordMap,
+ // and used to quote legacy keywords present in YAML 1.1 or lesser for compatibility reasons,
+ // even though this library is supposed to be YAML 1.2-compliant.
+ reservedEncKeywordMap = map[string]func(string, string, *Position) *Token{}
)
func reservedKeywordToken(typ Type, value, org string, pos *Position) *Token {
@@ -317,7 +344,14 @@
}
}
for _, keyword := range reservedBoolKeywords {
- reservedKeywordMap[keyword] = func(value, org string, pos *Position) *Token {
+ f := func(value, org string, pos *Position) *Token {
+ return reservedKeywordToken(BoolType, value, org, pos)
+ }
+ reservedKeywordMap[keyword] = f
+ reservedEncKeywordMap[keyword] = f
+ }
+ for _, keyword := range reservedLegacyBoolKeywords {
+ reservedEncKeywordMap[keyword] = func(value, org string, pos *Position) *Token {
return reservedKeywordToken(BoolType, value, org, pos)
}
}
@@ -581,7 +615,7 @@
if value == "" {
return true
}
- if _, exists := reservedKeywordMap[value]; exists {
+ if _, exists := reservedEncKeywordMap[value]; exists {
return true
}
if stat := getNumberStat(value); stat.isNum {
@@ -589,7 +623,7 @@
}
first := value[0]
switch first {
- case '*', '&', '[', '{', '}', ']', ',', '!', '|', '>', '%', '\'', '"':
+ case '*', '&', '[', '{', '}', ']', ',', '!', '|', '>', '%', '\'', '"', '@':
return true
}
last := value[len(value)-1]
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/vendor/github.com/pelletier/go-toml/v2/unstable/ast.go new/vendor/github.com/pelletier/go-toml/v2/unstable/ast.go
--- old/vendor/github.com/pelletier/go-toml/v2/unstable/ast.go 2023-03-27 21:29:51.000000000 +0200
+++ new/vendor/github.com/pelletier/go-toml/v2/unstable/ast.go 2023-03-31 09:53:37.000000000 +0200
@@ -58,7 +58,7 @@
// - Table and ArrayTable's children represent a dotted key (same as
// KeyValue, but without the first node being the value).
//
-// When relevant, Raw describes the range of bytes this node is refering to in
+// When relevant, Raw describes the range of bytes this node is referring to in
// the input document. Use Parser.Raw() to retrieve the actual bytes.
type Node struct {
Kind Kind
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/vendor/github.com/pelletier/go-toml/v2/unstable/parser.go new/vendor/github.com/pelletier/go-toml/v2/unstable/parser.go
--- old/vendor/github.com/pelletier/go-toml/v2/unstable/parser.go 2023-03-27 21:29:51.000000000 +0200
+++ new/vendor/github.com/pelletier/go-toml/v2/unstable/parser.go 2023-03-31 09:53:37.000000000 +0200
@@ -132,12 +132,12 @@
}
// Expression returns a pointer to the node representing the last successfully
-// parsed expresion.
+// parsed expression.
func (p *Parser) Expression() *Node {
return p.builder.NodeAt(p.ref)
}
-// Error returns any error that has occured during parsing.
+// Error returns any error that has occurred during parsing.
func (p *Parser) Error() error {
return p.err
}
@@ -402,6 +402,7 @@
// inline-table-keyvals = keyval [ inline-table-sep inline-table-keyvals ]
parent := p.builder.Push(Node{
Kind: InlineTable,
+ Raw: p.Range(b[:1]),
})
first := true
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/vendor/modules.txt new/vendor/modules.txt
--- old/vendor/modules.txt 2023-03-27 21:29:51.000000000 +0200
+++ new/vendor/modules.txt 2023-03-31 09:53:38.000000000 +0200
@@ -1,7 +1,7 @@
# github.com/a8m/envsubst v1.4.2
## explicit; go 1.17
github.com/a8m/envsubst/parse
-# github.com/alecthomas/participle/v2 v2.0.0-beta.5
+# github.com/alecthomas/participle/v2 v2.0.0
## explicit; go 1.18
github.com/alecthomas/participle/v2/lexer
# github.com/alecthomas/repr v0.2.0
@@ -27,8 +27,8 @@
github.com/goccy/go-json/internal/encoder/vm_indent
github.com/goccy/go-json/internal/errors
github.com/goccy/go-json/internal/runtime
-# github.com/goccy/go-yaml v1.10.0
-## explicit; go 1.12
+# github.com/goccy/go-yaml v1.10.1
+## explicit; go 1.18
github.com/goccy/go-yaml/ast
github.com/goccy/go-yaml/lexer
github.com/goccy/go-yaml/printer
@@ -49,7 +49,7 @@
# github.com/mattn/go-isatty v0.0.17
## explicit; go 1.15
github.com/mattn/go-isatty
-# github.com/pelletier/go-toml/v2 v2.0.6
+# github.com/pelletier/go-toml/v2 v2.0.7
## explicit; go 1.16
github.com/pelletier/go-toml/v2/internal/characters
github.com/pelletier/go-toml/v2/internal/danger
++++++ yq-4.33.1.tar.gz -> yq-4.33.2.tar.gz ++++++
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/yq-4.33.1/acceptance_tests/nul-separator.sh new/yq-4.33.2/acceptance_tests/nul-separator.sh
--- old/yq-4.33.1/acceptance_tests/nul-separator.sh 1970-01-01 01:00:00.000000000 +0100
+++ new/yq-4.33.2/acceptance_tests/nul-separator.sh 2023-03-31 01:21:59.000000000 +0200
@@ -0,0 +1,286 @@
+#!/bin/bash
+
+setUp() {
+ rm test*.yml || true
+}
+
+## Convenient bash shortcut to read records of NUL separated values
+## from stdin the safe way. See example usage in the next tests.
+read-0() {
+ local eof="" IFS=''
+ while [ "$1" ]; do
+ ## - The `-r` avoids bad surprise with '\n' and other interpreted
+ ## sequences that can be read.
+ ## - The `-d ''` is the (strange?) way to refer to NUL delimiter.
+ ## - The `--` is how to avoid unpleasant surprises if your
+ ## "$1" starts with "-" (minus) sign. This protection also
+ ## will produce a readable error if you want to try to start
+ ## your variable names with a "-".
+ read -r -d '' -- "$1" || eof=1
+ shift
+ done
+ [ -z "$eof" ] ## fail on EOF
+}
+
+## Convenient bash shortcut to be used with the next function `p-err`
+## to read NUL separated values the safe way AND catch any errors from
+## the process creating the stream of NUL separated data. See example
+## usage in the tests.
+read-0-err() {
+ local ret="$1" eof="" idx=0 last=
+ read -r -- "${ret?}" <<<"0"
+ shift
+ while [ "$1" ]; do
+ last=$idx
+ read -r -d '' -- "$1" || {
+ ## Put this last value in ${!ret}
+ eof="$1"
+ read -r -- "$ret" <<<"${!eof}"
+ break
+ }
+ ((idx++))
+ shift
+ done
+ [ -z "$eof" ] || {
+ if [ "$last" != 0 ]; then
+ ## Uhoh, we have no idea if the errorlevel of the internal
+ ## command was properly delimited with a NUL char, and
+ ## anyway something went really wrong at least about the
+ ## number of fields separated by NUL char and the one
+ ## expected.
+ echo "Error: read-0-err couldn't fill all value $ret = '${!ret}', '$eof', '${!eof}'" >&2
+ read -r -- "$ret" <<<"not-enough-values"
+ else
+ if ! [[ "${!ret}" =~ ^[0-9]+$ && "${!ret}" -ge 0 && "${!ret}" -le 127 ]]; then
+ ## This could happen if you don't use `p-err` wrapper,
+ ## or used stdout in unexpected ways in your inner
+ ## command.
+ echo "Error: last value is not a number, did you finish with an errorlevel ?" >&2
+ read -r -- "$ret" <<<"last-value-not-a-number"
+ fi
+ fi
+ false
+ }
+}
+
+## Simply runs command given as argument and adds errorlevel in the
+## standard output. Is expected to be used in tandem with
+## `read-0-err`.
+p-err() {
+ local exp="$1"
+ "$@"
+ printf "%s" "$?"
+}
+
+wyq-r() {
+ local exp="$1"
+ ./yq e -0 -r=false "$1"
+ printf "%s" "$?"
+}
+
+testBasicUsageRaw() {
+ cat >test.yml < expected.out
+
+ ## We need to compare binary content here. We have to filter the compared
+ ## content through a representation that gets rid of NUL chars but accurately
+ ## transcribe the content.
+ ## Also as it would be nice to have a pretty output in case the test fails,
+ ## we use here 'hd': a widely available shortcut to 'hexdump' that will
+ ## pretty-print any binary to it's hexadecimal representation.
+ ##
+ ## Note that the standard `assertEquals` compare its arguments
+ ## value, but they can't hold NUL characters (this comes from the
+ ## limitation of the C API of `exec*(..)` functions that requires
+ ## `const char *arv[]`). And these are NUL terminated strings. As a
+ ## consequence, the NUL characters gets removed in bash arguments.
+ assertEquals "$(hd expected.out)" \
+ "$(./yq e -0 '.a, .b' test.yml | hd)"
+
+ rm expected.out
+}
+
+testBasicUsage() {
+ local a b
+ cat >test.yml <test.yml <test.yml <test.yml <test.yml <test.yml <test.yml <= 2 && data[n-2] == '\r' && data[n-1] == '\n' {
+ b.Truncate(n - 2)
+ } else if n >= 1 && (data[n-1] == '\r' || data[n-1] == '\n') {
+ b.Truncate(n - 1)
+ }
+}
+
func (p *resultsPrinter) PrintResults(matchingNodes *list.List) error {
log.Debug("PrintResults for %v matches", matchingNodes.Len())
@@ -128,18 +148,40 @@
}
}
- if err := p.encoder.PrintLeadingContent(writer, mappedDoc.LeadingContent); err != nil {
+ var destination io.Writer = writer
+ tempBuffer := bytes.NewBuffer(nil)
+ if p.nulSepOutput {
+ destination = tempBuffer
+ }
+
+ if err := p.encoder.PrintLeadingContent(destination, mappedDoc.LeadingContent); err != nil {
return err
}
- if err := p.printNode(mappedDoc.Node, writer); err != nil {
+ if err := p.printNode(mappedDoc.Node, destination); err != nil {
return err
}
- if err := p.encoder.PrintLeadingContent(writer, mappedDoc.TrailingContent); err != nil {
+ if err := p.encoder.PrintLeadingContent(destination, mappedDoc.TrailingContent); err != nil {
return err
}
+ if p.nulSepOutput {
+ removeLastEOL(tempBuffer)
+ tempBufferBytes := tempBuffer.Bytes()
+ if bytes.IndexByte(tempBufferBytes, 0) != -1 {
+ return fmt.Errorf(
+ "Can't serialize value because it contains NUL char and you are using NUL separated output",
+ )
+ }
+ if _, err := writer.Write(tempBufferBytes); err != nil {
+ return err
+ }
+ if _, err := writer.Write([]byte{0}); err != nil {
+ return err
+ }
+ }
+
p.previousDocIndex = mappedDoc.Document
if err := writer.Flush(); err != nil {
return err
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/yq-4.33.1/pkg/yqlib/printer_test.go new/yq-4.33.2/pkg/yqlib/printer_test.go
--- old/yq-4.33.1/pkg/yqlib/printer_test.go 2023-03-26 01:12:36.000000000 +0100
+++ new/yq-4.33.2/pkg/yqlib/printer_test.go 2023-03-31 01:21:59.000000000 +0200
@@ -340,3 +340,53 @@
writer.Flush()
test.AssertResult(t, expected, output.String())
}
+
+func TestPrinterNulSeparator(t *testing.T) {
+ var output bytes.Buffer
+ var writer = bufio.NewWriter(&output)
+ printer := NewSimpleYamlPrinter(writer, YamlOutputFormat, true, false, 2, false)
+ printer.SetNulSepOutput(true)
+ node, err := getExpressionParser().ParseExpression(".a")
+ if err != nil {
+ panic(err)
+ }
+ streamEvaluator := NewStreamEvaluator()
+ _, err = streamEvaluator.Evaluate("sample", strings.NewReader(multiDocSample), node, printer, NewYamlDecoder(ConfiguredYamlPreferences))
+ if err != nil {
+ panic(err)
+ }
+
+ writer.Flush()
+ expected := "banana\x00apple\x00coconut\x00"
+ test.AssertResult(t, expected, output.String())
+}
+
+func TestPrinterNulSeparatorWithJson(t *testing.T) {
+ var output bytes.Buffer
+ var writer = bufio.NewWriter(&output)
+ // note printDocSeparators is true, it should still not print document separators
+ // when outputting JSON.
+ encoder := NewJSONEncoder(0, false, false)
+ if encoder == nil {
+ t.Skipf("no support for %s output format", "json")
+ }
+ printer := NewPrinter(encoder, NewSinglePrinterWriter(writer))
+ printer.SetNulSepOutput(true)
+
+ inputs, err := readDocuments(strings.NewReader(multiDocSample), "sample.yml", 0, NewYamlDecoder(ConfiguredYamlPreferences))
+ if err != nil {
+ panic(err)
+ }
+
+ inputs.Front().Value.(*CandidateNode).LeadingContent = "# ignore this\n"
+
+ err = printer.PrintResults(inputs)
+ if err != nil {
+ panic(err)
+ }
+
+ expected := `{"a":"banana"}` + "\x00" + `{"a":"apple"}` + "\x00" + `{"a":"coconut"}` + "\x00"
+
+ writer.Flush()
+ test.AssertResult(t, expected, output.String())
+}
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/yq-4.33.1/pkg/yqlib/properties_test.go new/yq-4.33.2/pkg/yqlib/properties_test.go
--- old/yq-4.33.1/pkg/yqlib/properties_test.go 2023-03-26 01:12:36.000000000 +0100
+++ new/yq-4.33.2/pkg/yqlib/properties_test.go 2023-03-31 01:21:59.000000000 +0200
@@ -163,6 +163,14 @@
scenarioType: "decode",
},
{
+ description: "print scalar",
+ skipDoc: true,
+ input: "mike = cat",
+ expression: ".mike",
+ expected: "cat\n",
+ scenarioType: "roundtrip",
+ },
+ {
description: "Roundtrip",
input: expectedPropertiesUnwrapped,
expression: `.person.pets.0 = "dog"`,
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/yq-4.33.1/pkg/yqlib/xml_test.go new/yq-4.33.2/pkg/yqlib/xml_test.go
--- old/yq-4.33.1/pkg/yqlib/xml_test.go 2023-03-26 01:12:36.000000000 +0100
+++ new/yq-4.33.2/pkg/yqlib/xml_test.go 2023-03-31 01:21:59.000000000 +0200
@@ -302,6 +302,14 @@
scenarioType: "encode",
},
{
+ description: "Scalar roundtrip",
+ skipDoc: true,
+ input: "<mike>cat</mike>",
+ expression: ".mike",
+ expected: "cat",
+ scenarioType: "roundtrip",
+ },
+ {
description: "ProcInst with head comment round trip",
skipDoc: true,
input: expectedXmlProcInstAndHeadComment,
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/yq-4.33.1/release_notes.txt new/yq-4.33.2/release_notes.txt
--- old/yq-4.33.1/release_notes.txt 2023-03-26 01:12:36.000000000 +0100
+++ new/yq-4.33.2/release_notes.txt 2023-03-31 01:21:59.000000000 +0200
@@ -1,3 +1,7 @@
+4.33.1:
+ - Added read-only TOML support! #1364. Thanks @pelletier for making your API available in your toml lib :)
+ - Added warning when auto-detect by file type outputs JSON (#1608)
+
4.32.2:
- Fixed behaviour for unknown file types (defaults to yaml) #1609
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/yq-4.33.1/snap/snapcraft.yaml new/yq-4.33.2/snap/snapcraft.yaml
--- old/yq-4.33.1/snap/snapcraft.yaml 2023-03-26 01:12:36.000000000 +0100
+++ new/yq-4.33.2/snap/snapcraft.yaml 2023-03-31 01:21:59.000000000 +0200
@@ -1,5 +1,5 @@
name: yq
-version: 'v4.33.1'
+version: 'v4.33.2'
summary: A lightweight and portable command-line YAML processor
description: |
The aim of the project is to be the jq or sed of yaml files.
@@ -9,7 +9,7 @@
apps:
yq:
command: yq
- plugs: [home]
+ plugs: [home, removable-media]
parts:
yq:
plugin: go