summaryrefslogtreecommitdiff
path: root/std/encoding
diff options
context:
space:
mode:
Diffstat (limited to 'std/encoding')
-rw-r--r--std/encoding/README.md219
-rw-r--r--std/encoding/csv.ts251
-rw-r--r--std/encoding/csv_test.ts592
-rw-r--r--std/encoding/hex.ts143
-rw-r--r--std/encoding/hex_test.ts182
-rw-r--r--std/encoding/testdata/CRLF.toml3
-rw-r--r--std/encoding/testdata/arrayTable.toml12
-rw-r--r--std/encoding/testdata/arrays.toml8
-rw-r--r--std/encoding/testdata/boolean.toml3
-rw-r--r--std/encoding/testdata/cargo.toml56
-rw-r--r--std/encoding/testdata/cargoTest.toml147
-rw-r--r--std/encoding/testdata/datetime.toml8
-rw-r--r--std/encoding/testdata/float.toml23
-rw-r--r--std/encoding/testdata/inlineTable.toml7
-rw-r--r--std/encoding/testdata/integer.toml20
-rw-r--r--std/encoding/testdata/simple.toml5
-rw-r--r--std/encoding/testdata/string.toml30
-rw-r--r--std/encoding/testdata/table.toml13
-rw-r--r--std/encoding/toml.ts565
-rw-r--r--std/encoding/toml_test.ts410
20 files changed, 2697 insertions, 0 deletions
diff --git a/std/encoding/README.md b/std/encoding/README.md
new file mode 100644
index 000000000..f03e80ba2
--- /dev/null
+++ b/std/encoding/README.md
@@ -0,0 +1,219 @@
+# Encoding
+
+## CSV
+
+- **`readAll(reader: BufReader, opt: ParseOptions = { comma: ",", trimLeadingSpace: false, lazyQuotes: false } ): Promise<[string[][], BufState]>`**:
+  Read the whole buffer and output the structured CSV data
+- **`parse(csvString: string, opt: ParseOption): Promise<unknown[]>`**:
+  See [parse](#parse)
+
+### Parse
+
+Parse the CSV string with the options provided.
+
+#### Options
+
+##### ParseOption
+
+- **`header: boolean | string[] | HeaderOption[];`**: If a boolean is provided,
+  the first line will be used as the header definition. If `string[]` or
+  `HeaderOption[]` is provided,
+  those names will be used for the header definition.
+- **`parse?: (input: unknown) => unknown;`**: Parse function for the row, which
+ will be executed after parsing of all columns. Therefore if you don't provide
+ header and parse function with headers, input will be `string[]`.
+
+##### HeaderOption
+
+- **`name: string;`**: Name of the header to be used as property.
+- **`parse?: (input: string) => unknown;`**: Parse function for the column.
+ This is executed on each entry of the header. This can be combined with the
+ Parse function of the rows.
+
+#### Usage
+
+```ts
+// input:
+// a,b,c
+// e,f,g
+
+const r = await parseFile(filepath, {
+ header: false
+});
+// output:
+// [["a", "b", "c"], ["e", "f", "g"]]
+
+const r = await parseFile(filepath, {
+ header: true
+});
+// output:
+// [{ a: "e", b: "f", c: "g" }]
+
+const r = await parseFile(filepath, {
+ header: ["this", "is", "sparta"]
+});
+// output:
+// [
+// { this: "a", is: "b", sparta: "c" },
+// { this: "e", is: "f", sparta: "g" }
+// ]
+
+const r = await parseFile(filepath, {
+ header: [
+ {
+ name: "this",
+ parse: (e: string): string => {
+ return `b${e}$$`;
+ }
+ },
+ {
+ name: "is",
+ parse: (e: string): number => {
+ return e.length;
+ }
+ },
+ {
+ name: "sparta",
+ parse: (e: string): unknown => {
+ return { bim: `boom-${e}` };
+ }
+ }
+ ]
+});
+// output:
+// [
+// { this: "ba$$", is: 1, sparta: { bim: `boom-c` } },
+// { this: "be$$", is: 1, sparta: { bim: `boom-g` } }
+// ]
+
+const r = await parseFile(filepath, {
+ header: ["this", "is", "sparta"],
+ parse: (e: Record<string, unknown>) => {
+ return { super: e.this, street: e.is, fighter: e.sparta };
+ }
+});
+// output:
+// [
+// { super: "a", street: "b", fighter: "c" },
+// { super: "e", street: "f", fighter: "g" }
+// ]
+```
+
+## TOML
+
+This module parses TOML files. It follows as much as possible the
+[TOML specs](https://github.com/toml-lang/toml). Be sure to read the supported
+types as not every spec is supported at the moment and the handling in
+TypeScript side is a bit different.
+
+### Supported types and handling
+
+- :heavy_check_mark: [Keys](https://github.com/toml-lang/toml#string)
+- :exclamation: [String](https://github.com/toml-lang/toml#string)
+- :heavy_check_mark:
+ [Multiline String](https://github.com/toml-lang/toml#string)
+- :heavy_check_mark: [Literal String](https://github.com/toml-lang/toml#string)
+- :exclamation: [Integer](https://github.com/toml-lang/toml#integer)
+- :heavy_check_mark: [Float](https://github.com/toml-lang/toml#float)
+- :heavy_check_mark: [Boolean](https://github.com/toml-lang/toml#boolean)
+- :heavy_check_mark:
+ [Offset Date-time](https://github.com/toml-lang/toml#offset-date-time)
+- :heavy_check_mark:
+ [Local Date-time](https://github.com/toml-lang/toml#local-date-time)
+- :heavy_check_mark: [Local Date](https://github.com/toml-lang/toml#local-date)
+- :exclamation: [Local Time](https://github.com/toml-lang/toml#local-time)
+- :heavy_check_mark: [Table](https://github.com/toml-lang/toml#table)
+- :heavy_check_mark: [Inline Table](https://github.com/toml-lang/toml#inline-table)
+- :exclamation: [Array of Tables](https://github.com/toml-lang/toml#array-of-tables)
+
+:exclamation: _Supported with warnings; see [Warning](#warning)._
+
+#### :warning: Warning
+
+##### String
+
+- Regex : Due to the spec, there is no flag to detect regex properly
+ in a TOML declaration. So the regex is stored as string.
+
+##### Integer
+
+For **Binary** / **Octal** / **Hexadecimal** numbers,
+they are stored as strings so they are not interpreted as decimal numbers.
+
+##### Local Time
+
+Because local time does not exist in JavaScript, the local time is stored as a string.
+
+##### Inline Table
+
+Inline tables are supported. See below:
+
+```toml
+animal = { type = { name = "pug" } }
+## Output
+animal = { type.name = "pug" }
+## Output { animal : { type : { name : "pug" } } }
+animal.as.leaders = "tosin"
+## Output { animal: { as: { leaders: "tosin" } } }
+"tosin.abasi" = "guitarist"
+## Output
+"tosin.abasi" : "guitarist"
+```
+
+##### Array of Tables
+
+At the moment only simple declarations like below are supported:
+
+```toml
+[[bin]]
+name = "deno"
+path = "cli/main.rs"
+
+[[bin]]
+name = "deno_core"
+path = "src/foo.rs"
+
+[[nib]]
+name = "node"
+path = "not_found"
+```
+
+will output:
+
+```json
+{
+ "bin": [
+ { "name": "deno", "path": "cli/main.rs" },
+ { "name": "deno_core", "path": "src/foo.rs" }
+ ],
+ "nib": [{ "name": "node", "path": "not_found" }]
+}
+```
+
+### Usage
+
+#### Parse
+
+```ts
+import { parse } from "./parser.ts";
+import { readFileStrSync } from "../fs/read_file_str.ts";
+
+const tomlObject = parse(readFileStrSync("file.toml"));
+
+const tomlString = 'foo.bar = "Deno"';
+const tomlObject22 = parse(tomlString);
+```
+
+#### Stringify
+
+```ts
+import { stringify } from "./parser.ts";
+const obj = {
+ bin: [
+ { name: "deno", path: "cli/main.rs" },
+ { name: "deno_core", path: "src/foo.rs" }
+ ],
+ nib: [{ name: "node", path: "not_found" }]
+};
+const tomlString = stringify(obj);
+```
diff --git a/std/encoding/csv.ts b/std/encoding/csv.ts
new file mode 100644
index 000000000..10d72a8a5
--- /dev/null
+++ b/std/encoding/csv.ts
@@ -0,0 +1,251 @@
+// Ported from Go:
+// https://github.com/golang/go/blob/go1.12.5/src/encoding/csv/
+// Copyright 2018-2019 the Deno authors. All rights reserved. MIT license.
+
+import { BufReader } from "../io/bufio.ts";
+import { TextProtoReader } from "../textproto/mod.ts";
+import { StringReader } from "../io/readers.ts";
+
+const INVALID_RUNE = ["\r", "\n", '"'];
+
+export class ParseError extends Error {
+ StartLine: number;
+ Line: number;
+ constructor(start: number, line: number, message: string) {
+ super(message);
+ this.StartLine = start;
+ this.Line = line;
+ }
+}
+
+/**
+ * @property comma - Character which separates values. Default: ','
+ * @property comment - Character to start a comment. Default: '#'
+ * @property trimLeadingSpace - Flag to trim the leading space of the value.
+ * Default: 'false'
+ * @property lazyQuotes - Allow an unquoted quote in a quoted field, or
+ * non-doubled quotes in a quoted field. Default: 'false'
+ * @property fieldsPerRecord - Enabling the check of fields for each row.
+ * If == 0, the first row is used as a reference for the number of fields.
+ */
+export interface ParseOptions {
+ comma?: string;
+ comment?: string;
+ trimLeadingSpace?: boolean;
+ lazyQuotes?: boolean;
+ fieldsPerRecord?: number;
+}
+
+function chkOptions(opt: ParseOptions): void {
+ if (!opt.comma) opt.comma = ",";
+ if (!opt.trimLeadingSpace) opt.trimLeadingSpace = false;
+ if (
+ INVALID_RUNE.includes(opt.comma!) ||
+ INVALID_RUNE.includes(opt.comment!) ||
+ opt.comma === opt.comment
+ ) {
+ throw new Error("Invalid Delimiter");
+ }
+}
+
+async function read(
+ Startline: number,
+ reader: BufReader,
+ opt: ParseOptions = { comma: ",", trimLeadingSpace: false }
+): Promise<string[] | Deno.EOF> {
+ const tp = new TextProtoReader(reader);
+ let line: string;
+ let result: string[] = [];
+ const lineIndex = Startline;
+
+ const r = await tp.readLine();
+ if (r === Deno.EOF) return Deno.EOF;
+ line = r;
+ // Normalize \r\n to \n on all input lines.
+ if (
+ line.length >= 2 &&
+ line[line.length - 2] === "\r" &&
+ line[line.length - 1] === "\n"
+ ) {
+ line = line.substring(0, line.length - 2);
+ line = line + "\n";
+ }
+
+ const trimmedLine = line.trimLeft();
+ if (trimmedLine.length === 0) {
+ return [];
+ }
+
+ // line starting with comment character is ignored
+ if (opt.comment && trimmedLine[0] === opt.comment) {
+ return [];
+ }
+
+ result = line.split(opt.comma!);
+
+ let quoteError = false;
+ result = result.map((r): string => {
+ if (opt.trimLeadingSpace) {
+ r = r.trimLeft();
+ }
+ if (r[0] === '"' && r[r.length - 1] === '"') {
+ r = r.substring(1, r.length - 1);
+ } else if (r[0] === '"') {
+ r = r.substring(1, r.length);
+ }
+
+ if (!opt.lazyQuotes) {
+ if (r[0] !== '"' && r.indexOf('"') !== -1) {
+ quoteError = true;
+ }
+ }
+ return r;
+ });
+ if (quoteError) {
+ throw new ParseError(Startline, lineIndex, 'bare " in non-quoted-field');
+ }
+ return result;
+}
+
+export async function readAll(
+ reader: BufReader,
+ opt: ParseOptions = {
+ comma: ",",
+ trimLeadingSpace: false,
+ lazyQuotes: false
+ }
+): Promise<string[][]> {
+ const result: string[][] = [];
+ let _nbFields: number;
+ let lineResult: string[];
+ let first = true;
+ let lineIndex = 0;
+ chkOptions(opt);
+
+ for (;;) {
+ const r = await read(lineIndex, reader, opt);
+ if (r === Deno.EOF) break;
+ lineResult = r;
+ lineIndex++;
+ // If fieldsPerRecord is 0, Read sets it to
+ // the number of fields in the first record
+ if (first) {
+ first = false;
+ if (opt.fieldsPerRecord !== undefined) {
+ if (opt.fieldsPerRecord === 0) {
+ _nbFields = lineResult.length;
+ } else {
+ _nbFields = opt.fieldsPerRecord;
+ }
+ }
+ }
+
+ if (lineResult.length > 0) {
+ if (_nbFields! && _nbFields! !== lineResult.length) {
+ throw new ParseError(lineIndex, lineIndex, "wrong number of fields");
+ }
+ result.push(lineResult);
+ }
+ }
+ return result;
+}
+
+/**
+ * HeaderOption provides the column definition
+ * and the parse function for each entry of the
+ * column.
+ */
+export interface HeaderOption {
+ name: string;
+ parse?: (input: string) => unknown;
+}
+
+export interface ExtendedParseOptions extends ParseOptions {
+ header: boolean | string[] | HeaderOption[];
+ parse?: (input: unknown) => unknown;
+}
+
+/**
+ * Csv parse helper to manipulate data.
+ * Provides an auto/custom mapper for columns and parse function
+ * for columns and rows.
+ * @param input Input to parse. Can be a string or BufReader.
+ * @param opt options of the parser.
+ * @param [opt.header=false] HeaderOptions
+ * @param [opt.parse=null] Parse function for rows.
+ * Example:
+ * const r = await parseFile('a,b,c\ne,f,g\n', {
+ * header: ["this", "is", "sparta"],
+ * parse: (e: Record<string, unknown>) => {
+ * return { super: e.this, street: e.is, fighter: e.sparta };
+ * }
+ * });
+ * // output
+ * [
+ * { super: "a", street: "b", fighter: "c" },
+ * { super: "e", street: "f", fighter: "g" }
+ * ]
+ */
+export async function parse(
+ input: string | BufReader,
+ opt: ExtendedParseOptions = {
+ header: false
+ }
+): Promise<unknown[]> {
+ let r: string[][];
+ if (input instanceof BufReader) {
+ r = await readAll(input, opt);
+ } else {
+ r = await readAll(new BufReader(new StringReader(input)), opt);
+ }
+ if (opt.header) {
+ let headers: HeaderOption[] = [];
+ let i = 0;
+ if (Array.isArray(opt.header)) {
+ if (typeof opt.header[0] !== "string") {
+ headers = opt.header as HeaderOption[];
+ } else {
+ const h = opt.header as string[];
+ headers = h.map(
+ (e): HeaderOption => {
+ return {
+ name: e
+ };
+ }
+ );
+ }
+ } else {
+ headers = r.shift()!.map(
+ (e): HeaderOption => {
+ return {
+ name: e
+ };
+ }
+ );
+ i++;
+ }
+ return r.map((e): unknown => {
+ if (e.length !== headers.length) {
+ throw `Error number of fields line:${i}`;
+ }
+ i++;
+ const out: Record<string, unknown> = {};
+ for (let j = 0; j < e.length; j++) {
+ const h = headers[j];
+ if (h.parse) {
+ out[h.name] = h.parse(e[j]);
+ } else {
+ out[h.name] = e[j];
+ }
+ }
+ if (opt.parse) {
+ return opt.parse(out);
+ }
+ return out;
+ });
+ }
+ if (opt.parse) {
+ return r.map((e: string[]): unknown => opt.parse!(e));
+ }
+ return r;
+}
diff --git a/std/encoding/csv_test.ts b/std/encoding/csv_test.ts
new file mode 100644
index 000000000..88a3a24d7
--- /dev/null
+++ b/std/encoding/csv_test.ts
@@ -0,0 +1,592 @@
+// Test ported from Golang
+// https://github.com/golang/go/blob/2cc15b1/src/encoding/csv/reader_test.go
+import { test, runIfMain } from "../testing/mod.ts";
+import { assertEquals, assert } from "../testing/asserts.ts";
+import { readAll, parse } from "./csv.ts";
+import { StringReader } from "../io/readers.ts";
+import { BufReader } from "../io/bufio.ts";
+
+const ErrInvalidDelim = "Invalid Delimiter";
+const ErrFieldCount = "wrong number of fields";
+const ErrBareQuote = 'bare " in non-quoted-field';
+
+// TODO(zekth): Activate remaining tests
+const testCases = [
+ {
+ Name: "Simple",
+ Input: "a,b,c\n",
+ Output: [["a", "b", "c"]]
+ },
+ {
+ Name: "CRLF",
+ Input: "a,b\r\nc,d\r\n",
+ Output: [["a", "b"], ["c", "d"]]
+ },
+ {
+ Name: "BareCR",
+ Input: "a,b\rc,d\r\n",
+ Output: [["a", "b\rc", "d"]]
+ },
+ // {
+ // Name: "RFC4180test",
+ // Input: `#field1,field2,field3
+ // "aaa","bbb","ccc"
+ // "a,a","bbb","ccc"
+ // zzz,yyy,xxx`,
+ // UseFieldsPerRecord: true,
+ // FieldsPerRecord: 0,
+ // Output: [
+ // ["#field1", "field2", "field3"],
+ // ["aaa", "bbb", "ccc"],
+ // ["a,a", `bbb`, "ccc"],
+ // ["zzz", "yyy", "xxx"]
+ // ]
+ // },
+ {
+ Name: "NoEOLTest",
+ Input: "a,b,c",
+ Output: [["a", "b", "c"]]
+ },
+ {
+ Name: "Semicolon",
+ Input: "a;b;c\n",
+ Output: [["a", "b", "c"]],
+ Comma: ";"
+ },
+ // {
+ // Name: "MultiLine",
+ // Input: `"two
+ // line","one line","three
+ // line
+ // field"`,
+ // Output: [["two\nline"], ["one line"], ["three\nline\nfield"]]
+ // },
+ {
+ Name: "BlankLine",
+ Input: "a,b,c\n\nd,e,f\n\n",
+ Output: [["a", "b", "c"], ["d", "e", "f"]]
+ },
+ {
+ Name: "BlankLineFieldCount",
+ Input: "a,b,c\n\nd,e,f\n\n",
+ Output: [["a", "b", "c"], ["d", "e", "f"]],
+ UseFieldsPerRecord: true,
+ FieldsPerRecord: 0
+ },
+ {
+ Name: "TrimSpace",
+ Input: " a, b, c\n",
+ Output: [["a", "b", "c"]],
+ TrimLeadingSpace: true
+ },
+ {
+ Name: "LeadingSpace",
+ Input: " a, b, c\n",
+ Output: [[" a", " b", " c"]]
+ },
+ {
+ Name: "Comment",
+ Input: "#1,2,3\na,b,c\n#comment",
+ Output: [["a", "b", "c"]],
+ Comment: "#"
+ },
+ {
+ Name: "NoComment",
+ Input: "#1,2,3\na,b,c",
+ Output: [["#1", "2", "3"], ["a", "b", "c"]]
+ },
+ {
+ Name: "LazyQuotes",
+ Input: `a "word","1"2",a","b`,
+ Output: [[`a "word"`, `1"2`, `a"`, `b`]],
+ LazyQuotes: true
+ },
+ {
+ Name: "BareQuotes",
+ Input: `a "word","1"2",a"`,
+ Output: [[`a "word"`, `1"2`, `a"`]],
+ LazyQuotes: true
+ },
+ {
+ Name: "BareDoubleQuotes",
+ Input: `a""b,c`,
+ Output: [[`a""b`, `c`]],
+ LazyQuotes: true
+ },
+ {
+ Name: "BadDoubleQuotes",
+ Input: `a""b,c`,
+ Error: ErrBareQuote
+ // Error: &ParseError{StartLine: 1, Line: 1, Column: 1, Err: ErrBareQuote},
+ },
+ {
+ Name: "TrimQuote",
+ Input: ` "a"," b",c`,
+ Output: [["a", " b", "c"]],
+ TrimLeadingSpace: true
+ },
+ {
+ Name: "BadBareQuote",
+ Input: `a "word","b"`,
+ Error: ErrBareQuote
+ // &ParseError{StartLine: 1, Line: 1, Column: 2, Err: ErrBareQuote}
+ },
+ {
+ Name: "BadTrailingQuote",
+ Input: `"a word",b"`,
+ Error: ErrBareQuote
+ },
+ {
+ Name: "ExtraneousQuote",
+ Input: `"a "word","b"`,
+ Error: ErrBareQuote
+ },
+ {
+ Name: "BadFieldCount",
+ Input: "a,b,c\nd,e",
+ Error: ErrFieldCount,
+ UseFieldsPerRecord: true,
+ FieldsPerRecord: 0
+ },
+ {
+ Name: "BadFieldCount1",
+ Input: `a,b,c`,
+ // Error: &ParseError{StartLine: 1, Line: 1, Err: ErrFieldCount},
+ UseFieldsPerRecord: true,
+ FieldsPerRecord: 2,
+ Error: ErrFieldCount
+ },
+ {
+ Name: "FieldCount",
+ Input: "a,b,c\nd,e",
+ Output: [["a", "b", "c"], ["d", "e"]]
+ },
+ {
+ Name: "TrailingCommaEOF",
+ Input: "a,b,c,",
+ Output: [["a", "b", "c", ""]]
+ },
+ {
+ Name: "TrailingCommaEOL",
+ Input: "a,b,c,\n",
+ Output: [["a", "b", "c", ""]]
+ },
+ {
+ Name: "TrailingCommaSpaceEOF",
+ Input: "a,b,c, ",
+ Output: [["a", "b", "c", ""]],
+ TrimLeadingSpace: true
+ },
+ {
+ Name: "TrailingCommaSpaceEOL",
+ Input: "a,b,c, \n",
+ Output: [["a", "b", "c", ""]],
+ TrimLeadingSpace: true
+ },
+ {
+ Name: "TrailingCommaLine3",
+ Input: "a,b,c\nd,e,f\ng,hi,",
+ Output: [["a", "b", "c"], ["d", "e", "f"], ["g", "hi", ""]],
+ TrimLeadingSpace: true
+ },
+ {
+ Name: "NotTrailingComma3",
+ Input: "a,b,c, \n",
+ Output: [["a", "b", "c", " "]]
+ },
+ {
+ Name: "CommaFieldTest",
+ Input: `x,y,z,w
+x,y,z,
+x,y,,
+x,,,
+,,,
+"x","y","z","w"
+"x","y","z",""
+"x","y","",""
+"x","","",""
+"","","",""
+`,
+ Output: [
+ ["x", "y", "z", "w"],
+ ["x", "y", "z", ""],
+ ["x", "y", "", ""],
+ ["x", "", "", ""],
+ ["", "", "", ""],
+ ["x", "y", "z", "w"],
+ ["x", "y", "z", ""],
+ ["x", "y", "", ""],
+ ["x", "", "", ""],
+ ["", "", "", ""]
+ ]
+ },
+ {
+ Name: "TrailingCommaIneffective1",
+ Input: "a,b,\nc,d,e",
+ Output: [["a", "b", ""], ["c", "d", "e"]],
+ TrimLeadingSpace: true
+ },
+ {
+ Name: "ReadAllReuseRecord",
+ Input: "a,b\nc,d",
+ Output: [["a", "b"], ["c", "d"]],
+ ReuseRecord: true
+ },
+ // {
+ // Name: "StartLine1", // Issue 19019
+ // Input: 'a,"b\nc"d,e',
+ // Error: true
+ // // Error: &ParseError{StartLine: 1, Line: 2, Column: 1, Err: ErrQuote},
+ // },
+ // {
+ // Name: "StartLine2",
+ // Input: 'a,b\n"d\n\n,e',
+ // Error: true
+ // // Error: &ParseError{StartLine: 2, Line: 5, Column: 0, Err: ErrQuote},
+ // },
+ // {
+ // Name: "CRLFInQuotedField", // Issue 21201
+ // Input: 'A,"Hello\r\nHi",B\r\n',
+ // Output: [["A", "Hello\nHi", "B"]]
+ // },
+ {
+ Name: "BinaryBlobField", // Issue 19410
+ Input: "x09\x41\xb4\x1c,aktau",
+ Output: [["x09A\xb4\x1c", "aktau"]]
+ },
+ // {
+ // Name: "TrailingCR",
+ // Input: "field1,field2\r",
+ // Output: [["field1", "field2"]]
+ // },
+ // {
+ // Name: "QuotedTrailingCR",
+ // Input: '"field"\r',
+ // Output: [['"field"']]
+ // },
+ // {
+ // Name: "QuotedTrailingCRCR",
+ // Input: '"field"\r\r',
+ // Error: true,
+ // // Error: &ParseError{StartLine: 1, Line: 1, Column: 6, Err: ErrQuote},
+ // },
+ // {
+ // Name: "FieldCR",
+ // Input: "field\rfield\r",
+ // Output: [["field\rfield"]]
+ // },
+ // {
+ // Name: "FieldCRCR",
+ // Input: "field\r\rfield\r\r",
+ // Output: [["field\r\rfield\r"]]
+ // },
+ {
+ Name: "FieldCRCRLF",
+ Input: "field\r\r\nfield\r\r\n",
+ Output: [["field\r"], ["field\r"]]
+ },
+ {
+ Name: "FieldCRCRLFCR",
+ Input: "field\r\r\n\rfield\r\r\n\r",
+ Output: [["field\r"], ["\rfield\r"]]
+ },
+ // {
+ // Name: "FieldCRCRLFCRCR",
+ // Input: "field\r\r\n\r\rfield\r\r\n\r\r",
+ // Output: [["field\r"], ["\r\rfield\r"], ["\r"]]
+ // },
+ // {
+ // Name: "MultiFieldCRCRLFCRCR",
+ // Input: "field1,field2\r\r\n\r\rfield1,field2\r\r\n\r\r,",
+ // Output: [
+ // ["field1", "field2\r"],
+ // ["\r\rfield1", "field2\r"],
+ // ["\r\r", ""]
+ // ]
+ // },
+ {
+ Name: "NonASCIICommaAndComment",
+ Input: "a£b,c£ \td,e\n€ comment\n",
+ Output: [["a", "b,c", "d,e"]],
+ TrimLeadingSpace: true,
+ Comma: "£",
+ Comment: "€"
+ },
+ {
+ Name: "NonASCIICommaAndCommentWithQuotes",
+ Input: 'a€" b,"€ c\nλ comment\n',
+ Output: [["a", " b,", " c"]],
+ Comma: "€",
+ Comment: "λ"
+ },
+ {
+ // λ and θ start with the same byte.
+ // This tests that the parser doesn't confuse such characters.
+ Name: "NonASCIICommaConfusion",
+ Input: '"abθcd"λefθgh',
+ Output: [["abθcd", "efθgh"]],
+ Comma: "λ",
+ Comment: "€"
+ },
+ {
+ Name: "NonASCIICommentConfusion",
+ Input: "λ\nλ\nθ\nλ\n",
+ Output: [["λ"], ["λ"], ["λ"]],
+ Comment: "θ"
+ },
+ // {
+ // Name: "QuotedFieldMultipleLF",
+ // Input: '"\n\n\n\n"',
+ // Output: [["\n\n\n\n"]]
+ // },
+ // {
+ // Name: "MultipleCRLF",
+ // Input: "\r\n\r\n\r\n\r\n"
+ // },
+ /**
+ * The implementation may read each line in several chunks if
+   * it doesn't fit entirely in the read buffer,
+   * so we should test the code to handle that condition.
+ */
+ // {
+ // Name: "HugeLines",
+ // Input:
+ // strings.Repeat("#ignore\n", 10000) +
+ // strings.Repeat("@", 5000) +
+ // "," +
+ // strings.Repeat("*", 5000),
+ // Output: [[strings.Repeat("@", 5000), strings.Repeat("*", 5000)]],
+ // Comment: "#"
+ // },
+ {
+ Name: "QuoteWithTrailingCRLF",
+ Input: '"foo"bar"\r\n',
+ Error: ErrBareQuote
+ // Error: &ParseError{StartLine: 1, Line: 1, Column: 4, Err: ErrQuote},
+ },
+ {
+ Name: "LazyQuoteWithTrailingCRLF",
+ Input: '"foo"bar"\r\n',
+ Output: [[`foo"bar`]],
+ LazyQuotes: true
+ },
+ // {
+ // Name: "DoubleQuoteWithTrailingCRLF",
+ // Input: '"foo""bar"\r\n',
+ // Output: [[`foo"bar`]]
+ // },
+ // {
+ // Name: "EvenQuotes",
+ // Input: `""""""""`,
+ // Output: [[`"""`]]
+ // },
+ // {
+ // Name: "OddQuotes",
+ // Input: `"""""""`,
+ // Error: true
+ // // Error:" &ParseError{StartLine: 1, Line: 1, Column: 7, Err: ErrQuote}",
+ // },
+ // {
+ // Name: "LazyOddQuotes",
+ // Input: `"""""""`,
+ // Output: [[`"""`]],
+ // LazyQuotes: true
+ // },
+ {
+ Name: "BadComma1",
+ Comma: "\n",
+ Error: ErrInvalidDelim
+ },
+ {
+ Name: "BadComma2",
+ Comma: "\r",
+ Error: ErrInvalidDelim
+ },
+ {
+ Name: "BadComma3",
+ Comma: '"',
+ Error: ErrInvalidDelim
+ },
+ {
+ Name: "BadComment1",
+ Comment: "\n",
+ Error: ErrInvalidDelim
+ },
+ {
+ Name: "BadComment2",
+ Comment: "\r",
+ Error: ErrInvalidDelim
+ },
+ {
+ Name: "BadCommaComment",
+ Comma: "X",
+ Comment: "X",
+ Error: ErrInvalidDelim
+ }
+];
+for (const t of testCases) {
+ test({
+ name: `[CSV] ${t.Name}`,
+ async fn(): Promise<void> {
+ let comma = ",";
+ let comment;
+ let fieldsPerRec;
+ let trim = false;
+ let lazyquote = false;
+ if (t.Comma) {
+ comma = t.Comma;
+ }
+ if (t.Comment) {
+ comment = t.Comment;
+ }
+ if (t.TrimLeadingSpace) {
+ trim = true;
+ }
+ if (t.UseFieldsPerRecord) {
+ fieldsPerRec = t.FieldsPerRecord;
+ }
+ if (t.LazyQuotes) {
+ lazyquote = t.LazyQuotes;
+ }
+ let actual;
+ if (t.Error) {
+ let err;
+ try {
+ actual = await readAll(new BufReader(new StringReader(t.Input!)), {
+ comma: comma,
+ comment: comment,
+ trimLeadingSpace: trim,
+ fieldsPerRecord: fieldsPerRec,
+ lazyQuotes: lazyquote
+ });
+ } catch (e) {
+ err = e;
+ }
+ assert(err);
+ assertEquals(err.message, t.Error);
+ } else {
+ actual = await readAll(new BufReader(new StringReader(t.Input!)), {
+ comma: comma,
+ comment: comment,
+ trimLeadingSpace: trim,
+ fieldsPerRecord: fieldsPerRec,
+ lazyQuotes: lazyquote
+ });
+ const expected = t.Output;
+ assertEquals(actual, expected);
+ }
+ }
+ });
+}
+
+const parseTestCases = [
+ {
+ name: "simple",
+ in: "a,b,c",
+ header: false,
+ result: [["a", "b", "c"]]
+ },
+ {
+ name: "simple Bufreader",
+ in: new BufReader(new StringReader("a,b,c")),
+ header: false,
+ result: [["a", "b", "c"]]
+ },
+ {
+ name: "multiline",
+ in: "a,b,c\ne,f,g\n",
+ header: false,
+ result: [["a", "b", "c"], ["e", "f", "g"]]
+ },
+ {
+ name: "header mapping boolean",
+ in: "a,b,c\ne,f,g\n",
+ header: true,
+ result: [{ a: "e", b: "f", c: "g" }]
+ },
+ {
+ name: "header mapping array",
+ in: "a,b,c\ne,f,g\n",
+ header: ["this", "is", "sparta"],
+ result: [
+ { this: "a", is: "b", sparta: "c" },
+ { this: "e", is: "f", sparta: "g" }
+ ]
+ },
+ {
+ name: "header mapping object",
+ in: "a,b,c\ne,f,g\n",
+ header: [{ name: "this" }, { name: "is" }, { name: "sparta" }],
+ result: [
+ { this: "a", is: "b", sparta: "c" },
+ { this: "e", is: "f", sparta: "g" }
+ ]
+ },
+ {
+ name: "header mapping parse entry",
+ in: "a,b,c\ne,f,g\n",
+ header: [
+ {
+ name: "this",
+ parse: (e: string): string => {
+ return `b${e}$$`;
+ }
+ },
+ {
+ name: "is",
+ parse: (e: string): number => {
+ return e.length;
+ }
+ },
+ {
+ name: "sparta",
+ parse: (e: string): unknown => {
+ return { bim: `boom-${e}` };
+ }
+ }
+ ],
+ result: [
+ { this: "ba$$", is: 1, sparta: { bim: `boom-c` } },
+ { this: "be$$", is: 1, sparta: { bim: `boom-g` } }
+ ]
+ },
+ {
+ name: "multiline parse",
+ in: "a,b,c\ne,f,g\n",
+ parse: (e: string[]): unknown => {
+ return { super: e[0], street: e[1], fighter: e[2] };
+ },
+ header: false,
+ result: [
+ { super: "a", street: "b", fighter: "c" },
+ { super: "e", street: "f", fighter: "g" }
+ ]
+ },
+ {
+ name: "header mapping object parseline",
+ in: "a,b,c\ne,f,g\n",
+ header: [{ name: "this" }, { name: "is" }, { name: "sparta" }],
+ parse: (e: Record<string, unknown>): unknown => {
+ return { super: e.this, street: e.is, fighter: e.sparta };
+ },
+ result: [
+ { super: "a", street: "b", fighter: "c" },
+ { super: "e", street: "f", fighter: "g" }
+ ]
+ }
+];
+
+for (const testCase of parseTestCases) {
+ test({
+ name: `[CSV] Parse ${testCase.name}`,
+ async fn(): Promise<void> {
+ const r = await parse(testCase.in, {
+ header: testCase.header,
+ parse: testCase.parse as (input: unknown) => unknown
+ });
+ assertEquals(r, testCase.result);
+ }
+ });
+}
+
+runIfMain(import.meta);
diff --git a/std/encoding/hex.ts b/std/encoding/hex.ts
new file mode 100644
index 000000000..de3d76869
--- /dev/null
+++ b/std/encoding/hex.ts
@@ -0,0 +1,143 @@
+// Ported from Go
+// https://github.com/golang/go/blob/go1.12.5/src/encoding/hex/hex.go
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+// Copyright 2018-2019 the Deno authors. All rights reserved. MIT license.
+
+const hextable = new TextEncoder().encode("0123456789abcdef");
+
+export function errInvalidByte(byte: number): Error {
+ return new Error(
+ "encoding/hex: invalid byte: " +
+ new TextDecoder().decode(new Uint8Array([byte]))
+ );
+}
+
+export function errLength(): Error {
+ return new Error("encoding/hex: odd length hex string");
+}
+
+// fromHexChar converts a hex character into its value and a success flag.
+function fromHexChar(byte: number): [number, boolean] {
+ switch (true) {
+ case 48 <= byte && byte <= 57: // '0' <= byte && byte <= '9'
+ return [byte - 48, true];
+ case 97 <= byte && byte <= 102: // 'a' <= byte && byte <= 'f'
+ return [byte - 97 + 10, true];
+ case 65 <= byte && byte <= 70: // 'A' <= byte && byte <= 'F'
+ return [byte - 65 + 10, true];
+ }
+ return [0, false];
+}
+
+/**
+ * EncodedLen returns the length of an encoding of n source bytes. Specifically,
+ * it returns n * 2.
+ * @param n
+ */
+export function encodedLen(n: number): number {
+ return n * 2;
+}
+
+/**
+ * Encode encodes `src` into `encodedLen(src.length)` bytes of `dst`.
+ * As a convenience, it returns the number of bytes written to `dst`
+ * but this value is always `encodedLen(src.length)`.
+ * Encode implements hexadecimal encoding.
+ * @param dst
+ * @param src
+ */
+export function encode(dst: Uint8Array, src: Uint8Array): number {
+ const srcLength = encodedLen(src.length);
+ if (dst.length !== srcLength) {
+ throw new Error("Out of index.");
+ }
+ for (let i = 0; i < src.length; i++) {
+ const v = src[i];
+ dst[i * 2] = hextable[v >> 4];
+ dst[i * 2 + 1] = hextable[v & 0x0f];
+ }
+ return srcLength;
+}
+
+/**
+ * EncodeToString returns the hexadecimal encoding of `src`.
+ * @param src
+ */
+export function encodeToString(src: Uint8Array): string {
+ const dest = new Uint8Array(encodedLen(src.length));
+ encode(dest, src);
+ return new TextDecoder().decode(dest);
+}
+
+/**
+ * Decode decodes `src` into `decodedLen(src.length)` bytes
+ * returning the actual number of bytes written to `dst`.
+ * Decode expects that `src` contains only hexadecimal characters and that `src`
+ * has even length.
+ * If the input is malformed, Decode returns the number of bytes decoded before
+ * the error.
+ * @param dst
+ * @param src
+ */
+export function decode(
+ dst: Uint8Array,
+ src: Uint8Array
+): [number, Error | void] {
+ let i = 0;
+ for (; i < Math.floor(src.length / 2); i++) {
+ const [a, aOK] = fromHexChar(src[i * 2]);
+ if (!aOK) {
+ return [i, errInvalidByte(src[i * 2])];
+ }
+ const [b, bOK] = fromHexChar(src[i * 2 + 1]);
+ if (!bOK) {
+ return [i, errInvalidByte(src[i * 2 + 1])];
+ }
+
+ dst[i] = (a << 4) | b;
+ }
+
+ if (src.length % 2 == 1) {
+ // Check for invalid char before reporting bad length,
+ // since the invalid char (if present) is an earlier problem.
+ const [, ok] = fromHexChar(src[i * 2]);
+ if (!ok) {
+ return [i, errInvalidByte(src[i * 2])];
+ }
+ return [i, errLength()];
+ }
+
+ return [i, undefined];
+}
+
+/**
+ * DecodedLen returns the length of a decoding of `x` source bytes.
+ * Specifically, it returns `x / 2`.
+ * @param x
+ */
+export function decodedLen(x: number): number {
+ return Math.floor(x / 2);
+}
+
+/**
+ * DecodeString returns the bytes represented by the hexadecimal string `s`.
+ * DecodeString expects that src contains only hexadecimal characters and that
+ * src has even length.
+ * If the input is malformed, DecodeString will throw an error.
+ * @param s the `string` need to decode to `Uint8Array`
+ */
+export function decodeString(s: string): Uint8Array {
+ const src = new TextEncoder().encode(s);
+ // We can use the source slice itself as the destination
+ // because the decode loop increments by one and then the 'seen' byte is not
+ // used anymore.
+ const [n, err] = decode(src, src);
+
+ if (err) {
+ throw err;
+ }
+
+ return src.slice(0, n);
+}
diff --git a/std/encoding/hex_test.ts b/std/encoding/hex_test.ts
new file mode 100644
index 000000000..5ea81ebdb
--- /dev/null
+++ b/std/encoding/hex_test.ts
@@ -0,0 +1,182 @@
+// Ported from Go
+// https://github.com/golang/go/blob/go1.12.5/src/encoding/hex/hex.go
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+// Copyright 2018-2019 the Deno authors. All rights reserved. MIT license.
+import { test, runIfMain } from "../testing/mod.ts";
+import { assertEquals, assertThrows } from "../testing/asserts.ts";
+
+import {
+ encodedLen,
+ encode,
+ encodeToString,
+ decodedLen,
+ decode,
+ decodeString,
+ errLength,
+ errInvalidByte
+} from "./hex.ts";
+
+function toByte(s: string): number {
+ return new TextEncoder().encode(s)[0];
+}
+
// Shared happy-path cases used by the encode/decode round-trip tests.
// Each entry is [hex-encoded string, decoded byte values].
const testCases = [
  // encoded(hex) / decoded(Uint8Array)
  ["", []],
  ["0001020304050607", [0, 1, 2, 3, 4, 5, 6, 7]],
  ["08090a0b0c0d0e0f", [8, 9, 10, 11, 12, 13, 14, 15]],
  ["f0f1f2f3f4f5f6f7", [0xf0, 0xf1, 0xf2, 0xf3, 0xf4, 0xf5, 0xf6, 0xf7]],
  ["f8f9fafbfcfdfeff", [0xf8, 0xf9, 0xfa, 0xfb, 0xfc, 0xfd, 0xfe, 0xff]],
  ["67", Array.from(new TextEncoder().encode("g"))],
  ["e3a1", [0xe3, 0xa1]]
];
+
+const errCases = [
+ // encoded(hex) / error
+ ["", "", undefined],
+ ["0", "", errLength()],
+ ["zd4aa", "", errInvalidByte(toByte("z"))],
+ ["d4aaz", "\xd4\xaa", errInvalidByte(toByte("z"))],
+ ["30313", "01", errLength()],
+ ["0g", "", errInvalidByte(new TextEncoder().encode("g")[0])],
+ ["00gg", "\x00", errInvalidByte(new TextEncoder().encode("g")[0])],
+ ["0\x01", "", errInvalidByte(new TextEncoder().encode("\x01")[0])],
+ ["ffeed", "\xff\xee", errLength()]
+];
+
// encodedLen: two hex characters per input byte.
test({
  name: "[encoding.hex] encodedLen",
  fn(): void {
    assertEquals(encodedLen(0), 0);
    assertEquals(encodedLen(1), 2);
    assertEquals(encodedLen(2), 4);
    assertEquals(encodedLen(3), 6);
    assertEquals(encodedLen(4), 8);
  }
});

// encode: happy path, undersized destination, and table-driven cases.
test({
  name: "[encoding.hex] encode",
  fn(): void {
    {
      const srcStr = "abc";
      const src = new TextEncoder().encode(srcStr);
      const dest = new Uint8Array(encodedLen(src.length));
      const int = encode(dest, src);
      // encode must not mutate its source buffer.
      assertEquals(src, new Uint8Array([97, 98, 99]));
      assertEquals(int, 6);
    }

    {
      const srcStr = "abc";
      const src = new TextEncoder().encode(srcStr);
      const dest = new Uint8Array(2); // out of index
      assertThrows(
        (): void => {
          encode(dest, src);
        },
        Error,
        "Out of index."
      );
    }

    for (const [enc, dec] of testCases) {
      const dest = new Uint8Array(encodedLen(dec.length));
      const src = new Uint8Array(dec as number[]);
      const n = encode(dest, src);
      assertEquals(dest.length, n);
      assertEquals(new TextDecoder().decode(dest), enc);
    }
  }
});

// encodeToString: convenience wrapper over encode.
test({
  name: "[encoding.hex] encodeToString",
  fn(): void {
    for (const [enc, dec] of testCases) {
      assertEquals(encodeToString(new Uint8Array(dec as number[])), enc);
    }
  }
});

// decodedLen: one output byte per two hex characters.
test({
  name: "[encoding.hex] decodedLen",
  fn(): void {
    assertEquals(decodedLen(0), 0);
    assertEquals(decodedLen(2), 1);
    assertEquals(decodedLen(4), 2);
    assertEquals(decodedLen(6), 3);
    assertEquals(decodedLen(8), 4);
  }
});

test({
  name: "[encoding.hex] decode",
  fn(): void {
    // Case for decoding uppercase hex characters, since
    // Encode always uses lowercase.
    const extraTestcase = [
      ["F8F9FAFBFCFDFEFF", [0xf8, 0xf9, 0xfa, 0xfb, 0xfc, 0xfd, 0xfe, 0xff]]
    ];

    const cases = testCases.concat(extraTestcase);

    for (const [enc, dec] of cases) {
      const dest = new Uint8Array(decodedLen(enc.length));
      const src = new TextEncoder().encode(enc as string);
      const [, err] = decode(dest, src);
      assertEquals(err, undefined);
      assertEquals(Array.from(dest), Array.from(dec as number[]));
    }
  }
});

// decodeString: string-input wrapper round-trips the shared cases.
test({
  name: "[encoding.hex] decodeString",
  fn(): void {
    for (const [enc, dec] of testCases) {
      const dst = decodeString(enc as string);

      assertEquals(dec, Array.from(dst));
    }
  }
});

// decode error: verifies both the partial output prefix and the error.
test({
  name: "[encoding.hex] decode error",
  fn(): void {
    for (const [input, output, expectedErr] of errCases) {
      const out = new Uint8Array((input as string).length + 10);
      const [n, err] = decode(out, new TextEncoder().encode(input as string));
      assertEquals(
        new TextDecoder("ascii").decode(out.slice(0, n)),
        output as string
      );
      assertEquals(err, expectedErr);
    }
  }
});

// decodeString error: malformed input must throw; valid input must decode.
test({
  name: "[encoding.hex] decodeString error",
  fn(): void {
    for (const [input, output, expectedErr] of errCases) {
      if (expectedErr) {
        assertThrows(
          (): void => {
            decodeString(input as string);
          },
          Error,
          (expectedErr as Error).message
        );
      } else {
        const out = decodeString(input as string);
        assertEquals(new TextDecoder("ascii").decode(out), output as string);
      }
    }
  }
});

runIfMain(import.meta);
diff --git a/std/encoding/testdata/CRLF.toml b/std/encoding/testdata/CRLF.toml
new file mode 100644
index 000000000..92264888a
--- /dev/null
+++ b/std/encoding/testdata/CRLF.toml
@@ -0,0 +1,3 @@
+[boolean]
+bool1 = true
+bool2 = false \ No newline at end of file
diff --git a/std/encoding/testdata/arrayTable.toml b/std/encoding/testdata/arrayTable.toml
new file mode 100644
index 000000000..3788b7e7c
--- /dev/null
+++ b/std/encoding/testdata/arrayTable.toml
@@ -0,0 +1,12 @@
+
+[[bin]]
+name = "deno"
+path = "cli/main.rs"
+
+[[bin]]
+name = "deno_core"
+path = "src/foo.rs"
+
+[[nib]]
+name = "node"
+path = "not_found" \ No newline at end of file
diff --git a/std/encoding/testdata/arrays.toml b/std/encoding/testdata/arrays.toml
new file mode 100644
index 000000000..5d5913d0c
--- /dev/null
+++ b/std/encoding/testdata/arrays.toml
@@ -0,0 +1,8 @@
+[arrays]
+data = [ ["gamma", "delta"], [1, 2] ]
+
+# Line breaks are OK when inside arrays
+hosts = [
+ "alpha",
+ "omega"
+]
diff --git a/std/encoding/testdata/boolean.toml b/std/encoding/testdata/boolean.toml
new file mode 100644
index 000000000..242d29c96
--- /dev/null
+++ b/std/encoding/testdata/boolean.toml
@@ -0,0 +1,3 @@
+[boolean] # i hate comments
+bool1 = true
+bool2 = false \ No newline at end of file
diff --git a/std/encoding/testdata/cargo.toml b/std/encoding/testdata/cargo.toml
new file mode 100644
index 000000000..291aa7db6
--- /dev/null
+++ b/std/encoding/testdata/cargo.toml
@@ -0,0 +1,56 @@
+# Copyright 2018-2019 the Deno authors. All rights reserved. MIT license.
+# Dummy package info required by `cargo fetch`.
+# Use tools/sync_third_party.py to install deps after editing this file.
+# Deno does not build with cargo. Deno uses a build system called gn.
+# See build_extra/rust/BUILD.gn for the manually built configuration of rust
+# crates.
+
+[workspace]
+members = [
+ "./",
+ "core",
+]
+
+[[bin]]
+name = "deno"
+path = "cli/main.rs"
+
+[package]
+name = "deno"
+version = "0.3.4"
+edition = "2018"
+
+[dependencies]
+deno_core = { path = "./core" }
+
+ansi_term = "0.11.0"
+atty = "0.2.11"
+dirs = "1.0.5"
+flatbuffers = "0.5.0"
+futures = "0.1.25"
+getopts = "0.2.18"
+http = "0.1.16"
+hyper = "0.12.24"
+hyper-rustls = "0.16.0"
+integer-atomics = "1.0.2"
+lazy_static = "1.3.0"
+libc = "0.2.49"
+log = "0.4.6"
+rand = "0.6.5"
+regex = "1.1.0"
+remove_dir_all = "0.5.1"
+ring = "0.14.6"
+rustyline = "3.0.0"
+serde_json = "1.0.38"
+source-map-mappings = "0.5.0"
+tempfile = "3.0.7"
+tokio = "0.1.15"
+tokio-executor = "0.1.6"
+tokio-fs = "0.1.5"
+tokio-io = "0.1.11"
+tokio-process = "0.2.3"
+tokio-threadpool = "0.1.11"
+url = "1.7.2"
+
+[target.'cfg(windows)'.dependencies]
+winapi = "0.3.6"
diff --git a/std/encoding/testdata/cargoTest.toml b/std/encoding/testdata/cargoTest.toml
new file mode 100644
index 000000000..47e7f6e4d
--- /dev/null
+++ b/std/encoding/testdata/cargoTest.toml
@@ -0,0 +1,147 @@
+# This is a TOML document.
+
+title = "TOML Example"
+
+[deeply.nested.object.in.the.toml]
+name = "Tom Preston-Werner"
+dob = 2009-05-27T07:32:00
+
+[database]
+server = "192.168.1.1"
+ports = [ 8001, 8001, 8002 ]
+connection_max = 5000
+enabled = true
+
+[servers]
+
+ # Indentation (tabs and/or spaces) is allowed but not required
+ [servers.alpha]
+ ip = "10.0.0.1"
+ dc = "eqdc10"
+
+ [servers.beta]
+ ip = "10.0.0.2"
+ dc = "eqdc10"
+
+[clients]
+data = [ ["gamma", "delta"], [1, 2] ]
+
+# Line breaks are OK when inside arrays
+hosts = [
+ "alpha",
+ "omega"
+]
+
+[strings]
+str0 = "deno"
+str1 = """
+Roses are red
+ Violets are blue"""
+# On a Unix system, the above multi-line string will most likely be the same as:
+str2 = "Roses are red\nViolets are blue"
+
+# On a Windows system, it will most likely be equivalent to:
+str3 = "Roses are red\r\nViolets are blue"
+str4 = "The quick brown fox jumps over the lazy dog."
+str5 = "this is a \"quote\""
+
+str5 = """
+The quick brown \
+
+
+ fox jumps over \
+ the lazy dog."""
+
+str6 = """\
+ The quick brown \
+ fox jumps over \
+ the lazy dog.\
+ """
+lines = '''
+The first newline is
+trimmed in raw strings.
+ All other whitespace
+ is preserved.
+'''
+
+[Integer]
+int1 = +99
+int2 = 42
+int3 = 0
+int4 = -17
+int5 = 1_000
+int6 = 5_349_221
+int7 = 1_2_3_4_5 # VALID but discouraged
+
+# hexadecimal with prefix `0x`
+hex1 = 0xDEADBEEF
+hex2 = 0xdeadbeef
+hex3 = 0xdead_beef
+
+# octal with prefix `0o`
+oct1 = 0o01234567
+oct2 = 0o755 # useful for Unix file permissions
+
+# binary with prefix `0b`
+bin1 = 0b11010110
+
+[Date-Time]
+odt1 = 1979-05-27T07:32:00Z
+odt2 = 1979-05-27T00:32:00-07:00
+odt3 = 1979-05-27T00:32:00.999999-07:00
+odt4 = 1979-05-27 07:32:00Z
+ld1 = 1979-05-27
+lt1 = 07:32:00 #buggy
+lt2 = 00:32:00.999999 #buggy
+
+[boolean]
+bool1 = true
+bool2 = false
+
+[float]
+# fractional
+flt1 = +1.0
+flt2 = 3.1415
+flt3 = -0.01
+
+# exponent
+flt4 = 5e+22
+flt5 = 1e6
+flt6 = -2E-2
+
+# both
+flt7 = 6.626e-34
+flt8 = 224_617.445_991_228
+# infinity
+sf1 = inf # positive infinity
+sf2 = +inf # positive infinity
+sf3 = -inf # negative infinity
+
+# not a number
+sf4 = nan # actual sNaN/qNaN encoding is implementation specific
+sf5 = +nan # same as `nan`
+sf6 = -nan # valid, actual encoding is implementation specific
+
+[Table]
+name = { first = "Tom", last = "Preston-Werner" }
+point = { x = 1, y = 2 }
+animal = { type.name = "pug" }
+
+[[fruit]]
+ name = "apple"
+
+ [fruit.physical]
+ color = "red"
+ shape = "round"
+
+ [[fruit.variety]]
+ name = "red delicious"
+
+ [[fruit.variety]]
+ name = "granny smith"
+
+[[fruit]]
+ name = "banana"
+
+ [[fruit.variety]]
+ name = "plantain"
diff --git a/std/encoding/testdata/datetime.toml b/std/encoding/testdata/datetime.toml
new file mode 100644
index 000000000..62377a4ba
--- /dev/null
+++ b/std/encoding/testdata/datetime.toml
@@ -0,0 +1,8 @@
+[datetime]
+odt1 = 1979-05-27T07:32:00Z # Comment
+odt2 = 1979-05-27T00:32:00-07:00 # Comment
+odt3 = 1979-05-27T00:32:00.999999-07:00 # Comment
+odt4 = 1979-05-27 07:32:00Z # Comment
+ld1 = 1979-05-27 # Comment
+lt1 = 07:32:00 # Comment
+lt2 = 00:32:00.999999 # Comment
diff --git a/std/encoding/testdata/float.toml b/std/encoding/testdata/float.toml
new file mode 100644
index 000000000..6a384179c
--- /dev/null
+++ b/std/encoding/testdata/float.toml
@@ -0,0 +1,23 @@
+[float]
+# fractional
+flt1 = +1.0 # Comment
+flt2 = 3.1415 # Comment
+flt3 = -0.01 # Comment
+
+# exponent
+flt4 = 5e+22 # Comment
+flt5 = 1e6 # Comment
+flt6 = -2E-2 # Comment
+
+# both
+flt7 = 6.626e-34 # Comment
+flt8 = 224_617.445_991_228 # Comment
+# infinity
+sf1 = inf # positive infinity
+sf2 = +inf # positive infinity
+sf3 = -inf # negative infinity
+
+# not a number
+sf4 = nan # actual sNaN/qNaN encoding is implementation specific
+sf5 = +nan # same as `nan`
+sf6 = -nan # valid, actual encoding is implementation specific \ No newline at end of file
diff --git a/std/encoding/testdata/inlineTable.toml b/std/encoding/testdata/inlineTable.toml
new file mode 100644
index 000000000..203cb16db
--- /dev/null
+++ b/std/encoding/testdata/inlineTable.toml
@@ -0,0 +1,7 @@
+[inlinetable]
+name = { first = "Tom", last = "Preston-Werner" }
+point = { x = 1, y = 2 }
+dog = { type = { name = "pug" } }
+animal.as.leaders = "tosin"
+"tosin.abasi" = "guitarist"
+nile = { derek.roddy = "drummer", also = { malevolant.creation = { drum.kit = "Tama" } } } \ No newline at end of file
diff --git a/std/encoding/testdata/integer.toml b/std/encoding/testdata/integer.toml
new file mode 100644
index 000000000..3bd781e8f
--- /dev/null
+++ b/std/encoding/testdata/integer.toml
@@ -0,0 +1,20 @@
+[integer]
+int1 = +99
+int2 = 42
+int3 = 0
+int4 = -17
+int5 = 1_000
+int6 = 5_349_221
+int7 = 1_2_3_4_5 # VALID but discouraged
+
+# hexadecimal with prefix `0x`
+hex1 = 0xDEADBEEF
+hex2 = 0xdeadbeef
+hex3 = 0xdead_beef
+
+# octal with prefix `0o`
+oct1 = 0o01234567
+oct2 = 0o755 # useful for Unix file permissions
+
+# binary with prefix `0b`
+bin1 = 0b11010110 \ No newline at end of file
diff --git a/std/encoding/testdata/simple.toml b/std/encoding/testdata/simple.toml
new file mode 100644
index 000000000..f3f6c1036
--- /dev/null
+++ b/std/encoding/testdata/simple.toml
@@ -0,0 +1,5 @@
+deno = "is"
+not = "[node]"
+regex = '<\i\c*\s*>'
+NANI = '何?!'
+comment = "Comment inside # the comment" # Comment
diff --git a/std/encoding/testdata/string.toml b/std/encoding/testdata/string.toml
new file mode 100644
index 000000000..f811824eb
--- /dev/null
+++ b/std/encoding/testdata/string.toml
@@ -0,0 +1,30 @@
+[strings]
+str0 = "deno"
+str1 = """
+Roses are not Deno
+ Violets are not Deno either"""
+# On a Unix system, the above multi-line string will most likely be the same as:
+str2 = "Roses are not Deno\nViolets are not Deno either"
+
+# On a Windows system, it will most likely be equivalent to:
+str3 = "Roses are not Deno\r\nViolets are not Deno either"
+str4 = "this is a \"quote\""
+
+str5 = """
+The quick brown \
+
+
+ fox jumps over \
+ the lazy dog."""
+
+str6 = """\
+ The quick brown \
+ fox jumps over \
+ the lazy dog.\
+ """
+lines = '''
+The first newline is
+trimmed in raw strings.
+ All other whitespace
+ is preserved.
+''' \ No newline at end of file
diff --git a/std/encoding/testdata/table.toml b/std/encoding/testdata/table.toml
new file mode 100644
index 000000000..7008e6fb0
--- /dev/null
+++ b/std/encoding/testdata/table.toml
@@ -0,0 +1,13 @@
+[deeply.nested.object.in.the.toml]
+name = "Tom Preston-Werner"
+
+[servers]
+
+ # Indentation (tabs and/or spaces) is allowed but not required
+ [servers.alpha]
+ ip = "10.0.0.1"
+ dc = "eqdc10"
+
+ [servers.beta]
+ ip = "10.0.0.2"
+ dc = "eqdc20" \ No newline at end of file
diff --git a/std/encoding/toml.ts b/std/encoding/toml.ts
new file mode 100644
index 000000000..0cbd51ba0
--- /dev/null
+++ b/std/encoding/toml.ts
@@ -0,0 +1,565 @@
+// Copyright 2018-2019 the Deno authors. All rights reserved. MIT license.
+import { deepAssign } from "../util/deep_assign.ts";
+import { pad } from "../strings/pad.ts";
+
+class KeyValuePair {
+ constructor(public key: string, public value: unknown) {}
+}
+
+class ParserGroup {
+ arrValues: unknown[] = [];
+ objValues: Record<string, unknown> = {};
+
+ constructor(public type: string, public name: string) {}
+}
+
+class ParserContext {
+ currentGroup?: ParserGroup;
+ output: Record<string, unknown> = {};
+}
+
/**
 * Line-oriented TOML parser.
 *
 * Strategy: split the document into lines, strip blank/comment lines,
 * merge multi-line arrays and triple-quoted strings into single logical
 * lines, then interpret each remaining line as either a table header
 * ("group") or a `key = value` declaration. Nested structures are built
 * with `_unflat` and merged into the output via `deepAssign`.
 *
 * NOTE(review): `_parseData` ultimately relies on `eval` to interpret
 * values, so this parser must not be fed untrusted input — a crafted
 * document could execute arbitrary code.
 */
class Parser {
  tomlLines: string[];
  context: ParserContext;
  constructor(tomlString: string) {
    this.tomlLines = this._split(tomlString);
    this.context = new ParserContext();
  }
  // Drop blank lines and full-line comments, then merge multi-line values.
  _sanitize(): void {
    const out: string[] = [];
    for (let i = 0; i < this.tomlLines.length; i++) {
      const s = this.tomlLines[i];
      const trimmed = s.trim();
      if (trimmed !== "" && trimmed[0] !== "#") {
        out.push(s);
      }
    }
    this.tomlLines = out;
    this._mergeMultilines();
  }

  // Collapse multi-line arrays and multi-line strings into one logical
  // line each, so the rest of the parser can work strictly line-by-line.
  _mergeMultilines(): void {
    // A line opens a multi-line array if it contains "= [" without the
    // closing "]" on the same line.
    function arrayStart(line: string): boolean {
      const reg = /.*=\s*\[/g;
      return reg.test(line) && !(line[line.length - 1] === "]");
    }

    function arrayEnd(line: string): boolean {
      return line[line.length - 1] === "]";
    }

    function stringStart(line: string): boolean {
      const m = line.match(/.*=\s*(?:\"\"\"|''')/);
      if (!m) {
        return false;
      }
      // NOTE(review): this disjunction is always true (a line cannot end
      // with both """ and '''), so a single-line `a = """x"""` is also
      // treated as the start of a multi-line string — confirm intended.
      return !line.endsWith(`"""`) || !line.endsWith(`'''`);
    }

    function stringEnd(line: string): boolean {
      return line.endsWith(`'''`) || line.endsWith(`"""`);
    }

    // Literal ('''...''') strings keep their raw (untrimmed) lines.
    function isLiteralString(line: string): boolean {
      return line.match(/'''/) ? true : false;
    }

    const merged = [];
    // acc: lines of the multi-line value being captured;
    // capture/captureType: whether (and what kind of) value is open;
    // merge: the closing delimiter was seen, flush acc this iteration.
    let acc = [],
      isLiteral = false,
      capture = false,
      captureType = "",
      merge = false;

    for (let i = 0; i < this.tomlLines.length; i++) {
      const line = this.tomlLines[i];
      const trimmed = line.trim();
      if (!capture && arrayStart(trimmed)) {
        capture = true;
        captureType = "array";
      } else if (!capture && stringStart(trimmed)) {
        isLiteral = isLiteralString(trimmed);
        capture = true;
        captureType = "string";
      } else if (capture && arrayEnd(trimmed)) {
        merge = true;
      } else if (capture && stringEnd(trimmed)) {
        merge = true;
      }

      if (capture) {
        if (isLiteral) {
          acc.push(line);
        } else {
          acc.push(trimmed);
        }
      } else {
        if (isLiteral) {
          merged.push(line);
        } else {
          merged.push(trimmed);
        }
      }

      if (merge) {
        capture = false;
        merge = false;
        if (captureType === "string") {
          // Re-join the captured lines as a single quoted string with
          // escaped newlines so _parseData can evaluate it later.
          merged.push(
            acc
              .join("\n")
              .replace(/"""/g, '"')
              .replace(/'''/g, `'`)
              .replace(/\n/g, "\\n")
          );
          isLiteral = false;
        } else {
          merged.push(acc.join(""));
        }
        captureType = "";
        acc = [];
      }
    }
    this.tomlLines = merged;
  }
  // Build a nested object from a key path, innermost key first:
  // _unflat(["a", "b"], v) => { a: { b: v } }. Mutates `keys` via pop().
  _unflat(keys: string[], values: object = {}, cObj: object = {}): object {
    const out: Record<string, unknown> = {};
    if (keys.length === 0) {
      return cObj;
    } else {
      if (Object.keys(cObj).length === 0) {
        cObj = values;
      }
      const key: string | undefined = keys.pop();
      if (key) {
        out[key] = cObj;
      }
      return this._unflat(keys, values, out);
    }
  }
  // Flush the current group into the output under its (dotted) name,
  // then clear it.
  _groupToOutput(): void {
    const arrProperty = this.context
      .currentGroup!.name.replace(/"/g, "")
      .replace(/'/g, "")
      .split(".");
    let u = {};
    if (this.context.currentGroup!.type === "array") {
      u = this._unflat(arrProperty, this.context.currentGroup!.arrValues);
    } else {
      u = this._unflat(arrProperty, this.context.currentGroup!.objValues);
    }
    deepAssign(this.context.output, u);
    delete this.context.currentGroup;
  }
  // Split the raw document on newlines.
  _split(str: string): string[] {
    const out = [];
    out.push(...str.split("\n"));
    return out;
  }
  // A group line is a [table] or [[array-of-tables]] header.
  _isGroup(line: string): boolean {
    const t = line.trim();
    return t[0] === "[" && /\[(.*)\]/.exec(t) ? true : false;
  }
  // A declaration line contains at least one "=".
  _isDeclaration(line: string): boolean {
    return line.split("=").length > 1;
  }
  // Open a new group from a header line, flushing any previous group.
  _createGroup(line: string): void {
    const captureReg = /\[(.*)\]/;
    if (this.context.currentGroup) {
      this._groupToOutput();
    }

    let type;
    let name = line.match(captureReg)![1];
    // A doubled bracket ([[name]]) marks an array of tables.
    if (name.match(/\[.*\]/)) {
      type = "array";
      name = name.match(captureReg)![1];
    } else {
      type = "object";
    }
    this.context.currentGroup = new ParserGroup(type, name);
  }
  // Split "key = value" at the first "=" and parse the value.
  _processDeclaration(line: string): KeyValuePair {
    const idx = line.indexOf("=");
    const key = line.substring(0, idx).trim();
    const value = this._parseData(line.slice(idx + 1));
    return new KeyValuePair(key, value);
  }
  // TODO (zekth) Need refactor using ACC
  // Interpret the raw right-hand side of a declaration. Tries, in order:
  // dates, local times, inf/nan, hex/octal/binary literals, numbers,
  // inline tables/arrays/strings — falling back to eval() for the rest.
  _parseData(dataString: string): unknown {
    dataString = dataString.trim();

    if (this._isDate(dataString)) {
      return new Date(dataString.split("#")[0].trim());
    }

    if (this._isLocalTime(dataString)) {
      // NOTE(review): eval of a quoted literal — effectively returns the
      // time as a string; a security smell on untrusted input.
      return eval(`"${dataString.split("#")[0].trim()}"`);
    }

    const cut3 = dataString.substring(0, 3).toLowerCase();
    const cut4 = dataString.substring(0, 4).toLowerCase();
    if (cut3 === "inf" || cut4 === "+inf") {
      return Infinity;
    }
    if (cut4 === "-inf") {
      return -Infinity;
    }

    if (cut3 === "nan" || cut4 === "+nan" || cut4 === "-nan") {
      return NaN;
    }

    // If binary / octal / hex — kept as the literal string.
    const hex = /(0(?:x|o|b)[0-9a-f_]*)[^#]/gi.exec(dataString);
    if (hex && hex[0]) {
      return hex[0].trim();
    }

    const testNumber = this._isParsableNumber(dataString);
    if (testNumber && !isNaN(testNumber as number)) {
      return testNumber;
    }

    // Drop trailing commas inside arrays (",]" is tolerated).
    const invalidArr = /,\]/g.exec(dataString);
    if (invalidArr) {
      dataString = dataString.replace(/,]/g, "]");
    }
    // Trim a trailing comment off quoted/array/table values.
    const m = /(?:\'|\[|{|\").*(?:\'|\]|\"|})\s*[^#]/g.exec(dataString);
    if (m) {
      dataString = m[0].trim();
    }
    if (dataString[0] === "{" && dataString[dataString.length - 1] === "}") {
      // Inline table: rewrite `key =` pairs into JSON `"key":` syntax.
      const reg = /([a-zA-Z0-9-_\.]*) (=)/gi;
      let result;
      while ((result = reg.exec(dataString))) {
        const ogVal = result[0];
        const newVal = ogVal
          .replace(result[1], `"${result[1]}"`)
          .replace(result[2], ":");
        dataString = dataString.replace(ogVal, newVal);
      }
      return JSON.parse(dataString);
    }

    // Handle First and last EOL for multiline strings
    if (dataString.startsWith(`"\\n`)) {
      dataString = dataString.replace(`"\\n`, `"`);
    } else if (dataString.startsWith(`'\\n`)) {
      dataString = dataString.replace(`'\\n`, `'`);
    }
    if (dataString.endsWith(`\\n"`)) {
      dataString = dataString.replace(`\\n"`, `"`);
    } else if (dataString.endsWith(`\\n'`)) {
      dataString = dataString.replace(`\\n'`, `'`);
    }
    // NOTE(review): eval on document content — arbitrary-code-execution
    // risk if the TOML source is untrusted.
    return eval(dataString);
  }
  // Matches a hh:mm:ss time anywhere in the string.
  _isLocalTime(str: string): boolean {
    const reg = /(\d{2}):(\d{2}):(\d{2})/;
    return reg.test(str);
  }
  // Returns the parsed number (underscores stripped), or false when the
  // string does not look numeric at all.
  _isParsableNumber(dataString: string): number | boolean {
    const m = /((?:\+|-|)[0-9_\.e+\-]*)[^#]/i.exec(dataString.trim());
    if (!m) {
      return false;
    } else {
      return parseFloat(m[0].replace(/_/g, ""));
    }
  }
  // Matches a YYYY-MM-DD date anywhere in the string.
  _isDate(dateStr: string): boolean {
    const reg = /\d{4}-\d{2}-\d{2}/;
    return reg.test(dateStr);
  }
  // Split a dotted key into its segments, honoring double-quoted
  // segments that may themselves contain dots.
  _parseDeclarationName(declaration: string): string[] {
    const out = [];
    let acc = [];
    let inLiteral = false;
    for (let i = 0; i < declaration.length; i++) {
      const c = declaration[i];
      switch (c) {
        case ".":
          if (!inLiteral) {
            out.push(acc.join(""));
            acc = [];
          } else {
            acc.push(c);
          }
          break;
        case `"`:
          if (inLiteral) {
            inLiteral = false;
          } else {
            inLiteral = true;
          }
          break;
        default:
          acc.push(c);
          break;
      }
    }
    if (acc.length !== 0) {
      out.push(acc.join(""));
    }
    return out;
  }
  // Main pass: route each logical line to group creation or declaration
  // handling, flushing the final group at the end.
  _parseLines(): void {
    for (let i = 0; i < this.tomlLines.length; i++) {
      const line = this.tomlLines[i];

      // TODO (zekth) Handle unflat of array of tables
      if (this._isGroup(line)) {
        // if the current group is an array we push the
        // parsed objects in it.
        if (
          this.context.currentGroup &&
          this.context.currentGroup.type === "array"
        ) {
          this.context.currentGroup.arrValues.push(
            this.context.currentGroup.objValues
          );
          this.context.currentGroup.objValues = {};
        }
        // If we need to create a group or to change group
        if (
          !this.context.currentGroup ||
          (this.context.currentGroup &&
            this.context.currentGroup.name !==
              line.replace(/\[/g, "").replace(/\]/g, ""))
        ) {
          this._createGroup(line);
          continue;
        }
      }
      if (this._isDeclaration(line)) {
        const kv = this._processDeclaration(line);
        const key = kv.key;
        const value = kv.value;
        if (!this.context.currentGroup) {
          this.context.output[key] = value;
        } else {
          this.context.currentGroup.objValues[key] = value;
        }
      }
    }
    if (this.context.currentGroup) {
      if (this.context.currentGroup.type === "array") {
        this.context.currentGroup.arrValues.push(
          this.context.currentGroup.objValues
        );
      }
      this._groupToOutput();
    }
  }
  // Post-pass: expand dotted keys that survived in the output.
  _cleanOutput(): void {
    this._propertyClean(this.context.output);
  }
  // Recursively rewrite keys: strip quotes and unflatten dotted names.
  _propertyClean(obj: Record<string, unknown>): void {
    const keys = Object.keys(obj);
    for (let i = 0; i < keys.length; i++) {
      let k = keys[i];
      if (k) {
        let v = obj[k];
        const pathDeclaration = this._parseDeclarationName(k);
        delete obj[k];
        if (pathDeclaration.length > 1) {
          const shift = pathDeclaration.shift();
          if (shift) {
            k = shift.replace(/"/g, "");
            v = this._unflat(pathDeclaration, v as object);
          }
        } else {
          k = k.replace(/"/g, "");
        }
        obj[k] = v;
        if (v instanceof Object) {
          // eslint-disable-next-line @typescript-eslint/no-explicit-any
          this._propertyClean(v as any);
        }
      }
    }
  }
  /** Run the full pipeline and return the parsed document. */
  parse(): object {
    this._sanitize();
    this._parseLines();
    this._cleanOutput();
    return this.context.output;
  }
}
+
+// Bare keys may only contain ASCII letters,
+// ASCII digits, underscores, and dashes (A-Za-z0-9_-).
+function joinKeys(keys: string[]): string {
+ // Dotted keys are a sequence of bare or quoted keys joined with a dot.
+ // This allows for grouping similar properties together:
+ return keys
+ .map((str: string): string => {
+ return str.match(/[^A-Za-z0-9_-]/) ? `"${str}"` : str;
+ })
+ .join(".");
+}
+
+class Dumper {
+ maxPad = 0;
+ srcObject: object;
+ output: string[] = [];
+ constructor(srcObjc: object) {
+ this.srcObject = srcObjc;
+ }
+ dump(): string[] {
+ // eslint-disable-next-line @typescript-eslint/no-explicit-any
+ this.output = this._parse(this.srcObject as any);
+ this.output = this._format();
+ return this.output;
+ }
+ _parse(obj: Record<string, unknown>, keys: string[] = []): string[] {
+ const out = [];
+ const props = Object.keys(obj);
+ const propObj = props.filter((e: string): boolean => {
+ if (obj[e] instanceof Array) {
+ const d: unknown[] = obj[e] as unknown[];
+ return !this._isSimplySerializable(d[0]);
+ }
+ return !this._isSimplySerializable(obj[e]);
+ });
+ const propPrim = props.filter((e: string): boolean => {
+ if (obj[e] instanceof Array) {
+ const d: unknown[] = obj[e] as unknown[];
+ return this._isSimplySerializable(d[0]);
+ }
+ return this._isSimplySerializable(obj[e]);
+ });
+ const k = propPrim.concat(propObj);
+ for (let i = 0; i < k.length; i++) {
+ const prop = k[i];
+ const value = obj[prop];
+ if (value instanceof Date) {
+ out.push(this._dateDeclaration([prop], value));
+ } else if (typeof value === "string" || value instanceof RegExp) {
+ out.push(this._strDeclaration([prop], value.toString()));
+ } else if (typeof value === "number") {
+ out.push(this._numberDeclaration([prop], value));
+ } else if (
+ value instanceof Array &&
+ this._isSimplySerializable(value[0])
+ ) {
+ // only if primitives types in the array
+ out.push(this._arrayDeclaration([prop], value));
+ } else if (
+ value instanceof Array &&
+ !this._isSimplySerializable(value[0])
+ ) {
+ // array of objects
+ for (let i = 0; i < value.length; i++) {
+ out.push("");
+ out.push(this._headerGroup([...keys, prop]));
+ out.push(...this._parse(value[i], [...keys, prop]));
+ }
+ } else if (typeof value === "object") {
+ out.push("");
+ out.push(this._header([...keys, prop]));
+ if (value) {
+ const toParse = value as Record<string, unknown>;
+ out.push(...this._parse(toParse, [...keys, prop]));
+ }
+ // out.push(...this._parse(value, `${path}${prop}.`));
+ }
+ }
+ out.push("");
+ return out;
+ }
+ _isSimplySerializable(value: unknown): boolean {
+ return (
+ typeof value === "string" ||
+ typeof value === "number" ||
+ value instanceof RegExp ||
+ value instanceof Date ||
+ value instanceof Array
+ );
+ }
+ _header(keys: string[]): string {
+ return `[${joinKeys(keys)}]`;
+ }
+ _headerGroup(keys: string[]): string {
+ return `[[${joinKeys(keys)}]]`;
+ }
+ _declaration(keys: string[]): string {
+ const title = joinKeys(keys);
+ if (title.length > this.maxPad) {
+ this.maxPad = title.length;
+ }
+ return `${title} = `;
+ }
+ _arrayDeclaration(keys: string[], value: unknown[]): string {
+ return `${this._declaration(keys)}${JSON.stringify(value)}`;
+ }
+ _strDeclaration(keys: string[], value: string): string {
+ return `${this._declaration(keys)}"${value}"`;
+ }
+ _numberDeclaration(keys: string[], value: number): string {
+ switch (value) {
+ case Infinity:
+ return `${this._declaration(keys)}inf`;
+ case -Infinity:
+ return `${this._declaration(keys)}-inf`;
+ default:
+ return `${this._declaration(keys)}${value}`;
+ }
+ }
+ _dateDeclaration(keys: string[], value: Date): string {
+ function dtPad(v: string, lPad = 2): string {
+ return pad(v, lPad, { char: "0" });
+ }
+ const m = dtPad((value.getUTCMonth() + 1).toString());
+ const d = dtPad(value.getUTCDate().toString());
+ const h = dtPad(value.getUTCHours().toString());
+ const min = dtPad(value.getUTCMinutes().toString());
+ const s = dtPad(value.getUTCSeconds().toString());
+ const ms = dtPad(value.getUTCMilliseconds().toString(), 3);
+ // formated date
+ const fData = `${value.getUTCFullYear()}-${m}-${d}T${h}:${min}:${s}.${ms}`;
+ return `${this._declaration(keys)}${fData}`;
+ }
+ _format(): string[] {
+ const rDeclaration = /(.*)\s=/;
+ const out = [];
+ for (let i = 0; i < this.output.length; i++) {
+ const l = this.output[i];
+ // we keep empty entry for array of objects
+ if (l[0] === "[" && l[1] !== "[") {
+ // empty object
+ if (this.output[i + 1] === "") {
+ i += 1;
+ continue;
+ }
+ out.push(l);
+ } else {
+ const m = rDeclaration.exec(l);
+ if (m) {
+ out.push(l.replace(m[1], pad(m[1], this.maxPad, { side: "right" })));
+ } else {
+ out.push(l);
+ }
+ }
+ }
+ // Cleaning multiple spaces
+ const cleanedOutput = [];
+ for (let i = 0; i < out.length; i++) {
+ const l = out[i];
+ if (!(l === "" && out[i + 1] === "")) {
+ cleanedOutput.push(l);
+ }
+ }
+ return cleanedOutput;
+ }
+}
+
+export function stringify(srcObj: object): string {
+ return new Dumper(srcObj).dump().join("\n");
+}
+
+export function parse(tomlString: string): object {
+ // File is potentially using EOL CRLF
+ tomlString = tomlString.replace(/\r\n/g, "\n").replace(/\\\n/g, "\n");
+ return new Parser(tomlString).parse();
+}
diff --git a/std/encoding/toml_test.ts b/std/encoding/toml_test.ts
new file mode 100644
index 000000000..22ecfa68a
--- /dev/null
+++ b/std/encoding/toml_test.ts
@@ -0,0 +1,410 @@
+// Copyright 2018-2019 the Deno authors. All rights reserved. MIT license.
+import { runIfMain, test } from "../testing/mod.ts";
+import { assertEquals } from "../testing/asserts.ts";
+import { existsSync } from "../fs/exists.ts";
+import { readFileStrSync } from "../fs/read_file_str.ts";
+import { parse, stringify } from "./toml.ts";
+import * as path from "../fs/path/mod.ts";
+
+// All fixture files live under encoding/testdata.
+const testFilesDir = path.resolve("encoding", "testdata");
+
+// Read a TOML fixture from disk and parse it; throws when the file is absent
+// so a missing fixture fails loudly instead of producing an empty result.
+function parseFile(filePath: string): object {
+  if (!existsSync(filePath)) {
+    throw new Error(`File not found: ${filePath}`);
+  }
+  return parse(readFileStrSync(filePath));
+}
+
+// Basic, literal and multi-line TOML string forms should all decode to the
+// same JS string values.
+test({
+  name: "[TOML] Strings",
+  fn(): void {
+    const actual = parseFile(path.join(testFilesDir, "string.toml"));
+    assertEquals(actual, {
+      strings: {
+        str0: "deno",
+        str1: "Roses are not Deno\nViolets are not Deno either",
+        str2: "Roses are not Deno\nViolets are not Deno either",
+        str3: "Roses are not Deno\r\nViolets are not Deno either",
+        str4: 'this is a "quote"',
+        str5: "The quick brown\nfox jumps over\nthe lazy dog.",
+        str6: "The quick brown\nfox jumps over\nthe lazy dog.",
+        lines:
+          "The first newline is\ntrimmed in raw strings.\n All other " +
+          "whitespace\n is preserved."
+      }
+    });
+  }
+});
+
+// A CRLF-terminated document must parse identically to an LF one.
+test({
+  name: "[TOML] CRLF",
+  fn(): void {
+    const actual = parseFile(path.join(testFilesDir, "CRLF.toml"));
+    assertEquals(actual, { boolean: { bool1: true, bool2: false } });
+  }
+});
+
+// Bare true/false literals decode to JS booleans.
+test({
+  name: "[TOML] Boolean",
+  fn(): void {
+    const actual = parseFile(path.join(testFilesDir, "boolean.toml"));
+    assertEquals(actual, { boolean: { bool1: true, bool2: false } });
+  }
+});
+
+// Integer forms: decimal literals (with signs and underscores) become JS
+// numbers, while hex/octal/binary literals are kept as their raw string
+// representation per the expected values below.
+test({
+  name: "[TOML] Integer",
+  fn(): void {
+    const expected = {
+      integer: {
+        int1: 99,
+        int2: 42,
+        int3: 0,
+        int4: -17,
+        int5: 1000,
+        int6: 5349221,
+        int7: 12345,
+        hex1: "0xDEADBEEF",
+        hex2: "0xdeadbeef",
+        hex3: "0xdead_beef",
+        oct1: "0o01234567",
+        oct2: "0o755",
+        bin1: "0b11010110"
+      }
+    };
+    const actual = parseFile(path.join(testFilesDir, "integer.toml"));
+    assertEquals(actual, expected);
+  }
+});
+
+// Float forms: fractional, exponent and underscore-separated literals become
+// JS numbers; the special values inf/-inf/nan map to Infinity/-Infinity/NaN.
+test({
+  name: "[TOML] Float",
+  fn(): void {
+    const expected = {
+      float: {
+        flt1: 1.0,
+        flt2: 3.1415,
+        flt3: -0.01,
+        flt4: 5e22,
+        flt5: 1e6,
+        flt6: -2e-2,
+        flt7: 6.626e-34,
+        flt8: 224_617.445_991_228,
+        sf1: Infinity,
+        sf2: Infinity,
+        sf3: -Infinity,
+        sf4: NaN,
+        sf5: NaN,
+        sf6: NaN
+      }
+    };
+    const actual = parseFile(path.join(testFilesDir, "float.toml"));
+    assertEquals(actual, expected);
+  }
+});
+
+// Arrays, including nested heterogeneous arrays, decode to JS arrays.
+test({
+  name: "[TOML] Arrays",
+  fn(): void {
+    const actual = parseFile(path.join(testFilesDir, "arrays.toml"));
+    assertEquals(actual, {
+      arrays: {
+        data: [["gamma", "delta"], [1, 2]],
+        hosts: ["alpha", "omega"]
+      }
+    });
+  }
+});
+
+// Table headers with dotted keys expand into the matching nested objects.
+test({
+  name: "[TOML] Table",
+  fn(): void {
+    const expected = {
+      deeply: {
+        nested: {
+          object: {
+            in: {
+              the: {
+                toml: {
+                  name: "Tom Preston-Werner"
+                }
+              }
+            }
+          }
+        }
+      },
+      servers: {
+        alpha: {
+          ip: "10.0.0.1",
+          dc: "eqdc10"
+        },
+        beta: {
+          ip: "10.0.0.2",
+          dc: "eqdc20"
+        }
+      }
+    };
+    const actual = parseFile(path.join(testFilesDir, "table.toml"));
+    assertEquals(actual, expected);
+  }
+});
+
+// A flat document of top-level key/value string pairs, including values that
+// contain TOML-significant characters ("[", "#", non-ASCII).
+test({
+  name: "[TOML] Simple",
+  fn(): void {
+    const actual = parseFile(path.join(testFilesDir, "simple.toml"));
+    assertEquals(actual, {
+      deno: "is",
+      not: "[node]",
+      regex: "<ic*s*>",
+      NANI: "何?!",
+      comment: "Comment inside # the comment"
+    });
+  }
+});
+
+// Offset datetimes and local dates decode to JS Date objects; local times
+// (no date part) are kept as plain strings, per the expected values below.
+test({
+  name: "[TOML] Datetime",
+  fn(): void {
+    const expected = {
+      datetime: {
+        odt1: new Date("1979-05-27T07:32:00Z"),
+        odt2: new Date("1979-05-27T00:32:00-07:00"),
+        odt3: new Date("1979-05-27T00:32:00.999999-07:00"),
+        odt4: new Date("1979-05-27 07:32:00Z"),
+        ld1: new Date("1979-05-27"),
+        lt1: "07:32:00",
+        lt2: "00:32:00.999999"
+      }
+    };
+    const actual = parseFile(path.join(testFilesDir, "datetime.toml"));
+    assertEquals(actual, expected);
+  }
+});
+
+// Inline tables ({ k = v, ... }) expand to nested objects, including dotted
+// keys inside inline tables and quoted keys containing dots.
+test({
+  name: "[TOML] Inline Table",
+  fn(): void {
+    const expected = {
+      inlinetable: {
+        nile: {
+          also: {
+            malevolant: {
+              creation: {
+                drum: {
+                  kit: "Tama"
+                }
+              }
+            }
+          },
+          derek: {
+            roddy: "drummer"
+          }
+        },
+        name: {
+          first: "Tom",
+          last: "Preston-Werner"
+        },
+        point: {
+          x: 1,
+          y: 2
+        },
+        dog: {
+          type: {
+            name: "pug"
+          }
+        },
+        "tosin.abasi": "guitarist",
+        animal: {
+          as: {
+            leaders: "tosin"
+          }
+        }
+      }
+    };
+    const actual = parseFile(path.join(testFilesDir, "inlineTable.toml"));
+    assertEquals(actual, expected);
+  }
+});
+
+// Array-of-table headers ([[name]]) accumulate entries into a JS array.
+test({
+  name: "[TOML] Array of Tables",
+  fn(): void {
+    const actual = parseFile(path.join(testFilesDir, "arrayTable.toml"));
+    assertEquals(actual, {
+      bin: [
+        { name: "deno", path: "cli/main.rs" },
+        { name: "deno_core", path: "src/foo.rs" }
+      ],
+      nib: [{ name: "node", path: "not_found" }]
+    });
+  }
+});
+
+// Regression test against a realistic Cargo.toml fixture: mixes workspaces,
+// array-of-tables, inline tables, quoted keys and a cfg() target key.
+test({
+  name: "[TOML] Cargo",
+  fn(): void {
+    /* eslint-disable @typescript-eslint/camelcase */
+    const expected = {
+      workspace: { members: ["./", "core"] },
+      bin: [{ name: "deno", path: "cli/main.rs" }],
+      package: { name: "deno", version: "0.3.4", edition: "2018" },
+      dependencies: {
+        deno_core: { path: "./core" },
+        ansi_term: "0.11.0",
+        atty: "0.2.11",
+        dirs: "1.0.5",
+        flatbuffers: "0.5.0",
+        futures: "0.1.25",
+        getopts: "0.2.18",
+        http: "0.1.16",
+        hyper: "0.12.24",
+        "hyper-rustls": "0.16.0",
+        "integer-atomics": "1.0.2",
+        lazy_static: "1.3.0",
+        libc: "0.2.49",
+        log: "0.4.6",
+        rand: "0.6.5",
+        regex: "1.1.0",
+        remove_dir_all: "0.5.1",
+        ring: "0.14.6",
+        rustyline: "3.0.0",
+        serde_json: "1.0.38",
+        "source-map-mappings": "0.5.0",
+        tempfile: "3.0.7",
+        tokio: "0.1.15",
+        "tokio-executor": "0.1.6",
+        "tokio-fs": "0.1.5",
+        "tokio-io": "0.1.11",
+        "tokio-process": "0.2.3",
+        "tokio-threadpool": "0.1.11",
+        url: "1.7.2"
+      },
+      target: { "cfg(windows)": { dependencies: { winapi: "0.3.6" } } }
+    };
+    /* eslint-enable @typescript-eslint/camelcase */
+    const actual = parseFile(path.join(testFilesDir, "cargo.toml"));
+    assertEquals(actual, expected);
+  }
+});
+
+// End-to-end stringify test: serializes a heterogeneous object and pins the
+// exact TOML output, covering key escaping/quoting, number formatting,
+// UTC-rendered dates, inf/-inf/NaN specials, nested tables, arrays of
+// tables (including an empty entry), and a RegExp stringified as a quoted
+// value. NOTE(review): the expected string is byte-exact — do not reformat.
+test({
+  name: "[TOML] Stringify",
+  fn(): void {
+    const src = {
+      foo: { bar: "deno" },
+      this: { is: { nested: "denonono" } },
+      "https://deno.land/std": {
+        $: "doller"
+      },
+      "##": {
+        deno: {
+          "https://deno.land": {
+            proto: "https",
+            ":80": "port"
+          }
+        }
+      },
+      arrayObjects: [{ stuff: "in" }, {}, { the: "array" }],
+      deno: "is",
+      not: "[node]",
+      regex: "<ic*s*>",
+      NANI: "何?!",
+      comment: "Comment inside # the comment",
+      int1: 99,
+      int2: 42,
+      int3: 0,
+      int4: -17,
+      int5: 1000,
+      int6: 5349221,
+      int7: 12345,
+      flt1: 1.0,
+      flt2: 3.1415,
+      flt3: -0.01,
+      flt4: 5e22,
+      flt5: 1e6,
+      flt6: -2e-2,
+      flt7: 6.626e-34,
+      odt1: new Date("1979-05-01T07:32:00Z"),
+      odt2: new Date("1979-05-27T00:32:00-07:00"),
+      odt3: new Date("1979-05-27T00:32:00.999999-07:00"),
+      odt4: new Date("1979-05-27 07:32:00Z"),
+      ld1: new Date("1979-05-27"),
+      reg: /foo[bar]/,
+      sf1: Infinity,
+      sf2: Infinity,
+      sf3: -Infinity,
+      sf4: NaN,
+      sf5: NaN,
+      sf6: NaN,
+      data: [["gamma", "delta"], [1, 2]],
+      hosts: ["alpha", "omega"]
+    };
+    const expected = `deno = "is"
+not = "[node]"
+regex = "<ic*s*>"
+NANI = "何?!"
+comment = "Comment inside # the comment"
+int1 = 99
+int2 = 42
+int3 = 0
+int4 = -17
+int5 = 1000
+int6 = 5349221
+int7 = 12345
+flt1 = 1
+flt2 = 3.1415
+flt3 = -0.01
+flt4 = 5e+22
+flt5 = 1000000
+flt6 = -0.02
+flt7 = 6.626e-34
+odt1 = 1979-05-01T07:32:00.000
+odt2 = 1979-05-27T07:32:00.000
+odt3 = 1979-05-27T07:32:00.999
+odt4 = 1979-05-27T07:32:00.000
+ld1 = 1979-05-27T00:00:00.000
+reg = "/foo[bar]/"
+sf1 = inf
+sf2 = inf
+sf3 = -inf
+sf4 = NaN
+sf5 = NaN
+sf6 = NaN
+data = [["gamma","delta"],[1,2]]
+hosts = ["alpha","omega"]
+
+[foo]
+bar = "deno"
+
+[this.is]
+nested = "denonono"
+
+["https://deno.land/std"]
+"$" = "doller"
+
+["##".deno."https://deno.land"]
+proto = "https"
+":80" = "port"
+
+[[arrayObjects]]
+stuff = "in"
+
+[[arrayObjects]]
+
+[[arrayObjects]]
+the = "array"
+`;
+    const actual = stringify(src);
+    assertEquals(actual, expected);
+  }
+});
+
+// Execute the registered tests when this module is run directly.
+runIfMain(import.meta);