feat: scoring data tests and readme documentation added
PagoNxt-Trade committed Nov 23, 2022
1 parent dd1cc9e commit 4cd1eee
Showing 23 changed files with 741 additions and 9 deletions.
43 changes: 43 additions & 0 deletions docs/guides/2-cli.md
@@ -39,6 +39,7 @@ Other options include:
--stdin-filepath path to a file to pretend that stdin comes from [string]
--resolver path to custom json-ref-resolver instance [string]
-r, --ruleset path/URL to a ruleset file [string]
-s, --scoring-matrix path/URL to a scoring matrix config file [string]
-F, --fail-severity results of this level or above will trigger a failure exit code
[string] [choices: "error", "warn", "info", "hint"] [default: "error"]
-D, --display-only-failures only output results equal to or greater than --fail-severity [boolean] [default: false]
@@ -60,6 +61,48 @@ Here you can build a [custom ruleset](../getting-started/3-rulesets.md), or exte
- [OpenAPI ruleset](../reference/openapi-rules.md)
- [AsyncAPI ruleset](../reference/asyncapi-rules.md)

## Using a Scoring Matrix File

To use a custom scoring matrix and customize how results are counted, supply a JSON config file like this:

```bash
spectral lint ./reference/**/*.oas*.{json,yml,yaml} --scoring-matrix ./scoringFile.json
```
or
```bash
spectral lint ./reference/**/*.oas*.{json,yml,yaml} -s ./scoringFile.json
```

Here's an example of this config file:

```json
{
  "scoringSubtract": {
    "error": [0, 55, 65, 75, 75, 75, 85, 85, 85, 85, 95],
    "warn": [0, 3, 7, 10, 10, 10, 15, 15, 15, 15, 18]
  },
  "scoringLetter": {
    "A": 75,
    "B": 65,
    "C": 55,
    "D": 45,
    "E": 0
  },
  "threshold": 50,
  "warningsSubtract": true,
  "uniqueErrors": false
}
```

Where (a hypothetical sketch of how these fields combine follows the list):
- `scoringSubtract`: an object with one array per result level that should lower the score; each array holds the percentage to subtract when 0 to 10 results of that type are present
- `scoringLetter`: an object of key/value pairs mapping each scoring letter to the percentage the score must exceed to earn that letter
- `threshold`: the minimum percentage for the checked file to be considered valid
- `warningsSubtract`: a boolean controlling whether every result type accumulates to lower the scoring percentage, or counting stops at the most critical result type
- `uniqueErrors`: a boolean controlling whether only unique errors are counted, or all of them
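
Here is a rough TypeScript sketch of how these fields can combine. It is a hypothetical illustration, not Spectral's actual implementation: it assumes the entry at index `i` of a `scoringSubtract` array is the percentage deducted when `i` results of that severity are present, capped at the array's last entry.

```ts
// Hypothetical sketch of the scoring computation described above.
type Severity = 'error' | 'warn';

interface ScoringConfig {
  scoringSubtract: Record<Severity, number[]>;
  scoringLetter: Record<string, number>;
  threshold: number;
  warningsSubtract: boolean;
}

function computeScore(
  counts: Record<Severity, number>,
  cfg: ScoringConfig,
): { score: number; letter: string; passed: boolean } {
  let score = 100;
  for (const severity of ['error', 'warn'] as const) {
    const table = cfg.scoringSubtract[severity];
    // Cap the lookup so e.g. 25 errors still map to the last entry (index 10).
    const idx = Math.min(counts[severity], table.length - 1);
    score -= table[idx];
    // With warningsSubtract disabled, stop after the most critical
    // severity that produced any results.
    if (!cfg.warningsSubtract && counts[severity] > 0) break;
  }
  score = Math.max(score, 0);
  // First letter whose minimum percentage the score exceeds ('E' catches 0).
  const letter = Object.entries(cfg.scoringLetter).find(([, min]) => score > min)?.[0] ?? 'E';
  return { score, letter, passed: score >= cfg.threshold };
}

// With the config above, 2 errors and 4 warnings give:
// 100 - 65 (errors) - 10 (warnings) = 25% -> letter E, below the
// threshold of 50, so the document would be reported as NOT PASSED.
```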

## Error Results

Spectral has a few different error severities: `error`, `warn`, `info`, and `hint`, and they are in "order" from highest to lowest. By default, all results will be shown regardless of severity, but since v5.0, only the presence of errors will cause a failure status code of 1. Seeing results and getting a failure code for it are now two different things.
20 changes: 18 additions & 2 deletions packages/cli/src/commands/__tests__/lint.test.ts
@@ -146,6 +146,22 @@ describe('lint', () => {
);
});

it('calls lint with document, ruleset and scoring matrix config file', async () => {
const doc = './__fixtures__/empty-oas2-document.json';
const ruleset = 'custom-ruleset.json';
const configFile = 'scoring-matrix.json';
await run(`lint -r ${ruleset} -s ${configFile} ${doc}`);
expect(lint).toBeCalledWith([doc], {
encoding: 'utf8',
format: ['stylish'],
output: { stylish: '<stdout>' },
ruleset: 'custom-ruleset.json',
stdinFilepath: undefined,
ignoreUnknownFormat: false,
failOnUnmatchedGlobs: false,
});
});

it.each(['json', 'stylish'])('calls formatOutput with %s format', async format => {
await run(`lint -f ${format} ./__fixtures__/empty-oas2-document.json`);
expect(formatOutput).toBeCalledWith(results, format, { failSeverity: DiagnosticSeverity.Error });
@@ -244,13 +260,13 @@ describe('lint', () => {
expect(process.stderr.write).nthCalledWith(2, `Error #1: ${chalk.red('some unhandled exception')}\n`);
expect(process.stderr.write).nthCalledWith(
3,
expect.stringContaining(`packages/cli/src/commands/__tests__/lint.test.ts:236`),
expect.stringContaining(`packages/cli/src/commands/__tests__/lint.test.ts:252`),
);

expect(process.stderr.write).nthCalledWith(4, `Error #2: ${chalk.red('another one')}\n`);
expect(process.stderr.write).nthCalledWith(
5,
expect.stringContaining(`packages/cli/src/commands/__tests__/lint.test.ts:237`),
expect.stringContaining(`packages/cli/src/commands/__tests__/lint.test.ts:253`),
);

expect(process.stderr.write).nthCalledWith(6, `Error #3: ${chalk.red('original exception')}\n`);
5 changes: 2 additions & 3 deletions packages/cli/src/commands/lint.ts
@@ -14,7 +14,7 @@ import { formatOutput, writeOutput } from '../services/output';
import { FailSeverity, ILintConfig, OutputFormat } from '../services/config';

import { CLIError } from '../errors';
import { getScoringMatrix } from '../formatters/utils';
import { getScoringMatrix } from '../formatters/utils/getScoring';

const formatOptions = Object.values(OutputFormat);

@@ -128,11 +128,10 @@ const lintCommand: CommandModule = {
description: 'path/URL to a ruleset file',
type: 'string',
},
scoringMatrix: {
'scoring-matrix': {
alias: 's',
description: 'path/URL to a scoring matrix config file',
type: 'string',

},
'fail-severity': {
alias: 'F',
6 changes: 5 additions & 1 deletion packages/cli/src/formatters/json.ts
@@ -10,7 +10,9 @@ export const json: Formatter = (results: ISpectralDiagnostic[], options: Formatt
let groupedResults;
let scoringText = '';
if (options.scoringMatrix !== void 0) {
spectralVersion = options.scoringMatrix.customScoring + (version as string);
if (options.scoringMatrix.customScoring !== undefined) {
spectralVersion = `${options.scoringMatrix.customScoring} ${version as string}`;
}
groupedResults = groupBySource(uniqueErrors(results));
scoringText = getScoringText(getCountsBySeverity(groupedResults), options.scoringMatrix);
}
@@ -26,9 +28,11 @@ });
});
let objectOutput;
if (options.scoringMatrix !== void 0) {
const scoring = +(scoringText !== null ? scoringText.replace('%', '').split(/[()]+/)[1] : 0);
objectOutput = {
version: spectralVersion,
scoring: scoringText.replace('SCORING:', '').trim(),
passed: scoring >= options.scoringMatrix.threshold,
results: outputJson,
};
} else {
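
For reference, when a scoring matrix is supplied the JSON formatter wraps the result array in an envelope instead of emitting it bare. A rough sketch of the shape, with illustrative values (the `version` string depends on `customScoring` and the Spectral version):

```json
{
  "version": "Spectral v6.x.x",
  "scoring": "A (85%)",
  "passed": true,
  "results": []
}
```
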
6 changes: 6 additions & 0 deletions packages/cli/src/formatters/pretty.ts
@@ -116,6 +116,12 @@ export const pretty: Formatter = (results: ISpectralDiagnostic[], options: Forma
output += chalk[summaryColor].bold(`\u2716${summaryText !== null ? ` ${summaryText}` : ''}\n`);
if (options.scoringMatrix !== void 0) {
output += chalk[scoringColor].bold(`\u2716${scoringText !== null ? ` ${scoringText}` : ''}\n`);
const scoring = +(scoringText !== null ? scoringText.replace('%', '').split(/[()]+/)[1] : 0);
if (scoring >= options.scoringMatrix.threshold) {
output += chalk['green'].bold(`\u2716 PASSED!\n`);
} else {
output += chalk['red'].bold(`\u2716 NOT PASSED!\n`);
}
}

return output;
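
With a scoring matrix, the pretty formatter appends a scoring line and a pass/fail verdict to the usual summary. The `getScoringText` helper isn't shown in this diff, so the scoring line below is inferred from how it is parsed; the output would look roughly like:

```
✖ 14 problems (2 errors, 4 warnings, 8 infos, 0 hints)
✖ SCORING: E (25%)
✖ NOT PASSED!
```
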
6 changes: 6 additions & 0 deletions packages/cli/src/formatters/stylish.ts
@@ -126,6 +126,12 @@ export const stylish: Formatter = (results: ISpectralDiagnostic[], options: Form
output += chalk[summaryColor].bold(`\u2716 ${summaryText}\n`);
if (options.scoringMatrix !== void 0) {
output += chalk[scoringColor].bold(`\u2716${scoringText !== null ? ` ${scoringText}` : ''}\n`);
const scoring = +(scoringText !== null ? scoringText.replace('%', '').split(/[()]+/)[1] : 0);
if (scoring >= options.scoringMatrix.threshold) {
output += chalk['green'].bold(`\u2716 PASSED!\n`);
} else {
output += chalk['red'].bold(`\u2716 NOT PASSED!\n`);
}
}

return output;
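
Both formatters recover the numeric score by re-parsing the human-readable scoring line rather than receiving it as a number. A quick TypeScript illustration of what that expression evaluates to, assuming the `SCORING: <letter> (<n>%)` shape inferred above:

```ts
const scoringText = 'SCORING: A (85%)';
// Drop the '%', then split on runs of parentheses:
const parts = scoringText.replace('%', '').split(/[()]+/); // ['SCORING: A ', '85', '']
const scoring = +parts[1]; // 85
```
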
4 changes: 2 additions & 2 deletions packages/cli/src/formatters/types.ts
@@ -5,11 +5,11 @@ import type { DiagnosticSeverity } from '@stoplight/types';
export type ScoringTable = {
[key in HumanReadableDiagnosticSeverity]: number[];
};
interface ScoringLevel {
export interface ScoringLevel {
[key: string]: number;
}
export type ScoringMatrix = {
customScoring: string;
customScoring?: string;
scoringSubtract: ScoringTable[];
scoringLetter: ScoringLevel[];
threshold: number;
18 changes: 18 additions & 0 deletions packages/cli/src/services/__tests__/__fixtures__/scorint-matrix.json
@@ -0,0 +1,18 @@
{
"scoringSubtract":
{
"error": [ 0,55,65,75,75,75,85,85,85,85,95 ],
"warn": [ 0,3,7,10,10,10,15,15,15,15,18 ]
},
"scoringLetter":
{
"A": 75,
"B": 65,
"C": 55,
"D": 45,
"E": 0
},
"threshold": 50,
"warningsSubtract": true,
"uniqueErrors": false
}
17 changes: 17 additions & 0 deletions packages/cli/src/services/__tests__/linter.test.ts
@@ -18,6 +18,7 @@ jest.mock('../output');
const validCustomOas3SpecPath = resolve(__dirname, '__fixtures__/openapi-3.0-valid-custom.yaml');
const invalidRulesetPath = resolve(__dirname, '__fixtures__/ruleset-invalid.js');
const validRulesetPath = resolve(__dirname, '__fixtures__/ruleset-valid.js');
const validScoringMatrixRulesetPath = resolve(__dirname, '__fixtures__/scorint-matrix.json');
const validOas3SpecPath = resolve(__dirname, './__fixtures__/openapi-3.0-valid.yaml');

async function run(command: string) {
@@ -368,6 +369,22 @@ describe('Linter service', () => {
});
});

describe('--scoring-matrix ', () => {
describe('when single scoring-matrix option provided', () => {
it('outputs normal output if it does not exist', () => {
return expect(
run(`lint ${validCustomOas3SpecPath} -r ${validRulesetPath} -s non-existent-path`),
).resolves.toEqual([]);
});

it('outputs no issues', () => {
return expect(
run(`lint ${validCustomOas3SpecPath} -r ${validRulesetPath} -s ${validScoringMatrixRulesetPath}`),
).resolves.toEqual([]);
});
});
});

describe('when loading specification files from web', () => {
it('outputs no issues', () => {
const document = join(__dirname, `./__fixtures__/stoplight-info-document.json`);
46 changes: 46 additions & 0 deletions packages/cli/src/services/__tests__/output.test.ts
@@ -2,6 +2,7 @@ import { DiagnosticSeverity } from '@stoplight/types';
import * as fs from 'fs';
import * as process from 'process';
import * as formatters from '../../formatters';
import { ScoringLevel, ScoringTable } from '../../formatters/types';
import { OutputFormat } from '../config';
import { formatOutput, writeOutput } from '../output';

@@ -14,6 +15,23 @@ jest.mock('fs', () => ({
}));
jest.mock('process');

const scoringMatrix = {
scoringSubtract: {
error: [0, 55, 65, 75, 75, 75, 85, 85, 85, 85, 95],
warn: [0, 3, 7, 10, 10, 10, 15, 15, 15, 15, 18],
} as unknown as ScoringTable[],
scoringLetter: {
A: 75,
B: 65,
C: 55,
D: 45,
E: 0,
} as unknown as ScoringLevel[],
threshold: 50,
warningsSubtract: true,
uniqueErrors: false,
};

describe('Output service', () => {
describe('formatOutput', () => {
it.each(['stylish', 'json', 'junit'])('calls %s formatter with given result', format => {
@@ -41,6 +59,34 @@
(formatters[format] as jest.Mock).mockReturnValueOnce(output);
expect(formatOutput(results, format as OutputFormat, { failSeverity: DiagnosticSeverity.Error })).toEqual(output);
});

it.each(['stylish', 'json', 'pretty'])('calls %s formatter with given result and scoring-matrix', format => {
const results = [
{
code: 'info-contact',
path: ['info'],
message: 'Info object should contain `contact` object.',
severity: DiagnosticSeverity.Information,
range: {
start: {
line: 2,
character: 9,
},
end: {
line: 6,
character: 19,
},
},
source: '/home/Stoplight/spectral/src/__tests__/__fixtures__/petstore.oas3.json',
},
];

const output = `value for ${format}`;
(formatters[format] as jest.Mock).mockReturnValueOnce(output);
expect(
formatOutput(results, format as OutputFormat, { failSeverity: DiagnosticSeverity.Error, scoringMatrix }),
).toEqual(output);
});
});

describe('writeOutput', () => {