Add demo for showing VS Code's tokenization

Author: Anthony Sottile
Date: 2020-03-27 17:59:35 -07:00
parent 032c3d78fc
commit 50ad1e06f9
3 changed files with 58 additions and 0 deletions

testing/vsc_test/.gitignore (new file, 2 additions)

@@ -0,0 +1,2 @@
/node_modules
/package-lock.json

testing/vsc_test/package.json (new file, 5 additions)

@@ -0,0 +1,5 @@
{
"dependencies": [
"vscode-textmate"
]
}

testing/vsc_test/vsc.js (new file, 51 additions)

@@ -0,0 +1,51 @@
const fs = require('fs');
const vsctm = require('vscode-textmate');
if (process.argv.length < 4) {
console.log('usage: vsc.js GRAMMAR FILE');
process.exit(1);
}
const grammar = process.argv[2];
const file = process.argv[3];
const scope = JSON.parse(fs.readFileSync(grammar, {encoding: 'UTF-8'})).scopeName;
/**
* Utility to read a file as a promise
*/
function readFile(path) {
return new Promise((resolve, reject) => {
fs.readFile(path, (error, data) => error ? reject(error) : resolve(data));
})
}
// Create a registry that can create a grammar from a scope name.
const registry = new vsctm.Registry({
loadGrammar: (scopeName) => {
if (scopeName === scope) {
return readFile(grammar).then(data => vsctm.parseRawGrammar(data.toString(), grammar))
}
console.log(`Unknown scope name: ${scopeName}`);
return null;
}
});
// Load the grammar asynchronously, along with any other grammars it includes.
registry.loadGrammar(scope).then(grammar => {
const text = fs.readFileSync(file, {encoding: 'UTF-8'}).trimEnd().split(/\n/);
let ruleStack = vsctm.INITIAL;
for (let i = 0; i < text.length; i++) {
const line = text[i];
const lineTokens = grammar.tokenizeLine(line, ruleStack);
console.log(`\nTokenizing line: ${line}`);
for (let j = 0; j < lineTokens.tokens.length; j++) {
const token = lineTokens.tokens[j];
console.log(` - token from ${token.startIndex} to ${token.endIndex} ` +
`(${line.substring(token.startIndex, token.endIndex)}) ` +
`with scopes ${token.scopes.join(', ')}`
);
}
ruleStack = lineTokens.ruleStack;
}
});
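
How the demo might be run (not part of the commit; a sketch based on the script's usage line and package.json, with placeholder paths for the grammar and input file):

    cd testing/vsc_test
    npm install
    node vsc.js path/to/grammar.json path/to/source-file

For each line of the input file, the script prints the line followed by every token's start/end offsets, the matched text, and its TextMate scopes.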