WIP - add extractor, generate snippet_data

Stefan Fejes
2019-08-20 15:52:05 +02:00
parent 88084d3d30
commit cc8f1d8a7a
37396 changed files with 4588842 additions and 133 deletions

@@ -1,10 +0,0 @@
import autopep8
import os
import util

snippets = util.read_snippets()
for snippet in snippets:
    formatedCode = autopep8.fix_code(snippet.read_code()).strip()
    fixedCode = snippet.content.replace(snippet.read_code(), formatedCode)
    snippetFile = open(f"snippets/{snippet.name}.md", 'w')
    snippetFile.write(fixedCode)
    snippetFile.close()

scripts/build.js (new file, 137 lines)

@@ -0,0 +1,137 @@
/*
  This is the builder script that generates the README file.
  Run using `npm run builder`.
*/
// Load modules
const fs = require('fs-extra');
const path = require('path');
const { green, red } = require('kleur');
const util = require('./util');
const markdown = require('markdown-builder');
const { headers, misc, lists } = markdown;
const config = require('../config');

// Paths (relative to package.json)
const SNIPPETS_PATH = `./${config.snippetPath}`;
const STATIC_PARTS_PATH = `./${config.staticPartsPath}`;

// Terminate if parent commit is a Travis build
if (
  util.isTravisCI() &&
  /^Travis build: \d+/g.test(process.env['TRAVIS_COMMIT_MESSAGE'])
) {
  console.log(
    `${green(
      'NOBUILD',
    )} README build terminated, parent commit is a Travis build!`,
  );
  process.exit(0);
}

// Setup everything
let snippets = {},
  snippetsArray = [],
  startPart = '',
  endPart = '',
  output = '';
const EMOJIS = {};

console.time('Builder');

// Synchronously read all snippets from snippets folder and sort them as necessary (case-insensitive)
snippets = util.readSnippets(SNIPPETS_PATH);
snippetsArray = Object.keys(snippets).reduce((acc, key) => {
  acc.push(snippets[key]);
  return acc;
}, []);

// Load static parts for the README file
try {
  startPart = fs.readFileSync(
    path.join(STATIC_PARTS_PATH, 'README-start.md'),
    'utf8',
  );
  endPart = fs.readFileSync(
    path.join(STATIC_PARTS_PATH, 'README-end.md'),
    'utf8',
  );
} catch (err) {
  console.log(`${red('ERROR!')} During static part loading: ${err}`);
  process.exit(1);
}

// Create the output for the README file
try {
  const tags = util.prepTaggedData(
    Object.keys(snippets).reduce((acc, key) => {
      acc[key] = snippets[key].attributes.tags;
      return acc;
    }, {}),
  );

  output += `${startPart}\n`;

  // Loop over tags and snippets to create the table of contents
  for (const tag of tags) {
    const capitalizedTag = util.capitalize(tag, true);
    const taggedSnippets = snippetsArray.filter(
      snippet => snippet.attributes.tags[0] === tag,
    );

    output += headers.h3((EMOJIS[tag] || '') + ' ' + capitalizedTag).trim();
    output +=
      misc.collapsible(
        'View contents',
        lists.ul(taggedSnippets, snippet =>
          misc.link(
            `\`${snippet.title}\``,
            `${misc.anchor(snippet.title)}${
              snippet.attributes.tags.includes('advanced') ? '-' : ''
            }`,
          ),
        ),
      ) + '\n';
  }

  for (const tag of tags) {
    const capitalizedTag = util.capitalize(tag, true);
    const taggedSnippets = snippetsArray.filter(
      snippet => snippet.attributes.tags[0] === tag,
    );

    output +=
      misc.hr() + headers.h2((EMOJIS[tag] || '') + ' ' + capitalizedTag) + '\n';

    for (let snippet of taggedSnippets) {
      if (snippet.attributes.tags.includes('advanced'))
        output +=
          headers.h3(
            snippet.title + ' ' + misc.image('advanced', '/advanced.svg'),
          ) + '\n';
      else output += headers.h3(snippet.title) + '\n';

      output += snippet.attributes.text;
      output += `\`\`\`${config.language}\n${snippet.attributes.codeBlocks.code}\n\`\`\``;
      output += misc.collapsible(
        'Examples',
        `\`\`\`${config.language}\n${snippet.attributes.codeBlocks.examples}\n\`\`\``,
      );
      output +=
        '\n<br>' + misc.link('⬆ Back to top', misc.anchor('Contents')) + '\n';
    }
  }

  // Add the ending static part
  output += `\n${endPart}\n`;

  // Write to the README file
  fs.writeFileSync('README.md', output);
} catch (err) {
  console.log(`${red('ERROR!')} During README generation: ${err}`);
  process.exit(1);
}

console.log(`${green('SUCCESS!')} README file generated!`);
console.timeEnd('Builder');
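
For orientation, a hedged sketch of the per-snippet record this script reads from util.readSnippets, restricted to the fields used above. The snippet, its tags and text are invented for illustration; the actual shape is defined by the snippet parser added later in this commit.

// Hypothetical snippet record, limited to the fields build.js touches.
const exampleSnippet = {
  title: 'all_equal',
  attributes: {
    // tags[0] drives grouping; an 'advanced' tag adds the badge and anchor suffix
    tags: ['list', 'beginner'],
    text: 'Checks if all elements in a list are equal.\n',
    codeBlocks: {
      code: 'def all_equal(lst):\n    return lst[1:] == lst[:-1]',
      examples: 'all_equal([1, 1, 1, 1]) # True',
    },
  },
};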

scripts/extract.js (new file, 80 lines)

@@ -0,0 +1,80 @@
/*
  This is the extractor script that generates the snippets.json file.
  Run using `npm run extractor`.
*/
// Load modules
const fs = require('fs-extra');
const path = require('path');
const { green } = require('kleur');
const util = require('./util');
const config = require('../config');

// Paths (relative to package.json)
const SNIPPETS_PATH = `./${config.snippetPath}`;
const OUTPUT_PATH = `./${config.snippetDataPath}`;

// Check if running on Travis, only build for cron jobs and custom builds
if (
  util.isTravisCI() &&
  process.env['TRAVIS_EVENT_TYPE'] !== 'cron' &&
  process.env['TRAVIS_EVENT_TYPE'] !== 'api'
) {
  console.log(
    `${green(
      'NOBUILD',
    )} snippet extraction terminated, not a cron or api build!`,
  );
  process.exit(0);
}

// Setup everything
let snippets = {},
  snippetsArray = [];

console.time('Extractor');

// Synchronously read all snippets from snippets folder and sort them as necessary (case-insensitive)
snippets = util.readSnippets(SNIPPETS_PATH);
snippetsArray = Object.keys(snippets).reduce((acc, key) => {
  acc.push(snippets[key]);
  return acc;
}, []);

const completeData = {
  data: [...snippetsArray],
  meta: {
    specification: 'http://jsonapi.org/format/',
    type: 'snippetArray',
  },
};
let listingData = {
  data: completeData.data.map(v => ({
    id: v.id,
    type: 'snippetListing',
    title: v.title,
    attributes: {
      text: v.attributes.text,
      tags: v.attributes.tags,
    },
    meta: {
      hash: v.meta.hash,
    },
  })),
  meta: {
    specification: 'http://jsonapi.org/format/',
    type: 'snippetListingArray',
  },
};

// Write files
fs.writeFileSync(
  path.join(OUTPUT_PATH, 'snippets.json'),
  JSON.stringify(completeData, null, 2),
);
fs.writeFileSync(
  path.join(OUTPUT_PATH, 'snippetList.json'),
  JSON.stringify(listingData, null, 2),
);

// Display messages and time
console.log(
  `${green('SUCCESS!')} snippets.json and snippetList.json files generated!`,
);
console.timeEnd('Extractor');
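
For reference, a hedged sketch of a single entry in the generated snippetList.json, mirroring the map() above; all values are hypothetical.

// Hypothetical snippetListing entry, shaped by the map() in extract.js.
const exampleListing = {
  id: 'all_equal',
  type: 'snippetListing',
  title: 'all_equal',
  attributes: {
    text: 'Checks if all elements in a list are equal.\n',
    tags: ['list', 'beginner'],
  },
  meta: {
    hash: '<sha256 hex digest of the snippet file body>',
  },
};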

@@ -1,10 +0,0 @@
import util
import subprocess
import sys

for snippet in util.read_snippets():
    print(snippet.name)
    code = snippet.read_code()
    check_1 = subprocess.run(['flake8', '-', '--select=E901,E999,F821,F822,F823', '--count', '--show-source', '--statistics'], input=code, encoding='utf8', stdout=subprocess.PIPE)
    check_2 = subprocess.run(['flake8', '-', '--exit-zero', '--max-complexity=10', '--count', '--max-line-length=127', '--statistics', '--ignore=W292'], input=code, encoding='utf8', stdout=subprocess.PIPE)
    check_1.check_returncode()
    check_2.check_returncode()

@@ -1,43 +0,0 @@
import util
from collections import defaultdict


def title_case(str):
    return str[:1].upper() + str[1:].lower()


EMOJIS = {
    'adapter': ':electric_plug:',
    'list': ':books:',
    'browser': ':globe_with_meridians:',
    'date': ':stopwatch:',
    'function': ':control_knobs:',
    'logic': ':crystal_ball:',
    'math': ':heavy_division_sign:',
    'media': ':tv:',
    'node': ':package:',
    'object': ':card_file_box:',
    'string': ':scroll:',
    'type': ':page_with_curl:',
    'utility': ':wrench:'
}


def tagger():
    tag_db = defaultdict(list)
    for snippet in util.read_snippets():
        tag_db[snippet.category[0]].append(snippet)
    return tag_db


start = util.read_readme_start()
end = util.read_readme_end()
toAppend = ''
tag_dict = tagger()
author_database = util.author_reader()

for category in sorted(tag_dict):
    toAppend = toAppend + '### ' + EMOJIS[category] + ' ' + title_case(category) + '\n\n<details><summary>View contents</summary> <ul>'
    for snippet in sorted(tag_dict[category], key=lambda snippet: snippet.name):
        toAppend += f'<li><a href = "#{snippet.name}"><code>{snippet.name}</code></a></li>\n'
    toAppend += '</ul></details>\n\n'

toAppend += '<hr></hr> \n\n'

for category in sorted(tag_dict):
    toAppend = toAppend + '## ' + EMOJIS[category] + ' ' + title_case(category) + '\n\n'
    for snippet in sorted(tag_dict[category], key=lambda snippet: snippet.name):
        author, contributors = author_database[snippet.name]
        contributors = ', '.join(contributors)
        toAppend += f'### {snippet.name} \n<span style="color:grey">Author:-</span> {author} \n\n <span style="color:grey">Contributors:-</span>{contributors}\n\n{snippet.read_description()}\n```py\n{snippet.read_code()}\n```\n<details><summary>View Examples</summary>\n\n```py\n{snippet.read_example()}\n```\n</details>\n\n<br><a href = "#table-of-contents">:arrow_up: Back to top</a>\n\n'

open("README.md", 'w').write(start + toAppend + '\n' + end)

@@ -1,67 +0,0 @@
import os, re


def author_reader():
    contributor_file = open('contributor_database')
    author_database = {}
    contributor_db = contributor_file.read().split('\n')
    contributor_db = [contributor for contributor in contributor_db if contributor.strip() != '']
    for contributor_data_db in contributor_db:
        snippetName, contributor_data = contributor_data_db.split(':')
        author = contributor_data.split(',')[0]
        contributors = contributor_data.split(',')
        author = re.sub('(\[[\w\s]+\]\()\@(\w+)\)', '\g<1>https://www.github.com/\g<2>)', author.strip())
        contributors = [re.sub('(\[[\w\s]+\]\()\@(\w+)\)', '\g<1>https://www.github.com/\g<2>)', contributor) for contributor in contributors]
        author_database[snippetName] = (author, contributors)
    return author_database


def tagger():
    tag_database_file = open('tag_database')
    tag_database = tag_database_file.read()
    tag_database_file.close()
    tag_database = [tag for tag in tag_database.split('\n') if tag.strip() != '']
    tag_db = {}
    for tag in tag_database:
        tag_db[tag.split(':')[0].strip()] = tag.split(':')[1].strip().split(',')
    return tag_db


def read_snippets():
    snippet_files = os.listdir('snippets')
    snippets = []

    class snippet():
        def __init__(self, file_location):
            with open(file_location) as f:
                self.content = f.read()
            self.codeRe = "```\s*python([\s\S]*?)```"
            self.titleRe = '###\\s*([\\w]+)'
            self.descRe = '###\s*\w+\s*([\s\S]+)```\s*python[\s\S]+```\s*```\s*python[\s\S]+```'
            self.name = self.read_title()
            self.category = tagger()[self.name]

        def read_code(self):
            return re.findall(self.codeRe, self.content)[0].strip()

        def read_title(self):
            return re.findall(self.titleRe, self.content)[0].strip()

        def read_description(self):
            return re.findall(self.descRe, self.content)[0].strip()

        def read_example(self):
            return re.findall(self.codeRe, self.content)[1].strip()

    for snippet_file in snippet_files:
        snippets.append(snippet(f'snippets/{snippet_file}'))
    return snippets


def read_readme_start():
    with open('static-parts/readme-start.md') as f:
        readme_start = f.read()
    return readme_start


def read_readme_end():
    with open('static-parts/readme-end.md') as f:
        readme_end = f.read()
    return readme_end

@@ -0,0 +1,12 @@
// Checks if current environment is Travis CI, Cron builds, API builds
const isTravisCI = () => 'TRAVIS' in process.env && 'CI' in process.env;

const isTravisCronOrAPI = () =>
  process.env['TRAVIS_EVENT_TYPE'] === 'cron' ||
  process.env['TRAVIS_EVENT_TYPE'] === 'api';

const isNotTravisCronOrAPI = () => !isTravisCronOrAPI();

module.exports = {
  isTravisCI,
  isTravisCronOrAPI,
  isNotTravisCronOrAPI,
};
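
As a rough illustration, the Travis guard at the top of extract.js could be phrased with these helpers; this sketch assumes they are re-exported through scripts/util/index.js as shown further down.

// Hedged sketch: an equivalent form of the guard used in extract.js.
const util = require('./util');

if (util.isTravisCI() && util.isNotTravisCronOrAPI()) {
  console.log('NOBUILD snippet extraction terminated, not a cron or api build!');
  process.exit(0);
}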

scripts/util/helpers.js (new file, 60 lines)

@@ -0,0 +1,60 @@
const { misc } = require('markdown-builder'); // needed by makeExamples below
const config = require('../../config');

// Converts a paragraph title into its GitHub markdown anchor
const getMarkDownAnchor = paragraphTitle =>
  paragraphTitle
    .trim()
    .toLowerCase()
    .replace(/[^\w\- ]+/g, '')
    .replace(/\s/g, '-')
    .replace(/\-+$/, '');

// Creates an object from pairs
const objectFromPairs = arr => arr.reduce((a, v) => ((a[v[0]] = v[1]), a), {});

// Optimizes nodes in an HTML document
const optimizeNodes = (data, regexp, replacer) => {
  let count = 0;
  let output = data;
  do {
    output = output.replace(regexp, replacer);
    count = 0;
    while (regexp.exec(output) !== null) ++count;
  } while (count > 0);
  return output;
};

// Capitalizes the first letter of a string
const capitalize = (str, lowerRest = false) =>
  str.slice(0, 1).toUpperCase() +
  (lowerRest ? str.slice(1).toLowerCase() : str.slice(1));

// Collects the unique primary tags, sorted alphabetically with 'Uncategorized' last
const prepTaggedData = tagDbData =>
  [...new Set(Object.entries(tagDbData).map(t => t[1][0]))]
    .filter(v => v)
    .sort((a, b) =>
      capitalize(a, true) === 'Uncategorized'
        ? 1
        : capitalize(b, true) === 'Uncategorized'
        ? -1
        : a.localeCompare(b),
    );

// Wraps a snippet's example code block in a collapsible section and appends a back-to-top link
const makeExamples = data => {
  data =
    data.slice(0, data.lastIndexOf(`\`\`\`${config.language}`)).trim() +
    misc.collapsible(
      'Examples',
      data.slice(
        data.lastIndexOf(`\`\`\`${config.language}`),
        data.lastIndexOf('```'),
      ) + data.slice(data.lastIndexOf('```')),
    );
  return `${data}\n<br>${misc.link(
    '⬆ Back to top',
    misc.anchor('Contents'),
  )}\n\n`;
};

module.exports = {
  getMarkDownAnchor,
  objectFromPairs,
  optimizeNodes,
  capitalize,
  prepTaggedData,
  makeExamples,
};
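
A small, hypothetical illustration of prepTaggedData; the snippet file names and tags are invented.

// Hypothetical input: file name -> tags array, as assembled in build.js.
const tagDbData = {
  'all_equal.md': ['list', 'beginner'],
  'capitalize.md': ['string', 'intermediate'],
  'misc_helper.md': ['uncategorized'],
};
// prepTaggedData(tagDbData) keeps each snippet's primary tag, de-duplicates,
// and sorts alphabetically with 'uncategorized' forced to the end:
// ['list', 'string', 'uncategorized']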

scripts/util/index.js (new file, 37 lines)

@@ -0,0 +1,37 @@
const {
  isTravisCI,
  isTravisCronOrAPI,
  isNotTravisCronOrAPI,
} = require('./environmentCheck');
const {
  getMarkDownAnchor,
  objectFromPairs,
  optimizeNodes,
  capitalize,
  prepTaggedData,
  makeExamples,
} = require('./helpers');
const {
  getFilesInDir,
  hashData,
  getCodeBlocks,
  getTextualContent,
  readSnippets,
} = require('./snippetParser');

module.exports = {
  isTravisCI,
  isTravisCronOrAPI,
  isNotTravisCronOrAPI,
  getMarkDownAnchor,
  objectFromPairs,
  optimizeNodes,
  capitalize,
  prepTaggedData,
  makeExamples,
  getFilesInDir,
  hashData,
  getCodeBlocks,
  getTextualContent,
  readSnippets,
};

@@ -0,0 +1,118 @@
const fs = require('fs-extra'),
  path = require('path'),
  { red } = require('kleur'),
  crypto = require('crypto'),
  frontmatter = require('front-matter');
const config = require('../../config');

// Reads all files in a directory
const getFilesInDir = (directoryPath, withPath, exclude = null) => {
  try {
    let directoryFilenames = fs.readdirSync(directoryPath);
    directoryFilenames.sort((a, b) => {
      a = a.toLowerCase();
      b = b.toLowerCase();
      if (a < b) return -1;
      if (a > b) return 1;
      return 0;
    });
    if (withPath) {
      // a hacky way to do conditional array.map
      return directoryFilenames.reduce((fileNames, fileName) => {
        if (
          exclude == null ||
          !exclude.some(toExclude => fileName === toExclude)
        )
          fileNames.push(`${directoryPath}/${fileName}`);
        return fileNames;
      }, []);
    }
    return directoryFilenames.filter(v => v !== 'README.md');
  } catch (err) {
    console.log(`${red('ERROR!')} During snippet loading: ${err}`);
    process.exit(1);
  }
};

// Creates a hash for a value using the SHA-256 algorithm.
const hashData = val =>
  crypto
    .createHash('sha256')
    .update(val)
    .digest('hex');

// Gets the code blocks for a snippet file.
const getCodeBlocks = str => {
  const regex = /```[.\S\s]*?```/g;
  let results = [];
  let m = null;
  while ((m = regex.exec(str)) !== null) {
    if (m.index === regex.lastIndex) regex.lastIndex += 1;
    m.forEach((match, groupIndex) => {
      results.push(match);
    });
  }
  const replacer = new RegExp(
    `\`\`\`${config.language}([\\s\\S]*?)\`\`\``,
    'g',
  );
  results = results.map(v => v.replace(replacer, '$1').trim());
  return {
    code: results[0],
    example: results[1],
  };
};

// Gets the textual content for a snippet file.
const getTextualContent = str => {
  const regex = /([\s\S]*?)```/g;
  const results = [];
  let m = null;
  while ((m = regex.exec(str)) !== null) {
    if (m.index === regex.lastIndex) regex.lastIndex += 1;
    m.forEach((match, groupIndex) => {
      results.push(match);
    });
  }
  return results[1].replace(/\r\n/g, '\n');
};

// Synchronously read all snippets and sort them as necessary (case-insensitive)
const readSnippets = snippetsPath => {
  const snippetFilenames = getFilesInDir(snippetsPath, false);
  let snippets = {};
  try {
    for (let snippet of snippetFilenames) {
      let data = frontmatter(
        fs.readFileSync(path.join(snippetsPath, snippet), 'utf8'),
      );
      snippets[snippet] = {
        id: snippet.slice(0, -3),
        title: data.attributes.title,
        type: 'snippet',
        attributes: {
          fileName: snippet,
          text: getTextualContent(data.body),
          codeBlocks: getCodeBlocks(data.body),
          tags: data.attributes.tags.split(',').map(t => t.trim()),
        },
        meta: {
          hash: hashData(data.body),
        },
      };
    }
  } catch (err) {
    console.log(`${red('ERROR!')} During snippet loading: ${err}`);
    process.exit(1);
  }
  return snippets;
};

module.exports = {
  getFilesInDir,
  hashData,
  getCodeBlocks,
  getTextualContent,
  readSnippets,
};
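
To make the parsing logic above concrete, here is a hedged sketch of the snippet file layout readSnippets() appears to assume: YAML front matter with a title and comma-separated tags, a short description, then two fenced code blocks (the snippet and its examples). The snippet itself, its tags, and the config.language value of 'py' are invented for illustration.

// Hedged illustration of a snippet markdown file, built as the string
// frontmatter() would receive. Assumes config.language is 'py'.
const exampleSnippetFile = [
  '---',
  'title: all_equal',
  'tags: list,beginner',
  '---',
  '',
  'Checks if all elements in a list are equal.',
  '',
  '```py',
  'def all_equal(lst):',
  '    return lst[1:] == lst[:-1]',
  '```',
  '',
  '```py',
  'all_equal([1, 2, 3, 4, 5, 6]) # False',
  'all_equal([1, 1, 1, 1]) # True',
  '```',
].join('\n');
// For a file like this, getTextualContent() returns the description text,
// getCodeBlocks() returns { code: 'def all_equal...', example: 'all_equal(...)' },
// and readSnippets() keys the resulting record by the markdown file name.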