diff --git a/gulp/content/index.js b/gulp/content/index.js
index 581e2d9..af5d9fe 100644
--- a/gulp/content/index.js
+++ b/gulp/content/index.js
@@ -7,175 +7,46 @@ const log = require('fancy-log');
 const tweetparse = require('../lib/tweetparse');
 const getEngines = require('./renderers');
 const Twitter = require('twitter-lite');
-const frontmatter = require('front-matter');
+const Page = require('./page');
 const createFileLoader = require('./files');
-const { URL } = require('url');
 
 const ROOT = path.resolve(__dirname, '../..');
 
 exports.parse = async function parsePageContent () {
-  const [ files, twitter, twitterBackup, twitterCache, { siteInfo } ] = await Promise.all([
+  const [ files, twitter, twitterBackup, twitterCache ] = await Promise.all([
     glob('pages/**/*.{md,hbs,html,xml}', { cwd: ROOT }),
     fs.readJson(resolve('twitter-config.json')).catch(() => null)
       .then(getTwitterClient),
-    fs.readJson(resolve('twitter-backup.json')).catch(() => {}),
-    fs.readJson(resolve('twitter-cache.json')).catch(() => {}),
-    fs.readJson(resolve('package.json')).catch(() => ({})),
+    fs.readJson(resolve('twitter-backup.json')).catch(() => ({})),
+    fs.readJson(resolve('twitter-cache.json')).catch(() => ({})),
   ]);
 
-  const loadFiles = createFileLoader();
-  const tweetsNeeded = [];
+
+  let tweetsNeeded = [];
+  const tweetsPresent = Object.keys(twitterCache);
+  const artifactLoader = createFileLoader();
 
   let pages = await Promise.map(files, async (filepath) => {
-    const { dir, name, ext } = path.parse(filepath);
-    const basename = path.basename(filepath);
+    const page = new Page(filepath);
+    if (!page.input) return;
+    if (!await page.load({ artifactLoader })) return;
 
-    // this is an include, skip it.
-    if (name[0] === '_') return;
-
-    const cwd = resolve(dir);
-    const input = resolve(filepath);
-    const outDir = path.join('dist', dir.slice(6));
-    const siteDir = `/${dir.slice(6)}`;
-
-    // if cwd === ROOT then we're in the bottom directory and there is no base
-    const base = path.relative(cwd, ROOT) && path.basename(dir);
-
-    /* Load Page Content **************************************************/
-    const [ raw, { ctime, mtime }, { images, titlecard } ] = await Promise.all([
-      fs.readFile(input).catch(() => null),
-      stat(input),
-      loadFiles(cwd, siteDir),
-    ]);
-
-    // empty file
-    if (!raw) return;
-
-    try {
-      var { attributes: meta, body } = frontmatter(raw.toString('utf8'));
-    } catch (e) {
-      log.error('Error while parsing frontmatter for ' + filepath, e);
-      return;
+    if (page.tweets.length) {
+      const missing = difference(page.tweets, tweetsPresent);
+      tweetsNeeded.push(...missing);
     }
 
-    // page is marked to be ignored, skip it.
-    if (meta.ignore) return;
-
-    meta.path = filepath;
-    meta.cwd = cwd;
-    meta.base = base;
-    meta.outDir = outDir;
-    meta.input = input;
-    meta.source = body;
-    meta.dateCreated = meta.date && new Date(meta.date) || ctime;
-    meta.dateModified = mtime;
-    meta.siteDir = siteDir;
-    meta.name = name;
-    meta.ext = ext;
-    meta.titlecard = titlecard;
-    meta.images = images;
-
-    var flags = new Set(meta.classes || []);
-    var isIndexPage = meta.isIndex = (name === 'index');
-    var isRootPage = meta.isRoot = (siteDir === '/');
-    var isCleanUrl = meta.isCleanUrl = [ '.hbs', '.md' ].includes(ext);
-
-    if ([ '.hbs', '.html', '.xml' ].includes(ext)) {
-      meta.engine = 'hbs';
-    } else if (ext === '.md') {
-      meta.engine = 'md';
-    } else {
-      meta.engine = 'raw';
-    }
-
-    flags.add(titlecard ? 'has-titlecard' : 'no-titlecard');
-    flags.add(meta.title ? 'has-title' : 'no-title');
-    flags.add(meta.subtitle ? 'has-subtitle' : 'no-subtitle');
-    flags.add(meta.description ? 'has-descrip' : 'no-descrip');
-
-    let slug, output, jsonOutput;
-    if (isRootPage) {
-      if (isCleanUrl) {
-        slug = '';
-        output = resolve(outDir, name, 'index.html');
-        jsonOutput = resolve(outDir, name + '.json');
-      } else {
-        slug = '';
-        output = resolve(outDir, basename);
-        jsonOutput = resolve(outDir, basename + '.json');
-      }
-    } else if (isCleanUrl) {
-      slug = name;
-      if (isIndexPage) {
-        output = resolve(outDir, 'index.html');
-      } else {
-        output = resolve(outDir, name, 'index.html');
-      }
-      jsonOutput = resolve(outDir, name + '.json');
-    } else {
-      slug = base;
-      output = resolve(outDir, basename);
-      jsonOutput = resolve(outDir, basename + '.json');
-    }
-    meta.slug = slug;
-    meta.output = output;
-    meta.json = jsonOutput;
-
-    const url = new URL(siteInfo.siteUrl);
-    if ([ '.hbs', '.md' ].includes(ext)) {
-      url.pathname = path.join(siteDir, slug);
-    } else if (isIndexPage) {
-      url.pathname = siteDir;
-    } else {
-      url.pathname = path.join(siteDir, path.basename(filepath));
-    }
-    meta.url = url.pathname;
-    meta.fullurl = url.toString();
-
-
-    /* Process Tweets **************************************************/
-
-    const tweets = [];
-
-    if (meta.tweet) {
-      meta.tweet = [ meta.tweet ].flat(1).map(parseTweetId);
-      tweets.push(...meta.tweet);
-    }
-
-    if (meta.tweets) {
-      meta.tweets = meta.tweets.map(parseTweetId);
-      tweets.push(...meta.tweets);
-    }
-
-    for (const id of tweets) {
-      if (!twitterCache[id]) {
-        tweetsNeeded.push(id);
-      }
-    }
-
-    meta.tweets = tweets;
-
-    flags.add(tweets.length ? 'has-tweets' : 'no-tweets');
-
-    /* Process Flags **************************************************/
-
-    meta.classes = Array.from(flags);
-    meta.flags = meta.classes.reduce((res, item) => {
-      var camelCased = item.replace(/-([a-z])/g, (g) => g[1].toUpperCase());
-      res[camelCased] = true;
-      return res;
-    }, {});
-
-    return meta;
+    return page;
   });
 
   pages = pages.filter(Boolean);
+  tweetsNeeded = uniq(tweetsNeeded);
 
   /* Load Missing Tweets **************************************************/
 
   if (tweetsNeeded.length) {
     log('Fetching tweets: ' + tweetsNeeded.join(', '));
-    const arriving = await Promise.all(chunk(uniq(tweetsNeeded), 99).map(twitter));
+    const arriving = await Promise.all(chunk(tweetsNeeded, 99).map(twitter));
 
     const loaded = [];
     for (const tweet of arriving.flat(1)) {
@@ -217,7 +88,7 @@ exports.parse = async function parsePageContent () {
   }
 
   await Promise.all([
-    fs.writeFile(path.join(ROOT, 'pages.json'), JSON.stringify(pages, null, 2)),
+    fs.writeFile(path.join(ROOT, 'pages.json'), JSON.stringify(pages.map((p) => p.toJson()), null, 2)),
     fs.writeFile(path.join(ROOT, 'twitter-media.json'), JSON.stringify(twitterMedia, null, 2)),
     fs.writeFile(path.join(ROOT, 'twitter-cache.json'), JSON.stringify(twitterCache, null, 2)),
     fs.writeFile(path.join(ROOT, 'twitter-backup.json'), JSON.stringify(twitterBackup, null, 2)),
@@ -234,34 +105,38 @@ exports.write = async function writePageContent ({ prod }) {
   ]);
 
   await Promise.map(pages, async (page) => {
+    // page = new Page(page);
+
     var data = {
       ...page,
-      meta: page,
+      meta: { ...page.meta, ...page },
       page: {
         domain: siteInfo.domain,
-        title: page.title
-          ? (page.title + (page.subtitle ? ', ' + page.subtitle : '') + ' :: ' + siteInfo.title)
+        title: page.meta.title
+          ? (page.meta.title + (page.meta.subtitle ? ', ' + page.meta.subtitle : '') + ' :: ' + siteInfo.title)
           : siteInfo.title,
+        description: page.meta.description || siteInfo.description,
       },
       local: {
         cwd: page.cwd,
         root: ROOT,
-        basename: path.basename(page.input),
+        basename: page.basename,
       },
       pages,
     };
-    const html = engines[page.engine](data.source, data).toString();
+    const html = String(engines[page.engine](data.source, data));
 
     const json = page.json && {
       url: page.fullurl,
-      title: page.title,
-      subtitle: page.subtitle,
-      description: page.description,
+      title: page.meta.title,
+      subtitle: page.meta.subtitle,
+      description: page.meta.description,
       tweets: page.tweets,
       images: page.images,
       dateCreated: page.dateCreated,
       dateModified: page.dateModified,
       titlecard: page.titlecard,
+      preview: page.engine === 'md' && String(engines.preview(data.source, data)),
     };
 
     await fs.ensureDir(path.dirname(page.output));
@@ -277,18 +152,6 @@ exports.write.prod = function writePageContentForProduction () { return exports.
 
 /* Utility Functions **************************************************/
 
-const tweeturl = /https?:\/\/twitter\.com\/(?:#!\/)?(?:\w+)\/status(?:es)?\/(\d+)/i;
-const tweetidcheck = /^\d+$/;
-function parseTweetId (tweetid) {
-  // we can't trust an id that isn't a string
-  if (typeof tweetid !== 'string') return false;
-
-  const match = tweetid.match(tweeturl);
-  if (match) return match[1];
-  if (tweetid.match(tweetidcheck)) return tweetid;
-  return false;
-}
-
 function resolve (fpath, ...args) {
   if (fpath[0] === '/') fpath = fpath.slice(1);
   return path.resolve(ROOT, fpath, ...args);
@@ -301,5 +164,3 @@ function getTwitterClient (config) {
     .get('statuses/lookup', { id: tweetids.join(','), tweet_mode: 'extended' })
     .catch((e) => { log.error(e); return []; });
 }
-
-const stat = (f) => fs.stat(f).catch(() => undefined);
diff --git a/gulp/content/page.js b/gulp/content/page.js
new file mode 100644
index 0000000..cc596fe
--- /dev/null
+++ b/gulp/content/page.js
@@ -0,0 +1,173 @@
+
+const path = require('path');
+const Promise = require('bluebird');
+const fs = require('fs-extra');
+const log = require('fancy-log');
+const frontmatter = require('front-matter');
+const { URL } = require('url');
+const { pick, omit } = require('lodash');
+
+const ROOT = path.resolve(__dirname, '../..');
+const pkg = require(resolve('package.json'));
+
+
+/* Utility Functions **************************************************/
+
+const MD = '.md';
+const HBS = '.hbs';
+const HTML = '.html';
+const XML = '.xml';
+
+const tweeturl = /https?:\/\/twitter\.com\/(?:#!\/)?(?:\w+)\/status(?:es)?\/(\d+)/i;
+const tweetidcheck = /^\d+$/;
+function parseTweetId (tweetid) {
+  // we can't trust an id that isn't a string
+  if (typeof tweetid !== 'string') return false;
+
+  const match = tweetid.match(tweeturl);
+  if (match) return match[1];
+  if (tweetid.match(tweetidcheck)) return tweetid;
+  return false;
+}
+
+function resolve (...args) {
+  args = args.filter(Boolean);
+  let fpath = args.shift();
+  if (!fpath) return ROOT;
+  if (fpath[0] === '/') fpath = fpath.slice(1);
+  return path.resolve(ROOT, fpath, ...args);
+}
+
+
+module.exports = exports = class Page {
+
+  constructor (filepath) {
+    if (filepath && typeof filepath === 'object') {
+      // we've been passed a json object, treat as serialized Page
+      Object.assign(this, filepath);
+      return this;
+    }
+
+    const file = path.parse(filepath);
+    const { base: basename, name, ext } = file;
+
+    // this file is an include, skip it.
+    if (name[0] === '_') return false;
+
+    // this is not a page file
+    if (![ MD, HBS, HTML, XML ].includes(ext)) return false;
+
+    this.input = resolve(filepath);                     // /local/path/to/pages/folder/file.ext
+    this.cwd = resolve(file.dir);                       // /local/path/to/pages/, pages/folder, pages/folder/subfolder
+    this.base = file.dir.replace(/^pages\/?/, '');      // '', 'folder', 'folder/subfolder'
+    this.dir = file.dir.replace(/^pages\/?/, '/');      // /, /folder, /folder/subfolder
+    this.name = name;                                   // index, fileA, fileB
+    this.basename = basename;                           // index.ext, fileA.ext, fileB.ext
+    this.dest = file.dir.replace(/^pages\/?/, 'dist/'); // dist/, dist/folder, dist/folder/subfolder
+
+    var isIndexPage = (name === 'index');
+    var isCleanUrl = [ HBS, MD ].includes(ext);
+
+    if (isCleanUrl && isIndexPage) {
+      this.out = path.join(this.dest, 'index.html');
+      this.json = path.join(this.dest, 'index.json');
+      this.url = this.dir;
+    } else if (isCleanUrl) {
+      this.out = path.join(this.dest, this.name, 'index.html');
+      this.json = path.join(this.dest, this.name + '.json');
+      this.url = path.join(this.dir, this.name);
+    } else if (isIndexPage) {
+      this.out = path.join(this.dest, 'index.html');
+      this.json = path.join(this.dest, this.name + '.json');
+      this.url = this.dir;
+    } else {
+      this.out = path.join(this.dest, this.basename);
+      this.json = path.join(this.dest, this.basename + '.json');
+      this.url = path.join(this.dir, this.basename);
+    }
+
+    this.output = resolve(this.out);
+
+    const url = new URL(pkg.siteInfo.siteUrl);
+    url.pathname = this.url;
+    this.fullurl = url.href;
+
+    if ([ HBS, HTML, XML ].includes(ext)) {
+      this.engine = 'hbs';
+    } else if (ext === MD) {
+      this.engine = 'md';
+    } else {
+      this.engine = 'raw';
+    }
+
+  }
+
+  async load ({ artifactLoader }) {
+    const [ raw, { ctime, mtime }, { images, titlecard } ] = await Promise.all([
+      fs.readFile(this.input).catch(() => null),
+      fs.stat(this.input).catch(() => ({})),
+      artifactLoader(this.cwd, this.dir),
+    ]);
+
+    // empty or unreadable file
+    if (!raw || !ctime) {
+      log.error('Could not load page: ' + this.input);
+      return false;
+    }
+
+    try {
+      var { attributes: meta, body } = frontmatter(raw.toString('utf8'));
+    } catch (e) {
+      log.error('Error while parsing frontmatter for ' + this.input, e);
+      return false;
+    }
+
+    this.source = body;
+    this.meta = meta;
+    this.images = images;
+    this.titlecard = titlecard;
+    this.tweets = (meta.tweets || []).map(parseTweetId);
+    this.dateCreated = meta.date && new Date(meta.date) || ctime;
+    this.dateModified = mtime;
+
+    this.classes = Array.from(new Set(meta.classes || []));
+    this.flags = this.classes.reduce((res, item) => {
+      var camelCased = item.replace(/-([a-z])/g, (g) => g[1].toUpperCase());
+      res[camelCased] = true;
+      return res;
+    }, {});
+
+    return this;
+  }
+
+  toJson () {
+    const j = pick(this, [
+      'input',
+      'output',
+      'json',
+      'dateCreated',
+      'dateModified',
+      'cwd',
+      'base',
+      'dir',
+      'name',
+      'basename',
+      'dest',
+      'out',
+      'url',
+      'fullurl',
+      'engine',
+      'source',
+      'images',
+      'titlecard',
+      'tweets',
+      'classes',
+      'flags',
+    ]);
+
+    j.meta = omit(this.meta, [ 'date', 'classes', 'tweets' ]);
+
+    return j;
+  }
+
+};
diff --git a/package.json b/package.json
index 6139bfb..82ebae6 100644
--- a/package.json
+++ b/package.json
@@ -13,6 +13,7 @@
     "title": "That's Gender Dysphoria, FYI",
     "domain": "genderdysphoria.fyi",
     "siteUrl": "https://genderdysphoria.fyi",
+    "description": "A resource for those questioning their gender, already on a gender journey, or simply wanting to learn more about what it is to be transgender.",
     "rss": {
       "title": "That's Gender Dysphoria, FYI",
       "feed_url": "https://genderdysphoria.fyi/atom.xml",
diff --git a/pages/gdb/_pager.hbs b/pages/gdb/_pager.hbs
index 43c9798..5b11613 100644
--- a/pages/gdb/_pager.hbs
+++ b/pages/gdb/_pager.hbs
@@ -1,4 +1,3 @@
-