fix: builds should not accumulate on repeated changes (closes #404)
parent a25bc8a5e4
commit 107c8d413a
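
The diff below applies the same pattern in two places: file-change events are debounced into a single pending build, builds are serialized behind a mutex, and any build that was queued while a newer one was requested bails out instead of piling up. Here is a minimal standalone sketch of that pattern, assuming the `Mutex` in the diff comes from the `async-mutex` package and using a hypothetical `doWork()` stand-in for the actual site build:

import { Mutex } from "async-mutex"

// hypothetical stand-in for the real build step
async function doWork(): Promise<void> {
  // ... parse, filter, and emit content ...
}

const buildMutex = new Mutex()
const timeoutIds: Set<ReturnType<typeof setTimeout>> = new Set()
let lastBuildMs = 0

const build = async () => {
  const buildStart = new Date().getTime()
  lastBuildMs = buildStart
  // serialize builds: later callers wait here instead of running concurrently
  const release = await buildMutex.acquire()
  // a newer build was requested while we waited, so this one is stale
  if (lastBuildMs > buildStart) {
    release()
    return
  }
  try {
    await doWork()
  } finally {
    release()
  }
}

// debounce: collapse a burst of change events into one build, 250ms after the last event
const rebuild = () => {
  timeoutIds.forEach((id) => clearTimeout(id))
  timeoutIds.clear()
  timeoutIds.add(setTimeout(build, 250))
}

Calling `rebuild()` many times in quick succession then results in a single `doWork()` run; any build that still slips past the debounce exits early at the timestamp check instead of accumulating.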
@@ -393,10 +393,16 @@ See the [documentation](https://quartz.jzhao.xyz) for how to get started.
})

const buildMutex = new Mutex()
const timeoutIds = new Set()
let lastBuildMs = 0
let cleanupBuild = null
const build = async (clientRefresh) => {
  const buildStart = new Date().getTime()
  lastBuildMs = buildStart
  const release = await buildMutex.acquire()
  if (lastBuildMs > buildStart) {
    release()
    return
  }

  if (cleanupBuild) {
    await cleanupBuild()
@@ -428,12 +434,6 @@ See the [documentation](https://quartz.jzhao.xyz) for how to get started.
  clientRefresh()
}

const rebuild = (clientRefresh) => {
  timeoutIds.forEach((id) => clearTimeout(id))
  timeoutIds.clear()
  timeoutIds.add(setTimeout(() => build(clientRefresh), 250))
}

if (argv.serve) {
  const connections = []
  const clientRefresh = () => connections.forEach((conn) => conn.send("rebuild"))
@@ -539,7 +539,7 @@ See the [documentation](https://quartz.jzhao.xyz) for how to get started.
    ignoreInitial: true,
  })
  .on("all", async () => {
    rebuild(clientRefresh)
    build(clientRefresh)
  })
} else {
  await build(() => {})

@@ -81,7 +81,7 @@ async function startServing(
  }

  const initialSlugs = ctx.allSlugs
  const timeoutIds: Set<ReturnType<typeof setTimeout>> = new Set()
  let lastBuildMs = 0
  const toRebuild: Set<FilePath> = new Set()
  const toRemove: Set<FilePath> = new Set()
  const trackedAssets: Set<FilePath> = new Set()
@@ -111,11 +111,14 @@ async function startServing(
    }

    // debounce rebuilds every 250ms
    timeoutIds.add(
      setTimeout(async () => {

        const buildStart = new Date().getTime()
        lastBuildMs = buildStart
        const release = await mut.acquire()
        timeoutIds.forEach((id) => clearTimeout(id))
        timeoutIds.clear()
        if (lastBuildMs > buildStart) {
          release()
          return
        }

        const perf = new PerfTimer()
        console.log(chalk.yellow("Detected change, rebuilding..."))
@@ -137,11 +140,11 @@ async function startServing(
            contentMap.delete(fp)
          }

          const parsedFiles = [...contentMap.values()]
          const filteredContent = filterContent(ctx, parsedFiles)
          // TODO: we can probably traverse the link graph to figure out what's safe to delete here
          // instead of just deleting everything
          await rimraf(argv.output)
          const parsedFiles = [...contentMap.values()]
          const filteredContent = filterContent(ctx, parsedFiles)
          await emitContent(ctx, filteredContent)
          console.log(chalk.green(`Done rebuilding in ${perf.timeSince()}`))
        } catch {
@@ -152,8 +155,6 @@ async function startServing(
        toRebuild.clear()
        toRemove.clear()
        release()
      }, 250),
    )
  }

  const watcher = chokidar.watch(".", {
@@ -168,7 +169,6 @@ async function startServing(
    .on("unlink", (fp) => rebuild(fp, "delete"))

  return async () => {
    timeoutIds.forEach((id) => clearTimeout(id))
    await watcher.close()
  }
}
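
One detail from the last hunk worth calling out: the cleanup function returned by the serve loop now clears any pending debounce timers before closing the watcher, so a queued rebuild cannot fire after shutdown. A minimal sketch of that teardown shape, assuming chokidar's exported `FSWatcher` type; `makeCleanup` is a hypothetical helper name, and `timeoutIds` is the same set used by the debounce above:

import { FSWatcher } from "chokidar"

// hypothetical helper mirroring the cleanup path in the diff:
// cancel pending debounced rebuilds, then stop watching.
function makeCleanup(watcher: FSWatcher, timeoutIds: Set<ReturnType<typeof setTimeout>>) {
  return async () => {
    timeoutIds.forEach((id) => clearTimeout(id))
    timeoutIds.clear()
    await watcher.close()
  }
}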