.PHONY: all
all: clean install test vet lint check-dl0 check-gofmt check-headers check-retina build
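# Builds the site. Invokes sorg out of $(shell go env GOPATH)/bin explicitly
# rather than relying on the binary being on PATH.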
.PHONY: build
build:
	$(shell go env GOPATH)/bin/sorg build
.PHONY: check-dl0
check-dl0:
	scripts/check_dl0.sh
.PHONY: check-gofmt
check-gofmt:
	scripts/check_gofmt.sh
.PHONY: check-headers
check-headers:
	scripts/check_headers.sh
.PHONY: check-retina
check-retina:
	scripts/check_retina.sh
.PHONY: clean
clean:
	mkdir -p public/
	rm -f -r public/*
.PHONY: compile
compile: install
# Long TTL (in seconds) to set on an object in S3. This is suitable for items
# that we expect to invalidate only very rarely, like images. Although we set
# it for all assets, those expected to change more frequently, like script or
# stylesheet files, are versioned by a path that can be set at build time.
LONG_TTL := 86400
# Short TTL (in seconds) to set on an object in S3. This is suitable for items
# that are expected to change more frequently, like any HTML file.
SHORT_TTL := 3600
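# For reference: LONG_TTL's 86400 seconds is 24 hours and SHORT_TTL's 3600
# seconds is 1 hour. A deployed object's TTL can be spot-checked with curl
# (hypothetical URL; substitute a real object):
#
#     curl -sI https://brandur.org/assets/app.js | grep -i cache-control
#     # Cache-Control: max-age=86400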
.PHONY: deploy
deploy: check-target-dir
# Note that AWS_ACCESS_KEY_ID will only be set for builds on the master branch
# because it's stored in GitHub as a secret variable. Secret variables are not
# made available to non-master branches because of the risk of being leaked
# through a script in a rogue pull request.
ifdef AWS_ACCESS_KEY_ID
	aws --version
	@echo "\n=== Syncing HTML files\n"
# Force text/html for HTML because we're not using an extension.
#
# Note that we don't delete because it could result in a race condition in
# that files that are uploaded with special directives below could be
# removed even while the S3 bucket is actively in-use.
	aws s3 sync $(TARGET_DIR) s3://$(S3_BUCKET)/ --acl public-read --cache-control max-age=$(SHORT_TTL) --content-type text/html --exclude 'assets*' --exclude 'photographs*' $(AWS_CLI_FLAGS)
	@echo "\n=== Syncing media assets\n"
# Then move on to assets and allow S3 to detect content type.
#
# Note use of `--size-only` because mtimes may vary as they're not
# preserved by Git. Any updates to a static asset are likely to change its
# size though.
	aws s3 sync $(TARGET_DIR)/assets/ s3://$(S3_BUCKET)/assets/ --acl public-read --cache-control max-age=$(LONG_TTL) --follow-symlinks --size-only $(AWS_CLI_FLAGS)
	@echo "\n=== Syncing photographs\n"
# Photographs are identical to assets above except without `--delete`
# because any given build probably doesn't have the entire set.
	aws s3 sync $(TARGET_DIR)/photographs/ s3://$(S3_BUCKET)/photographs/ --acl public-read --cache-control max-age=$(LONG_TTL) --follow-symlinks --size-only $(AWS_CLI_FLAGS)
	@echo "\n=== Syncing Atom feeds\n"
# Upload Atom feed files with their proper content type.
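# For example, $(TARGET_DIR)/articles.atom has its $(TARGET_DIR)/ prefix
# stripped by sed, then xargs copies it to s3://$(S3_BUCKET)/articles.atom.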
	find $(TARGET_DIR) -name '*.atom' | sed "s|^\$(TARGET_DIR)/||" | xargs -I{} -n1 aws s3 cp $(TARGET_DIR)/{} s3://$(S3_BUCKET)/{} --acl public-read --cache-control max-age=$(SHORT_TTL) --content-type application/xml
	@echo "\n=== Syncing index HTML files\n"
# This one is a bit trickier to explain, but what we're doing here is
# uploading directory indexes as files at their directory name. So for
# example, `articles/index.html` gets uploaded as `articles`.
#
# We do all this work because CloudFront/S3 has trouble with index files.
# An S3 static site can have index.html set to indexes, but CloudFront only
# has the notion of a "root object" which is an index at the top level.
#
# We do this during deploy instead of during build for two reasons:
#
# 1. Some directories need to have an index *and* other files. We must name
# index files with `index.html` locally though because a file and
# directory cannot share a name.
# 2. The `index.html` files are useful for emulating a live server locally:
# Golang's http.FileServer will respect them as indexes.
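# Worked example: $(TARGET_DIR)/fragments/index.html survives the egrep (it's
# not the top-level index), sed reduces it to fragments/index.html, dirname to
# fragments, and the final cp uploads it to s3://$(S3_BUCKET)/fragments.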
	find $(TARGET_DIR) -name index.html | egrep -v '$(TARGET_DIR)/index.html' | sed "s|^$(TARGET_DIR)/||" | xargs -I{} -n1 dirname {} | xargs -I{} -n1 aws s3 cp $(TARGET_DIR)/{}/index.html s3://$(S3_BUCKET)/{} --acl public-read --cache-control max-age=$(SHORT_TTL) --content-type text/html
	@echo "\n=== Fixing robots.txt content type\n"
# Give robots.txt (if it exists) a Content-Type of text/plain. Twitter is
# rabid about this.
	[ -f $(TARGET_DIR)/robots.txt ] && aws s3 cp $(TARGET_DIR)/robots.txt s3://$(S3_BUCKET)/ --acl public-read --cache-control max-age=$(SHORT_TTL) --content-type text/plain $(AWS_CLI_FLAGS) || echo "no robots.txt"
	@echo "\n=== Setting redirects\n"
# Set redirects on specific objects. Generally old stuff that I ended up
# refactoring to live somewhere else. Note that for these to work, S3 web
# hosting must be on, and CloudFront must be pointed to the S3 web hosting
# URL rather than the REST endpoint.
# Redirects are commented out for now. They should already be in place and
# they're quite slow to run in the build process.
# aws s3api put-object --acl public-read --bucket $(S3_BUCKET) --key glass --website-redirect-location /newsletter
# aws s3api put-object --acl public-read --bucket $(S3_BUCKET) --key sequences/berlin.atom --website-redirect-location /sequences.atom
# aws s3api put-object --acl public-read --bucket $(S3_BUCKET) --key sequences/berlin/001 --website-redirect-location /sequences/001
# aws s3api put-object --acl public-read --bucket $(S3_BUCKET) --key sequences/berlin/002 --website-redirect-location /sequences/002
# aws s3api put-object --acl public-read --bucket $(S3_BUCKET) --key sequences/berlin/003 --website-redirect-location /sequences/003
# aws s3api put-object --acl public-read --bucket $(S3_BUCKET) --key sequences/berlin/004 --website-redirect-location /sequences/004
# aws s3api put-object --acl public-read --bucket $(S3_BUCKET) --key sequences/berlin/005 --website-redirect-location /sequences/005
# aws s3api put-object --acl public-read --bucket $(S3_BUCKET) --key sequences/berlin/006 --website-redirect-location /sequences/006
# aws s3api put-object --acl public-read --bucket $(S3_BUCKET) --key sequences/berlin/007 --website-redirect-location /sequences/007
# aws s3api put-object --acl public-read --bucket $(S3_BUCKET) --key sequences/berlin/008 --website-redirect-location /sequences/008
# aws s3api put-object --acl public-read --bucket $(S3_BUCKET) --key sequences/berlin/009 --website-redirect-location /sequences/009
# aws s3api put-object --acl public-read --bucket $(S3_BUCKET) --key sequences/berlin/010 --website-redirect-location /sequences/010
# aws s3api put-object --acl public-read --bucket $(S3_BUCKET) --key sequences/2020-light.atom --website-redirect-location /sequences.atom
# aws s3api put-object --acl public-read --bucket $(S3_BUCKET) --key sequences/2020-light/011 --website-redirect-location /sequences/011
# aws s3api put-object --acl public-read --bucket $(S3_BUCKET) --key sequences/2020-light/012 --website-redirect-location /sequences/012
# aws s3api put-object --acl public-read --bucket $(S3_BUCKET) --key sequences/2020-light/013 --website-redirect-location /sequences/013
# aws s3api put-object --acl public-read --bucket $(S3_BUCKET) --key sequences/2020-light/014 --website-redirect-location /sequences/014
# aws s3api put-object --acl public-read --bucket $(S3_BUCKET) --key sequences/2020-light/015 --website-redirect-location /sequences/015
# aws s3api put-object --acl public-read --bucket $(S3_BUCKET) --key sequences/2020-light/016 --website-redirect-location /sequences/016
# aws s3api put-object --acl public-read --bucket $(S3_BUCKET) --key sequences/2020-light/017 --website-redirect-location /sequences/017
# aws s3api put-object --acl public-read --bucket $(S3_BUCKET) --key sequences/2020-light/018 --website-redirect-location /sequences/018
# aws s3api put-object --acl public-read --bucket $(S3_BUCKET) --key sequences/2020-light/019 --website-redirect-location /sequences/019
else
# No AWS access key. Skipping deploy.
endif
.PHONY: install
install:
	go install .
# Invalidates CloudFront's cache for paths specified in PATHS.
# Usage:
# make PATHS="/fragments /fragments/six-weeks" invalidate
.PHONY: invalidate
invalidate: check-aws-keys check-cloudfront-id
ifndef PATHS
	$(error PATHS is required)
endif
	aws cloudfront create-invalidation --distribution-id $(CLOUDFRONT_ID) --paths $(PATHS)
# Invalidates CloudFront's entire cache.
.PHONY: invalidate-all
invalidate-all: check-aws-keys check-cloudfront-id
	aws cloudfront create-invalidation --distribution-id $(CLOUDFRONT_ID) --paths "/*"
# Invalidates CloudFront's cached assets.
.PHONY: invalidate-assets
invalidate-assets: check-aws-keys check-cloudfront-id
	aws cloudfront create-invalidation --distribution-id $(CLOUDFRONT_ID) --paths "/assets/*"
# Invalidates CloudFront's cached index pages. This is useful, but not
# necessarily required, when publishing articles or new data (if it's not run,
# anything cached in CloudFront will expire naturally after SHORT_TTL).
.PHONY: invalidate-indexes
invalidate-indexes: check-aws-keys check-cloudfront-id
	aws cloudfront create-invalidation --distribution-id $(CLOUDFRONT_ID) --paths /articles /articles.atom /fragments /fragments.atom /nanoglyphs /nanoglyphs.atom /now /passages /passages.atom /twitter
.PHONY: killall
killall:
	killall sorg
.PHONY: lint
lint:
	golangci-lint run --fix
.PHONY: loop
loop:
	$(shell go env GOPATH)/bin/sorg loop
# `git add` any markers not already tracked by Git.
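# As with markers-rm below, DIR can optionally scope this to a directory:
#
#     make DIR="/content/photographs/sequences" markers-add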
.PHONY: markers-add
markers-add:
	git ls-files $(DIR) --others --exclude-standard '*.marker' | xargs -n1 git add
# Resets all `.marker` files showing up as modified in Git because their
# modified time was slightly altered. Useful after a big photo sync that leaves
# a bunch of markers in a modified or added state.
.PHONY: markers-gco
markers-gco:
	git ls-files --modified --exclude-standard "*.marker" | xargs git checkout
# Remove untracked `.marker` files, which can be useful in cases where markers
# have been committed to the Git repository on one computer, and Git is now
# preventing a pull on a different one because it'd overwrite local markers
# (that are functionally the same thing but considered different).
#
# Remove all untracked markers:
#
# make markers-rm
#
# Remove untracked markers in a specific directory:
#
# make DIR="/content/photographs/sequences" markers-rm
#
.PHONY: markers-rm
markers-rm:
	git ls-files $(DIR) --others --exclude-standard '*.marker' | xargs -n1 rm
# A specialized S3 bucket used only for caching resized photographs.
PHOTOGRAPHS_S3_BUCKET := "brandur.org-photographs"
.PHONY: photographs-download
photographs-download:
ifdef AWS_ACCESS_KEY_ID
	aws s3 sync s3://$(PHOTOGRAPHS_S3_BUCKET)/ content/photographs/
else
# No AWS access key. Skipping photographs-download.
endif
.PHONY: photographs-download-markers
photographs-download-markers:
ifdef AWS_ACCESS_KEY_ID
	aws s3 sync s3://$(PHOTOGRAPHS_S3_BUCKET)/ content/photographs/ --exclude "*" --include "*.marker"
else
# No AWS access key. Skipping photographs-download-markers.
endif
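# Note use of `--size-only` for the same reason as in deploy above: mtimes
# aren't reliable, and comparing by size alone avoids re-uploading photographs
# that haven't actually changed.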
.PHONY: photographs-upload
photographs-upload:
ifdef AWS_ACCESS_KEY_ID
	aws s3 sync content/photographs/ s3://$(PHOTOGRAPHS_S3_BUCKET)/ --size-only
else
# No AWS access key. Skipping photographs-upload.
endif
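# Sends SIGUSR2 to a running sorg process, which prompts Modulir to re-exec it
# (see watch-go below).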
.PHONY: sigusr2
sigusr2:
	killall -SIGUSR2 sorg
# sigusr2 aliases
.PHONY: reboot
reboot: sigusr2
.PHONY: restart
restart: sigusr2
.PHONY: test
test:
	go test ./...
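# Like test, but `-count=1` bypasses Go's cached test results.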
.PHONY: test-nocache
test-nocache:
	go test -count=1 ./...
.PHONY: vet
vet:
	go vet ./...
# This is designed to be a compromise between explicitness and readability. We
# could allow the find to discover everything in vendor/, but then the fswatch
# invocation becomes a huge unreadable wall of text that gets dumped into the
# shell. Instead, find all our own *.go files and then just tack the vendor/
# directory on separately (fswatch will watch it recursively).
GO_FILES := $(shell find . -type f -name "*.go" ! -path "./vendor/*")
# Meant to be used in conjunction with `forego start`. When a Go file changes,
# this watch recompiles the project, then sends USR2 to the process which
# prompts Modulir to re-exec it.
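# Typical usage is something like the following (assuming a Procfile that runs
# sorg under forego):
#
#     forego start &
#     make watch-go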
.PHONY: watch-go
watch-go:
	fswatch -o $(GO_FILES) vendor/ | xargs -n1 -I{} make install sigusr2
#
# Helpers
#
# Requires that variables necessary to make an AWS API call are in the
# environment.
.PHONY: check-aws-keys
check-aws-keys:
ifndef AWS_ACCESS_KEY_ID
	$(error AWS_ACCESS_KEY_ID is required)
endif
ifndef AWS_SECRET_ACCESS_KEY
	$(error AWS_SECRET_ACCESS_KEY is required)
endif
# Requires that variables necessary to update a CloudFront distribution are in
# the environment.
.PHONY: check-cloudfront-id
check-cloudfront-id:
ifndef CLOUDFRONT_ID
	$(error CLOUDFRONT_ID is required)
endif
.PHONY: check-target-dir
check-target-dir:
ifndef TARGET_DIR
	$(error TARGET_DIR is required)
endif