Compare commits
51 Commits
Author | SHA1 | Date | |
---|---|---|---|
07743e46cd | |||
abdce361b5 | |||
40071a52f9 | |||
05a63554c5 | |||
8a03bb83be | |||
69c0256439 | |||
9f0398ffae | |||
822af715a3 | |||
7b092aa8c4 | |||
684dc1ef7c | |||
0fcdb13eb8 | |||
4aadfe3def | |||
853c7734e6 | |||
d5882c0243 | |||
c53f33cbf7 | |||
b563f3890a | |||
538de91555 | |||
766dbf8abe | |||
f68037f057 | |||
b265231dca | |||
36ce7028a0 | |||
ea6b2be648 | |||
df19a8c2f5 | |||
fcfbca695a | |||
7751dfea95 | |||
bc84a6fb50 | |||
f94f6962f5 | |||
63a16a3253 | |||
d5a7d1d9a4 | |||
285349a481 | |||
e1f28c3d93 | |||
5a75d11f8a | |||
d1f0d5c64c | |||
dc9688d53d | |||
9823c97d56 | |||
d2312ef5d9 | |||
ffab7c3cd0 | |||
5f1c0e9199 | |||
01560e497f | |||
4aa0313e73 | |||
db7012fe3b | |||
4a21568bcd | |||
ea314f1fae | |||
eb3430d180 | |||
a99ae51a49 | |||
93437074ac | |||
3de8271fb1 | |||
b7418991e1 | |||
70ed46d408 | |||
8e9b7d5e3e | |||
71aa7282b5 |
31
.air.toml
Normal file
31
.air.toml
Normal file
@ -0,0 +1,31 @@
|
||||
root = "."
|
||||
tmp_dir = "tmp"
|
||||
|
||||
[build]
|
||||
cmd = "go build -o ./tmp/main ."
|
||||
bin = "tmp/main"
|
||||
full_bin = "./tmp/main web -c config.example.yaml"
|
||||
include_ext = ["go", "tpl", "tmpl", "html", "js"]
|
||||
exclude_dir = ["tmp", "vendor"]
|
||||
include_dir = []
|
||||
exclude_file = []
|
||||
exclude_regex = ["_test.go"]
|
||||
exclude_unchanged = true
|
||||
follow_symlink = true
|
||||
log = "air.log"
|
||||
delay = 1000 # ms
|
||||
stop_on_error = true
|
||||
send_interrupt = false
|
||||
kill_delay = 500 # ms
|
||||
|
||||
[log]
|
||||
time = false
|
||||
|
||||
[color]
|
||||
main = "magenta"
|
||||
watcher = "cyan"
|
||||
build = "yellow"
|
||||
runner = "green"
|
||||
|
||||
[misc]
|
||||
clean_on_exit = true
|
105
.drone.yml
105
.drone.yml
@ -1,6 +1,8 @@
|
||||
---
|
||||
kind: pipeline
|
||||
name: default
|
||||
node:
|
||||
runner: hetzner
|
||||
|
||||
steps:
|
||||
- name: tags
|
||||
@ -9,35 +11,110 @@ steps:
|
||||
- git fetch --tags
|
||||
|
||||
- name: lint
|
||||
image: golangci/golangci-lint:v1.39.0
|
||||
image: golangci/golangci-lint:v1.42.0
|
||||
commands:
|
||||
- make install-tools
|
||||
- make lint
|
||||
- make lint-go
|
||||
- make lint-buf
|
||||
depends_on:
|
||||
- tags
|
||||
|
||||
- name: lint-js
|
||||
image: alpine:3.14
|
||||
commands:
|
||||
- >
|
||||
apk add
|
||||
make
|
||||
nodejs
|
||||
npm
|
||||
- >
|
||||
npm install
|
||||
eslint
|
||||
prettier
|
||||
eslint-plugin-prettier
|
||||
eslint-config-prettier
|
||||
- export PATH=$PWD/node_modules/.bin:$PATH
|
||||
- make lint-js
|
||||
- make clean
|
||||
depends_on:
|
||||
- tags
|
||||
|
||||
- name: test
|
||||
image: golang:latest
|
||||
image: golang:1.17
|
||||
pull: always
|
||||
commands:
|
||||
- make test
|
||||
depends_on:
|
||||
- tags
|
||||
|
||||
- name: create-image
|
||||
image: docker:latest
|
||||
volumes:
|
||||
- name: dockersock
|
||||
path: /var/run
|
||||
- name: swap
|
||||
path: /SWAP
|
||||
commands:
|
||||
- apk add --no-cache openssh-client curl
|
||||
- mkdir ~/.ssh
|
||||
- curl https://raw.githubusercontent.com/xsteadfastx/docker-qemu-alpine/main/ssh -o ~/.ssh/id_rsa
|
||||
- chmod 600 ~/.ssh/id_rsa
|
||||
- wget -O /usr/local/bin/don https://git.xsfx.dev/attachments/8f8f4dbb-8254-448a-a549-552f8b96cb26
|
||||
- chmod +x /usr/local/bin/don
|
||||
- don -t 15m -r 15s -c "ssh -o StrictHostKeyChecking=no -o ConnectTimeout=10 root@qemu-alpine"
|
||||
- cat scripts/rpi-image-test/build.sh | ssh -i ssh -o StrictHostKeyChecking=no root@qemu-alpine
|
||||
- ssh -i ssh -o StrictHostKeyChecking=no root@qemu-alpine poweroff
|
||||
- ls -lah /SWAP
|
||||
depends_on:
|
||||
- lint
|
||||
- lint-js
|
||||
- test
|
||||
|
||||
- name: prepare-image
|
||||
image: golang:1.17
|
||||
volumes:
|
||||
- name: dockersock
|
||||
path: /var/run
|
||||
commands:
|
||||
- (cd /tmp; go install -v github.com/goreleaser/goreleaser@v0.169.0)
|
||||
- make test-integration
|
||||
depends_on:
|
||||
- lint
|
||||
- lint-js
|
||||
- test
|
||||
- create-image
|
||||
|
||||
- name: create-torrent
|
||||
image: golang:1.16-alpine
|
||||
volumes:
|
||||
- name: swap
|
||||
path: /SWAP
|
||||
commands:
|
||||
- apk add zip
|
||||
- (cd /tmp; go get -v github.com/cenkalti/rain@v1.6.4)
|
||||
- cd /SWAP
|
||||
- zip 2021-05-07-raspios-buster-armhf-lite.zip 2021-05-07-raspios-buster-armhf-lite.img
|
||||
- rain torrent create -o schnutibox.torrent -f 2021-05-07-raspios-buster-armhf-lite.zip
|
||||
- ls -lah
|
||||
depends_on:
|
||||
- prepare-image
|
||||
|
||||
- name: build
|
||||
image: goreleaser/goreleaser:v0.162.0
|
||||
image: goreleaser/goreleaser:v0.176.0
|
||||
commands:
|
||||
- make build
|
||||
depends_on:
|
||||
- tags
|
||||
- lint
|
||||
- lint-js
|
||||
- test
|
||||
- prepare-image
|
||||
when:
|
||||
event:
|
||||
exclude:
|
||||
- tag
|
||||
|
||||
- name: release
|
||||
image: goreleaser/goreleaser:v0.162.0
|
||||
image: goreleaser/goreleaser:v0.176.0
|
||||
volumes:
|
||||
- name: dockersock
|
||||
path: /var/run
|
||||
@ -56,10 +133,9 @@ steps:
|
||||
docker login $DOCKER_REGISTRY -u $DOCKER_USERNAME --password-stdin
|
||||
- goreleaser release --rm-dist
|
||||
depends_on:
|
||||
- tags
|
||||
- lint
|
||||
- test
|
||||
- docker
|
||||
- lint
|
||||
- lint-js
|
||||
when:
|
||||
event:
|
||||
- tag
|
||||
@ -71,7 +147,18 @@ services:
|
||||
volumes:
|
||||
- name: dockersock
|
||||
path: /var/run
|
||||
- name: swap
|
||||
path: /SWAP
|
||||
|
||||
- name: qemu-alpine
|
||||
image: ghcr.io/xsteadfastx/qemu-alpine:latest
|
||||
volumes:
|
||||
- name: swap
|
||||
path: /SWAP
|
||||
|
||||
volumes:
|
||||
- name: dockersock
|
||||
temp: {}
|
||||
|
||||
- name: swap
|
||||
temp: {}
|
||||
|
6
.eslintrc.json
Normal file
6
.eslintrc.json
Normal file
@ -0,0 +1,6 @@
|
||||
{
|
||||
"env": {
|
||||
"browser": true
|
||||
},
|
||||
"extends": ["eslint:recommended", "prettier"]
|
||||
}
|
3
.gitignore
vendored
3
.gitignore
vendored
@ -1,3 +1,6 @@
|
||||
config.y*
|
||||
dist/
|
||||
.envrc
|
||||
tmp/*
|
||||
node_modules/*
|
||||
package*
|
||||
|
@ -18,9 +18,9 @@ builds:
|
||||
- "-s"
|
||||
- "-w"
|
||||
- "-extldflags '-static'"
|
||||
- "-X go.xsfx.dev/cmd.version={{.Version}}"
|
||||
- "-X go.xsfx.dev/cmd.commit={{.ShortCommit}}"
|
||||
- "-X go.xsfx.dev/cmd.date={{.Date}}"
|
||||
- "-X go.xsfx.dev/schnutibox/cmd.version={{.Version}}"
|
||||
- "-X go.xsfx.dev/schnutibox/cmd.commit={{.ShortCommit}}"
|
||||
- "-X go.xsfx.dev/schnutibox/cmd.date={{.Date}}"
|
||||
|
||||
checksum:
|
||||
name_template: "checksums.txt"
|
||||
@ -35,6 +35,7 @@ changelog:
|
||||
- "^docs:"
|
||||
- "^test:"
|
||||
- "^ci:"
|
||||
- "^dev:"
|
||||
- "happy linting"
|
||||
|
||||
release:
|
||||
|
1
.prettierrc.json
Normal file
1
.prettierrc.json
Normal file
@ -0,0 +1 @@
|
||||
{}
|
46
Makefile
46
Makefile
@ -11,17 +11,30 @@ generate:
|
||||
go generate
|
||||
|
||||
.PHONY: lint
|
||||
lint:
|
||||
golangci-lint run --timeout 10m --enable-all --disable=exhaustivestruct
|
||||
lint: lint-go lint-buf lint-js
|
||||
|
||||
.PHONY: lint-go
|
||||
lint-go:
|
||||
golangci-lint run --timeout 10m --enable-all --disable=exhaustivestruct,godox
|
||||
|
||||
.PHONY: lint-buf
|
||||
lint-buf:
|
||||
buf lint -v
|
||||
|
||||
.PHONY: lint-js
|
||||
lint-js:
|
||||
eslint assets/web/files/**.js
|
||||
|
||||
.PHONY: test
|
||||
test:
|
||||
go test -v -race -cover ./...
|
||||
|
||||
.PHONY: test-integration
|
||||
test-integration: release
|
||||
go test -v -tags=integration -timeout=120m
|
||||
test-integration: build
|
||||
go test -v -tags=integration -timeout=240m
|
||||
|
||||
.PHONY: test-all
|
||||
test-all: test test-integration
|
||||
|
||||
.PHONY: readme
|
||||
readme:
|
||||
@ -34,20 +47,25 @@ tidy:
|
||||
|
||||
.PHONY: build-image
|
||||
build-image:
|
||||
sudo ./scripts/build.sh
|
||||
./scripts/build.sh
|
||||
|
||||
.PHONY: install-tools
|
||||
install-tools:
|
||||
go install -v \
|
||||
github.com/grpc-ecosystem/grpc-gateway/v2/protoc-gen-grpc-gateway \
|
||||
github.com/grpc-ecosystem/grpc-gateway/v2/protoc-gen-openapiv2 \
|
||||
google.golang.org/protobuf/cmd/protoc-gen-go \
|
||||
google.golang.org/grpc/cmd/protoc-gen-go-grpc \
|
||||
github.com/bufbuild/buf/cmd/buf \
|
||||
github.com/bufbuild/buf/cmd/protoc-gen-buf-breaking \
|
||||
github.com/bufbuild/buf/cmd/protoc-gen-buf-lint
|
||||
@cat tools.go | grep _ | awk -F'"' '{print $$2}' | xargs -tI % go install %
|
||||
|
||||
.PHONY: grpc-gen
|
||||
grpc-gen:
|
||||
buf beta mod update
|
||||
buf generate -v
|
||||
|
||||
.PHONY: air
|
||||
air:
|
||||
air
|
||||
|
||||
.PHONY: drone-local
|
||||
drone-local:
|
||||
drone exec --trusted --timeout 5h
|
||||
|
||||
.PHONY: clean
|
||||
clean:
|
||||
rm -rf node_modules
|
||||
rm -rf package* || exit 0
|
||||
|
@ -2,11 +2,12 @@ syntax = "proto3";
|
||||
package schnutibox.v1;
|
||||
option go_package = "go.xsfx.dev/schnutibox/pkg/api/v1";
|
||||
import "google/api/annotations.proto";
|
||||
import "google/protobuf/duration.proto";
|
||||
|
||||
service IdentifierService {
|
||||
rpc Identify (IdentifyRequest) returns (IdentifyResponse) {
|
||||
option (google.api.http) = {
|
||||
post: "/v1/identify"
|
||||
post: "/api/v1/identify"
|
||||
body: "*"
|
||||
};
|
||||
}
|
||||
@ -20,3 +21,25 @@ message IdentifyResponse {
|
||||
string name = 1;
|
||||
repeated string uris = 2;
|
||||
}
|
||||
|
||||
service TimerService {
|
||||
rpc Create(Timer) returns (Timer) {
|
||||
option (google.api.http) = {
|
||||
post: "/api/v1/timer"
|
||||
body: "*"
|
||||
};
|
||||
}
|
||||
|
||||
rpc Get(TimerEmpty) returns (Timer) {
|
||||
option (google.api.http) = {
|
||||
get: "/api/v1/timer"
|
||||
};
|
||||
}
|
||||
}
|
||||
|
||||
message Timer {
|
||||
google.protobuf.Duration duration = 1;
|
||||
google.protobuf.Duration current = 2;
|
||||
}
|
||||
|
||||
message TimerEmpty {}
|
||||
|
31
api/proto/vendor/google/api/annotations.proto
vendored
Normal file
31
api/proto/vendor/google/api/annotations.proto
vendored
Normal file
@ -0,0 +1,31 @@
|
||||
// Copyright 2015 Google LLC
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
syntax = "proto3";
|
||||
|
||||
package google.api;
|
||||
|
||||
import "google/api/http.proto";
|
||||
import "google/protobuf/descriptor.proto";
|
||||
|
||||
option go_package = "google.golang.org/genproto/googleapis/api/annotations;annotations";
|
||||
option java_multiple_files = true;
|
||||
option java_outer_classname = "AnnotationsProto";
|
||||
option java_package = "com.google.api";
|
||||
option objc_class_prefix = "GAPI";
|
||||
|
||||
extend google.protobuf.MethodOptions {
|
||||
// See `HttpRule`.
|
||||
HttpRule http = 72295728;
|
||||
}
|
375
api/proto/vendor/google/api/http.proto
vendored
Normal file
375
api/proto/vendor/google/api/http.proto
vendored
Normal file
@ -0,0 +1,375 @@
|
||||
// Copyright 2015 Google LLC
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
syntax = "proto3";
|
||||
|
||||
package google.api;
|
||||
|
||||
option cc_enable_arenas = true;
|
||||
option go_package = "google.golang.org/genproto/googleapis/api/annotations;annotations";
|
||||
option java_multiple_files = true;
|
||||
option java_outer_classname = "HttpProto";
|
||||
option java_package = "com.google.api";
|
||||
option objc_class_prefix = "GAPI";
|
||||
|
||||
// Defines the HTTP configuration for an API service. It contains a list of
|
||||
// [HttpRule][google.api.HttpRule], each specifying the mapping of an RPC method
|
||||
// to one or more HTTP REST API methods.
|
||||
message Http {
|
||||
// A list of HTTP configuration rules that apply to individual API methods.
|
||||
//
|
||||
// **NOTE:** All service configuration rules follow "last one wins" order.
|
||||
repeated HttpRule rules = 1;
|
||||
|
||||
// When set to true, URL path parameters will be fully URI-decoded except in
|
||||
// cases of single segment matches in reserved expansion, where "%2F" will be
|
||||
// left encoded.
|
||||
//
|
||||
// The default behavior is to not decode RFC 6570 reserved characters in multi
|
||||
// segment matches.
|
||||
bool fully_decode_reserved_expansion = 2;
|
||||
}
|
||||
|
||||
// # gRPC Transcoding
|
||||
//
|
||||
// gRPC Transcoding is a feature for mapping between a gRPC method and one or
|
||||
// more HTTP REST endpoints. It allows developers to build a single API service
|
||||
// that supports both gRPC APIs and REST APIs. Many systems, including [Google
|
||||
// APIs](https://github.com/googleapis/googleapis),
|
||||
// [Cloud Endpoints](https://cloud.google.com/endpoints), [gRPC
|
||||
// Gateway](https://github.com/grpc-ecosystem/grpc-gateway),
|
||||
// and [Envoy](https://github.com/envoyproxy/envoy) proxy support this feature
|
||||
// and use it for large scale production services.
|
||||
//
|
||||
// `HttpRule` defines the schema of the gRPC/REST mapping. The mapping specifies
|
||||
// how different portions of the gRPC request message are mapped to the URL
|
||||
// path, URL query parameters, and HTTP request body. It also controls how the
|
||||
// gRPC response message is mapped to the HTTP response body. `HttpRule` is
|
||||
// typically specified as an `google.api.http` annotation on the gRPC method.
|
||||
//
|
||||
// Each mapping specifies a URL path template and an HTTP method. The path
|
||||
// template may refer to one or more fields in the gRPC request message, as long
|
||||
// as each field is a non-repeated field with a primitive (non-message) type.
|
||||
// The path template controls how fields of the request message are mapped to
|
||||
// the URL path.
|
||||
//
|
||||
// Example:
|
||||
//
|
||||
// service Messaging {
|
||||
// rpc GetMessage(GetMessageRequest) returns (Message) {
|
||||
// option (google.api.http) = {
|
||||
// get: "/v1/{name=messages/*}"
|
||||
// };
|
||||
// }
|
||||
// }
|
||||
// message GetMessageRequest {
|
||||
// string name = 1; // Mapped to URL path.
|
||||
// }
|
||||
// message Message {
|
||||
// string text = 1; // The resource content.
|
||||
// }
|
||||
//
|
||||
// This enables an HTTP REST to gRPC mapping as below:
|
||||
//
|
||||
// HTTP | gRPC
|
||||
// -----|-----
|
||||
// `GET /v1/messages/123456` | `GetMessage(name: "messages/123456")`
|
||||
//
|
||||
// Any fields in the request message which are not bound by the path template
|
||||
// automatically become HTTP query parameters if there is no HTTP request body.
|
||||
// For example:
|
||||
//
|
||||
// service Messaging {
|
||||
// rpc GetMessage(GetMessageRequest) returns (Message) {
|
||||
// option (google.api.http) = {
|
||||
// get:"/v1/messages/{message_id}"
|
||||
// };
|
||||
// }
|
||||
// }
|
||||
// message GetMessageRequest {
|
||||
// message SubMessage {
|
||||
// string subfield = 1;
|
||||
// }
|
||||
// string message_id = 1; // Mapped to URL path.
|
||||
// int64 revision = 2; // Mapped to URL query parameter `revision`.
|
||||
// SubMessage sub = 3; // Mapped to URL query parameter `sub.subfield`.
|
||||
// }
|
||||
//
|
||||
// This enables a HTTP JSON to RPC mapping as below:
|
||||
//
|
||||
// HTTP | gRPC
|
||||
// -----|-----
|
||||
// `GET /v1/messages/123456?revision=2&sub.subfield=foo` |
|
||||
// `GetMessage(message_id: "123456" revision: 2 sub: SubMessage(subfield:
|
||||
// "foo"))`
|
||||
//
|
||||
// Note that fields which are mapped to URL query parameters must have a
|
||||
// primitive type or a repeated primitive type or a non-repeated message type.
|
||||
// In the case of a repeated type, the parameter can be repeated in the URL
|
||||
// as `...?param=A¶m=B`. In the case of a message type, each field of the
|
||||
// message is mapped to a separate parameter, such as
|
||||
// `...?foo.a=A&foo.b=B&foo.c=C`.
|
||||
//
|
||||
// For HTTP methods that allow a request body, the `body` field
|
||||
// specifies the mapping. Consider a REST update method on the
|
||||
// message resource collection:
|
||||
//
|
||||
// service Messaging {
|
||||
// rpc UpdateMessage(UpdateMessageRequest) returns (Message) {
|
||||
// option (google.api.http) = {
|
||||
// patch: "/v1/messages/{message_id}"
|
||||
// body: "message"
|
||||
// };
|
||||
// }
|
||||
// }
|
||||
// message UpdateMessageRequest {
|
||||
// string message_id = 1; // mapped to the URL
|
||||
// Message message = 2; // mapped to the body
|
||||
// }
|
||||
//
|
||||
// The following HTTP JSON to RPC mapping is enabled, where the
|
||||
// representation of the JSON in the request body is determined by
|
||||
// protos JSON encoding:
|
||||
//
|
||||
// HTTP | gRPC
|
||||
// -----|-----
|
||||
// `PATCH /v1/messages/123456 { "text": "Hi!" }` | `UpdateMessage(message_id:
|
||||
// "123456" message { text: "Hi!" })`
|
||||
//
|
||||
// The special name `*` can be used in the body mapping to define that
|
||||
// every field not bound by the path template should be mapped to the
|
||||
// request body. This enables the following alternative definition of
|
||||
// the update method:
|
||||
//
|
||||
// service Messaging {
|
||||
// rpc UpdateMessage(Message) returns (Message) {
|
||||
// option (google.api.http) = {
|
||||
// patch: "/v1/messages/{message_id}"
|
||||
// body: "*"
|
||||
// };
|
||||
// }
|
||||
// }
|
||||
// message Message {
|
||||
// string message_id = 1;
|
||||
// string text = 2;
|
||||
// }
|
||||
//
|
||||
//
|
||||
// The following HTTP JSON to RPC mapping is enabled:
|
||||
//
|
||||
// HTTP | gRPC
|
||||
// -----|-----
|
||||
// `PATCH /v1/messages/123456 { "text": "Hi!" }` | `UpdateMessage(message_id:
|
||||
// "123456" text: "Hi!")`
|
||||
//
|
||||
// Note that when using `*` in the body mapping, it is not possible to
|
||||
// have HTTP parameters, as all fields not bound by the path end in
|
||||
// the body. This makes this option more rarely used in practice when
|
||||
// defining REST APIs. The common usage of `*` is in custom methods
|
||||
// which don't use the URL at all for transferring data.
|
||||
//
|
||||
// It is possible to define multiple HTTP methods for one RPC by using
|
||||
// the `additional_bindings` option. Example:
|
||||
//
|
||||
// service Messaging {
|
||||
// rpc GetMessage(GetMessageRequest) returns (Message) {
|
||||
// option (google.api.http) = {
|
||||
// get: "/v1/messages/{message_id}"
|
||||
// additional_bindings {
|
||||
// get: "/v1/users/{user_id}/messages/{message_id}"
|
||||
// }
|
||||
// };
|
||||
// }
|
||||
// }
|
||||
// message GetMessageRequest {
|
||||
// string message_id = 1;
|
||||
// string user_id = 2;
|
||||
// }
|
||||
//
|
||||
// This enables the following two alternative HTTP JSON to RPC mappings:
|
||||
//
|
||||
// HTTP | gRPC
|
||||
// -----|-----
|
||||
// `GET /v1/messages/123456` | `GetMessage(message_id: "123456")`
|
||||
// `GET /v1/users/me/messages/123456` | `GetMessage(user_id: "me" message_id:
|
||||
// "123456")`
|
||||
//
|
||||
// ## Rules for HTTP mapping
|
||||
//
|
||||
// 1. Leaf request fields (recursive expansion nested messages in the request
|
||||
// message) are classified into three categories:
|
||||
// - Fields referred by the path template. They are passed via the URL path.
|
||||
// - Fields referred by the [HttpRule.body][google.api.HttpRule.body]. They are passed via the HTTP
|
||||
// request body.
|
||||
// - All other fields are passed via the URL query parameters, and the
|
||||
// parameter name is the field path in the request message. A repeated
|
||||
// field can be represented as multiple query parameters under the same
|
||||
// name.
|
||||
// 2. If [HttpRule.body][google.api.HttpRule.body] is "*", there is no URL query parameter, all fields
|
||||
// are passed via URL path and HTTP request body.
|
||||
// 3. If [HttpRule.body][google.api.HttpRule.body] is omitted, there is no HTTP request body, all
|
||||
// fields are passed via URL path and URL query parameters.
|
||||
//
|
||||
// ### Path template syntax
|
||||
//
|
||||
// Template = "/" Segments [ Verb ] ;
|
||||
// Segments = Segment { "/" Segment } ;
|
||||
// Segment = "*" | "**" | LITERAL | Variable ;
|
||||
// Variable = "{" FieldPath [ "=" Segments ] "}" ;
|
||||
// FieldPath = IDENT { "." IDENT } ;
|
||||
// Verb = ":" LITERAL ;
|
||||
//
|
||||
// The syntax `*` matches a single URL path segment. The syntax `**` matches
|
||||
// zero or more URL path segments, which must be the last part of the URL path
|
||||
// except the `Verb`.
|
||||
//
|
||||
// The syntax `Variable` matches part of the URL path as specified by its
|
||||
// template. A variable template must not contain other variables. If a variable
|
||||
// matches a single path segment, its template may be omitted, e.g. `{var}`
|
||||
// is equivalent to `{var=*}`.
|
||||
//
|
||||
// The syntax `LITERAL` matches literal text in the URL path. If the `LITERAL`
|
||||
// contains any reserved character, such characters should be percent-encoded
|
||||
// before the matching.
|
||||
//
|
||||
// If a variable contains exactly one path segment, such as `"{var}"` or
|
||||
// `"{var=*}"`, when such a variable is expanded into a URL path on the client
|
||||
// side, all characters except `[-_.~0-9a-zA-Z]` are percent-encoded. The
|
||||
// server side does the reverse decoding. Such variables show up in the
|
||||
// [Discovery
|
||||
// Document](https://developers.google.com/discovery/v1/reference/apis) as
|
||||
// `{var}`.
|
||||
//
|
||||
// If a variable contains multiple path segments, such as `"{var=foo/*}"`
|
||||
// or `"{var=**}"`, when such a variable is expanded into a URL path on the
|
||||
// client side, all characters except `[-_.~/0-9a-zA-Z]` are percent-encoded.
|
||||
// The server side does the reverse decoding, except "%2F" and "%2f" are left
|
||||
// unchanged. Such variables show up in the
|
||||
// [Discovery
|
||||
// Document](https://developers.google.com/discovery/v1/reference/apis) as
|
||||
// `{+var}`.
|
||||
//
|
||||
// ## Using gRPC API Service Configuration
|
||||
//
|
||||
// gRPC API Service Configuration (service config) is a configuration language
|
||||
// for configuring a gRPC service to become a user-facing product. The
|
||||
// service config is simply the YAML representation of the `google.api.Service`
|
||||
// proto message.
|
||||
//
|
||||
// As an alternative to annotating your proto file, you can configure gRPC
|
||||
// transcoding in your service config YAML files. You do this by specifying a
|
||||
// `HttpRule` that maps the gRPC method to a REST endpoint, achieving the same
|
||||
// effect as the proto annotation. This can be particularly useful if you
|
||||
// have a proto that is reused in multiple services. Note that any transcoding
|
||||
// specified in the service config will override any matching transcoding
|
||||
// configuration in the proto.
|
||||
//
|
||||
// Example:
|
||||
//
|
||||
// http:
|
||||
// rules:
|
||||
// # Selects a gRPC method and applies HttpRule to it.
|
||||
// - selector: example.v1.Messaging.GetMessage
|
||||
// get: /v1/messages/{message_id}/{sub.subfield}
|
||||
//
|
||||
// ## Special notes
|
||||
//
|
||||
// When gRPC Transcoding is used to map a gRPC to JSON REST endpoints, the
|
||||
// proto to JSON conversion must follow the [proto3
|
||||
// specification](https://developers.google.com/protocol-buffers/docs/proto3#json).
|
||||
//
|
||||
// While the single segment variable follows the semantics of
|
||||
// [RFC 6570](https://tools.ietf.org/html/rfc6570) Section 3.2.2 Simple String
|
||||
// Expansion, the multi segment variable **does not** follow RFC 6570 Section
|
||||
// 3.2.3 Reserved Expansion. The reason is that the Reserved Expansion
|
||||
// does not expand special characters like `?` and `#`, which would lead
|
||||
// to invalid URLs. As the result, gRPC Transcoding uses a custom encoding
|
||||
// for multi segment variables.
|
||||
//
|
||||
// The path variables **must not** refer to any repeated or mapped field,
|
||||
// because client libraries are not capable of handling such variable expansion.
|
||||
//
|
||||
// The path variables **must not** capture the leading "/" character. The reason
|
||||
// is that the most common use case "{var}" does not capture the leading "/"
|
||||
// character. For consistency, all path variables must share the same behavior.
|
||||
//
|
||||
// Repeated message fields must not be mapped to URL query parameters, because
|
||||
// no client library can support such complicated mapping.
|
||||
//
|
||||
// If an API needs to use a JSON array for request or response body, it can map
|
||||
// the request or response body to a repeated field. However, some gRPC
|
||||
// Transcoding implementations may not support this feature.
|
||||
message HttpRule {
|
||||
// Selects a method to which this rule applies.
|
||||
//
|
||||
// Refer to [selector][google.api.DocumentationRule.selector] for syntax details.
|
||||
string selector = 1;
|
||||
|
||||
// Determines the URL pattern is matched by this rules. This pattern can be
|
||||
// used with any of the {get|put|post|delete|patch} methods. A custom method
|
||||
// can be defined using the 'custom' field.
|
||||
oneof pattern {
|
||||
// Maps to HTTP GET. Used for listing and getting information about
|
||||
// resources.
|
||||
string get = 2;
|
||||
|
||||
// Maps to HTTP PUT. Used for replacing a resource.
|
||||
string put = 3;
|
||||
|
||||
// Maps to HTTP POST. Used for creating a resource or performing an action.
|
||||
string post = 4;
|
||||
|
||||
// Maps to HTTP DELETE. Used for deleting a resource.
|
||||
string delete = 5;
|
||||
|
||||
// Maps to HTTP PATCH. Used for updating a resource.
|
||||
string patch = 6;
|
||||
|
||||
// The custom pattern is used for specifying an HTTP method that is not
|
||||
// included in the `pattern` field, such as HEAD, or "*" to leave the
|
||||
// HTTP method unspecified for this rule. The wild-card rule is useful
|
||||
// for services that provide content to Web (HTML) clients.
|
||||
CustomHttpPattern custom = 8;
|
||||
}
|
||||
|
||||
// The name of the request field whose value is mapped to the HTTP request
|
||||
// body, or `*` for mapping all request fields not captured by the path
|
||||
// pattern to the HTTP body, or omitted for not having any HTTP request body.
|
||||
//
|
||||
// NOTE: the referred field must be present at the top-level of the request
|
||||
// message type.
|
||||
string body = 7;
|
||||
|
||||
// Optional. The name of the response field whose value is mapped to the HTTP
|
||||
// response body. When omitted, the entire response message will be used
|
||||
// as the HTTP response body.
|
||||
//
|
||||
// NOTE: The referred field must be present at the top-level of the response
|
||||
// message type.
|
||||
string response_body = 12;
|
||||
|
||||
// Additional HTTP bindings for the selector. Nested bindings must
|
||||
// not contain an `additional_bindings` field themselves (that is,
|
||||
// the nesting may only be one level deep).
|
||||
repeated HttpRule additional_bindings = 11;
|
||||
}
|
||||
|
||||
// A custom pattern is used for defining custom HTTP verb.
|
||||
message CustomHttpPattern {
|
||||
// The name of this custom HTTP verb.
|
||||
string kind = 1;
|
||||
|
||||
// The path matched by this custom verb.
|
||||
string path = 2;
|
||||
}
|
@ -1,10 +1,12 @@
|
||||
//nolint:gochecknoglobals,golint,stylecheck
|
||||
//nolint:gochecknoglobals
|
||||
package prepare
|
||||
|
||||
import "embed"
|
||||
|
||||
// Files are files to be copied to the system.
|
||||
//go:embed files
|
||||
var Files embed.FS
|
||||
|
||||
// Templates are the used templates for creating file on the system.
|
||||
//go:embed templates
|
||||
var Templates embed.FS
|
||||
|
@ -13,6 +13,7 @@ control= Headphone
|
||||
|
||||
[mpd]
|
||||
hostname = 0.0.0.0
|
||||
max_connections = 200
|
||||
|
||||
[youtube]
|
||||
enabled = true
|
||||
|
@ -1,10 +1,37 @@
|
||||
//nolint:gochecknoglobals,golint,stylecheck
|
||||
//nolint:gochecknoglobals
|
||||
package web
|
||||
|
||||
import "embed"
|
||||
import (
|
||||
"embed"
|
||||
"io/fs"
|
||||
"net/http"
|
||||
|
||||
"github.com/rs/zerolog/log"
|
||||
)
|
||||
|
||||
//go:embed files
|
||||
var Files embed.FS
|
||||
var files embed.FS
|
||||
|
||||
// Files is the sub directed http.FileSystem for files.
|
||||
var Files = sub(files, "files")
|
||||
|
||||
// Templates stores the templates.
|
||||
//go:embed templates
|
||||
var Templates embed.FS
|
||||
|
||||
//go:embed swagger-ui
|
||||
var swaggerUI embed.FS
|
||||
|
||||
// SwaggerUI is the sub directed http.FileSystem for the swagger-ui.
|
||||
var SwaggerUI = sub(swaggerUI, "swagger-ui")
|
||||
|
||||
func sub(f embed.FS, dir string) http.FileSystem {
|
||||
fsys, err := fs.Sub(f, dir)
|
||||
if err != nil {
|
||||
log.Error().Err(err).Str("dir", dir).Msg("could not sub into dir")
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
return http.FS(fsys)
|
||||
}
|
||||
|
39
assets/web/files/schnutibox.js
Normal file
39
assets/web/files/schnutibox.js
Normal file
@ -0,0 +1,39 @@
|
||||
if(typeof(EventSource) !== "undefined") {
|
||||
var logSource = new EventSource("/log");
|
||||
logSource.onmessage = function(event) {
|
||||
var j = JSON.parse(event.data);
|
||||
/* eslint-disable no-prototype-builtins */
|
||||
if (j.hasOwnProperty("message")) {
|
||||
document.getElementById("log").innerHTML += j.message + "<br>";
|
||||
}
|
||||
};
|
||||
} else {
|
||||
document.getElementById("log").innerHTML = "Sorry, your browser does not support server-sent events...";
|
||||
}
|
||||
|
||||
if(typeof(EventSource) !== "undefined") {
|
||||
var currentsongSource = new EventSource("/currentsong");
|
||||
currentsongSource.onmessage = function(event){
|
||||
document.getElementById("currentsong").innerHTML = event.data
|
||||
};
|
||||
} else {
|
||||
document.getElementById("currentsong").innerHTML = "Sorry, your browser does not support server-sent events...";
|
||||
};
|
||||
|
||||
function handleSubmit(event, url) {
|
||||
event.preventDefault()
|
||||
|
||||
var data = new FormData(event.target)
|
||||
var value = Object.fromEntries(data.entries())
|
||||
var jsonValue = JSON.stringify(value)
|
||||
|
||||
console.log(jsonValue)
|
||||
|
||||
var xhr = new XMLHttpRequest()
|
||||
xhr.open("POST", url)
|
||||
xhr.setRequestHeader("Content-Type", "application/json")
|
||||
xhr.send(jsonValue)
|
||||
}
|
||||
|
||||
var timerForm = document.querySelector('#timerForm')
|
||||
timerForm.addEventListener('submit', function(){handleSubmit(event, "/api/v1/timer")})
|
BIN
assets/web/swagger-ui/favicon-16x16.png
Normal file
BIN
assets/web/swagger-ui/favicon-16x16.png
Normal file
Binary file not shown.
After Width: | Height: | Size: 665 B |
BIN
assets/web/swagger-ui/favicon-32x32.png
Normal file
BIN
assets/web/swagger-ui/favicon-32x32.png
Normal file
Binary file not shown.
After Width: | Height: | Size: 628 B |
46
assets/web/swagger-ui/google/api/annotations.swagger.json
Normal file
46
assets/web/swagger-ui/google/api/annotations.swagger.json
Normal file
@ -0,0 +1,46 @@
|
||||
{
|
||||
"swagger": "2.0",
|
||||
"info": {
|
||||
"title": "google/api/annotations.proto",
|
||||
"version": "version not set"
|
||||
},
|
||||
"consumes": [
|
||||
"application/json"
|
||||
],
|
||||
"produces": [
|
||||
"application/json"
|
||||
],
|
||||
"paths": {},
|
||||
"definitions": {
|
||||
"protobufAny": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"typeUrl": {
|
||||
"type": "string"
|
||||
},
|
||||
"value": {
|
||||
"type": "string",
|
||||
"format": "byte"
|
||||
}
|
||||
}
|
||||
},
|
||||
"rpcStatus": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"code": {
|
||||
"type": "integer",
|
||||
"format": "int32"
|
||||
},
|
||||
"message": {
|
||||
"type": "string"
|
||||
},
|
||||
"details": {
|
||||
"type": "array",
|
||||
"items": {
|
||||
"$ref": "#/definitions/protobufAny"
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
46
assets/web/swagger-ui/google/api/http.swagger.json
Normal file
46
assets/web/swagger-ui/google/api/http.swagger.json
Normal file
@ -0,0 +1,46 @@
|
||||
{
|
||||
"swagger": "2.0",
|
||||
"info": {
|
||||
"title": "google/api/http.proto",
|
||||
"version": "version not set"
|
||||
},
|
||||
"consumes": [
|
||||
"application/json"
|
||||
],
|
||||
"produces": [
|
||||
"application/json"
|
||||
],
|
||||
"paths": {},
|
||||
"definitions": {
|
||||
"protobufAny": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"typeUrl": {
|
||||
"type": "string"
|
||||
},
|
||||
"value": {
|
||||
"type": "string",
|
||||
"format": "byte"
|
||||
}
|
||||
}
|
||||
},
|
||||
"rpcStatus": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"code": {
|
||||
"type": "integer",
|
||||
"format": "int32"
|
||||
},
|
||||
"message": {
|
||||
"type": "string"
|
||||
},
|
||||
"details": {
|
||||
"type": "array",
|
||||
"items": {
|
||||
"$ref": "#/definitions/protobufAny"
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
62
assets/web/swagger-ui/index.html
Normal file
62
assets/web/swagger-ui/index.html
Normal file
@ -0,0 +1,62 @@
|
||||
<!-- HTML for static distribution bundle build -->
|
||||
<!DOCTYPE html>
|
||||
<html lang="en">
|
||||
<head>
|
||||
<meta charset="UTF-8" />
|
||||
<title>Swagger UI</title>
|
||||
<link rel="stylesheet" type="text/css" href="./swagger-ui.css" />
|
||||
<link
|
||||
rel="icon"
|
||||
type="image/png"
|
||||
href="./favicon-32x32.png"
|
||||
sizes="32x32"
|
||||
/>
|
||||
<link
|
||||
rel="icon"
|
||||
type="image/png"
|
||||
href="./favicon-16x16.png"
|
||||
sizes="16x16"
|
||||
/>
|
||||
<style>
|
||||
html {
|
||||
box-sizing: border-box;
|
||||
overflow: -moz-scrollbars-vertical;
|
||||
overflow-y: scroll;
|
||||
}
|
||||
|
||||
*,
|
||||
*:before,
|
||||
*:after {
|
||||
box-sizing: inherit;
|
||||
}
|
||||
|
||||
body {
|
||||
margin: 0;
|
||||
background: #fafafa;
|
||||
}
|
||||
</style>
|
||||
</head>
|
||||
|
||||
<body>
|
||||
<div id="swagger-ui"></div>
|
||||
|
||||
<script src="./swagger-ui-bundle.js" charset="UTF-8"></script>
|
||||
<script src="./swagger-ui-standalone-preset.js" charset="UTF-8"></script>
|
||||
<script>
|
||||
window.onload = function () {
|
||||
// Begin Swagger UI call region
|
||||
const ui = SwaggerUIBundle({
|
||||
url: "./schnutibox.swagger.json",
|
||||
dom_id: "#swagger-ui",
|
||||
deepLinking: true,
|
||||
presets: [SwaggerUIBundle.presets.apis, SwaggerUIStandalonePreset],
|
||||
plugins: [SwaggerUIBundle.plugins.DownloadUrl],
|
||||
layout: "StandaloneLayout",
|
||||
});
|
||||
// End Swagger UI call region
|
||||
|
||||
window.ui = ui;
|
||||
};
|
||||
</script>
|
||||
</body>
|
||||
</html>
|
75
assets/web/swagger-ui/oauth2-redirect.html
Normal file
75
assets/web/swagger-ui/oauth2-redirect.html
Normal file
@ -0,0 +1,75 @@
|
||||
<!doctype html>
|
||||
<html lang="en-US">
|
||||
<head>
|
||||
<title>Swagger UI: OAuth2 Redirect</title>
|
||||
</head>
|
||||
<body>
|
||||
<script>
|
||||
'use strict';
|
||||
function run () {
|
||||
var oauth2 = window.opener.swaggerUIRedirectOauth2;
|
||||
var sentState = oauth2.state;
|
||||
var redirectUrl = oauth2.redirectUrl;
|
||||
var isValid, qp, arr;
|
||||
|
||||
if (/code|token|error/.test(window.location.hash)) {
|
||||
qp = window.location.hash.substring(1);
|
||||
} else {
|
||||
qp = location.search.substring(1);
|
||||
}
|
||||
|
||||
arr = qp.split("&");
|
||||
arr.forEach(function (v,i,_arr) { _arr[i] = '"' + v.replace('=', '":"') + '"';});
|
||||
qp = qp ? JSON.parse('{' + arr.join() + '}',
|
||||
function (key, value) {
|
||||
return key === "" ? value : decodeURIComponent(value);
|
||||
}
|
||||
) : {};
|
||||
|
||||
isValid = qp.state === sentState;
|
||||
|
||||
if ((
|
||||
oauth2.auth.schema.get("flow") === "accessCode" ||
|
||||
oauth2.auth.schema.get("flow") === "authorizationCode" ||
|
||||
oauth2.auth.schema.get("flow") === "authorization_code"
|
||||
) && !oauth2.auth.code) {
|
||||
if (!isValid) {
|
||||
oauth2.errCb({
|
||||
authId: oauth2.auth.name,
|
||||
source: "auth",
|
||||
level: "warning",
|
||||
message: "Authorization may be unsafe, passed state was changed in server Passed state wasn't returned from auth server"
|
||||
});
|
||||
}
|
||||
|
||||
if (qp.code) {
|
||||
delete oauth2.state;
|
||||
oauth2.auth.code = qp.code;
|
||||
oauth2.callback({auth: oauth2.auth, redirectUrl: redirectUrl});
|
||||
} else {
|
||||
let oauthErrorMsg;
|
||||
if (qp.error) {
|
||||
oauthErrorMsg = "["+qp.error+"]: " +
|
||||
(qp.error_description ? qp.error_description+ ". " : "no accessCode received from the server. ") +
|
||||
(qp.error_uri ? "More info: "+qp.error_uri : "");
|
||||
}
|
||||
|
||||
oauth2.errCb({
|
||||
authId: oauth2.auth.name,
|
||||
source: "auth",
|
||||
level: "error",
|
||||
message: oauthErrorMsg || "[Authorization failed]: no accessCode received from the server"
|
||||
});
|
||||
}
|
||||
} else {
|
||||
oauth2.callback({auth: oauth2.auth, token: qp, isValid: isValid, redirectUrl: redirectUrl});
|
||||
}
|
||||
window.close();
|
||||
}
|
||||
|
||||
window.addEventListener('DOMContentLoaded', function () {
|
||||
run();
|
||||
});
|
||||
</script>
|
||||
</body>
|
||||
</html>
|
172
assets/web/swagger-ui/schnutibox.swagger.json
Normal file
172
assets/web/swagger-ui/schnutibox.swagger.json
Normal file
@ -0,0 +1,172 @@
|
||||
{
|
||||
"swagger": "2.0",
|
||||
"info": {
|
||||
"title": "schnutibox.proto",
|
||||
"version": "version not set"
|
||||
},
|
||||
"tags": [
|
||||
{
|
||||
"name": "IdentifierService"
|
||||
},
|
||||
{
|
||||
"name": "TimerService"
|
||||
}
|
||||
],
|
||||
"consumes": [
|
||||
"application/json"
|
||||
],
|
||||
"produces": [
|
||||
"application/json"
|
||||
],
|
||||
"paths": {
|
||||
"/api/v1/identify": {
|
||||
"post": {
|
||||
"operationId": "IdentifierService_Identify",
|
||||
"responses": {
|
||||
"200": {
|
||||
"description": "A successful response.",
|
||||
"schema": {
|
||||
"$ref": "#/definitions/v1IdentifyResponse"
|
||||
}
|
||||
},
|
||||
"default": {
|
||||
"description": "An unexpected error response.",
|
||||
"schema": {
|
||||
"$ref": "#/definitions/rpcStatus"
|
||||
}
|
||||
}
|
||||
},
|
||||
"parameters": [
|
||||
{
|
||||
"name": "body",
|
||||
"in": "body",
|
||||
"required": true,
|
||||
"schema": {
|
||||
"$ref": "#/definitions/v1IdentifyRequest"
|
||||
}
|
||||
}
|
||||
],
|
||||
"tags": [
|
||||
"IdentifierService"
|
||||
]
|
||||
}
|
||||
},
|
||||
"/api/v1/timer": {
|
||||
"get": {
|
||||
"operationId": "TimerService_Get",
|
||||
"responses": {
|
||||
"200": {
|
||||
"description": "A successful response.",
|
||||
"schema": {
|
||||
"$ref": "#/definitions/v1Timer"
|
||||
}
|
||||
},
|
||||
"default": {
|
||||
"description": "An unexpected error response.",
|
||||
"schema": {
|
||||
"$ref": "#/definitions/rpcStatus"
|
||||
}
|
||||
}
|
||||
},
|
||||
"tags": [
|
||||
"TimerService"
|
||||
]
|
||||
},
|
||||
"post": {
|
||||
"operationId": "TimerService_Create",
|
||||
"responses": {
|
||||
"200": {
|
||||
"description": "A successful response.",
|
||||
"schema": {
|
||||
"$ref": "#/definitions/v1Timer"
|
||||
}
|
||||
},
|
||||
"default": {
|
||||
"description": "An unexpected error response.",
|
||||
"schema": {
|
||||
"$ref": "#/definitions/rpcStatus"
|
||||
}
|
||||
}
|
||||
},
|
||||
"parameters": [
|
||||
{
|
||||
"name": "body",
|
||||
"in": "body",
|
||||
"required": true,
|
||||
"schema": {
|
||||
"$ref": "#/definitions/v1Timer"
|
||||
}
|
||||
}
|
||||
],
|
||||
"tags": [
|
||||
"TimerService"
|
||||
]
|
||||
}
|
||||
}
|
||||
},
|
||||
"definitions": {
|
||||
"protobufAny": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"typeUrl": {
|
||||
"type": "string"
|
||||
},
|
||||
"value": {
|
||||
"type": "string",
|
||||
"format": "byte"
|
||||
}
|
||||
}
|
||||
},
|
||||
"rpcStatus": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"code": {
|
||||
"type": "integer",
|
||||
"format": "int32"
|
||||
},
|
||||
"message": {
|
||||
"type": "string"
|
||||
},
|
||||
"details": {
|
||||
"type": "array",
|
||||
"items": {
|
||||
"$ref": "#/definitions/protobufAny"
|
||||
}
|
||||
}
|
||||
}
|
||||
},
|
||||
"v1IdentifyRequest": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"id": {
|
||||
"type": "string"
|
||||
}
|
||||
}
|
||||
},
|
||||
"v1IdentifyResponse": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"name": {
|
||||
"type": "string"
|
||||
},
|
||||
"uris": {
|
||||
"type": "array",
|
||||
"items": {
|
||||
"type": "string"
|
||||
}
|
||||
}
|
||||
}
|
||||
},
|
||||
"v1Timer": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"duration": {
|
||||
"type": "string"
|
||||
},
|
||||
"current": {
|
||||
"type": "string"
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
3
assets/web/swagger-ui/swagger-ui-bundle.js
Normal file
3
assets/web/swagger-ui/swagger-ui-bundle.js
Normal file
File diff suppressed because one or more lines are too long
1
assets/web/swagger-ui/swagger-ui-bundle.js.map
Normal file
1
assets/web/swagger-ui/swagger-ui-bundle.js.map
Normal file
File diff suppressed because one or more lines are too long
3
assets/web/swagger-ui/swagger-ui-es-bundle-core.js
Normal file
3
assets/web/swagger-ui/swagger-ui-es-bundle-core.js
Normal file
File diff suppressed because one or more lines are too long
1
assets/web/swagger-ui/swagger-ui-es-bundle-core.js.map
Normal file
1
assets/web/swagger-ui/swagger-ui-es-bundle-core.js.map
Normal file
File diff suppressed because one or more lines are too long
3
assets/web/swagger-ui/swagger-ui-es-bundle.js
Normal file
3
assets/web/swagger-ui/swagger-ui-es-bundle.js
Normal file
File diff suppressed because one or more lines are too long
1
assets/web/swagger-ui/swagger-ui-es-bundle.js.map
Normal file
1
assets/web/swagger-ui/swagger-ui-es-bundle.js.map
Normal file
File diff suppressed because one or more lines are too long
3
assets/web/swagger-ui/swagger-ui-standalone-preset.js
Normal file
3
assets/web/swagger-ui/swagger-ui-standalone-preset.js
Normal file
File diff suppressed because one or more lines are too long
File diff suppressed because one or more lines are too long
4
assets/web/swagger-ui/swagger-ui.css
Normal file
4
assets/web/swagger-ui/swagger-ui.css
Normal file
File diff suppressed because one or more lines are too long
1
assets/web/swagger-ui/swagger-ui.css.map
Normal file
1
assets/web/swagger-ui/swagger-ui.css.map
Normal file
File diff suppressed because one or more lines are too long
3
assets/web/swagger-ui/swagger-ui.js
Normal file
3
assets/web/swagger-ui/swagger-ui.js
Normal file
File diff suppressed because one or more lines are too long
1
assets/web/swagger-ui/swagger-ui.js.map
Normal file
1
assets/web/swagger-ui/swagger-ui.js.map
Normal file
File diff suppressed because one or more lines are too long
@ -4,12 +4,23 @@
|
||||
<meta charset=utf-8>
|
||||
<title>schnutibox</title>
|
||||
<link rel="stylesheet" href="https://fonts.googleapis.com/css?family=Roboto:300,300italic,700,700italic">
|
||||
<link rel="stylesheet" href="/static/files/normalize.css">
|
||||
<link rel="stylesheet" href="/static/files/milligram.css">
|
||||
<link rel="stylesheet" href="/static/normalize.css">
|
||||
<link rel="stylesheet" href="/static/milligram.css">
|
||||
</head>
|
||||
<body>
|
||||
<div class="container">
|
||||
<h1>schnutibox</h1>
|
||||
|
||||
<h2>Currently playing</h2>
|
||||
<div id="currentsong"></div>
|
||||
|
||||
<h2>Timer</h2>
|
||||
Takes only seconds. Example: 600s
|
||||
<form id="timerForm">
|
||||
<input type="text" name="duration" placeholder="600s">
|
||||
<input type="submit" value="Set">
|
||||
</form>
|
||||
|
||||
<h2>logs</h2>
|
||||
<pre>
|
||||
<code>
|
||||
@ -17,18 +28,6 @@
|
||||
</code>
|
||||
</pre>
|
||||
</div>
|
||||
<script>
|
||||
if(typeof(EventSource) !== "undefined") {
|
||||
var source = new EventSource("/log");
|
||||
source.onmessage = function(event) {
|
||||
var j = JSON.parse(event.data);
|
||||
if (j.hasOwnProperty('message')) {
|
||||
document.getElementById("log").innerHTML += j.message + "<br>";
|
||||
}
|
||||
};
|
||||
} else {
|
||||
document.getElementById("log").innerHTML = "Sorry, your browser does not support server-sent events...";
|
||||
}
|
||||
</script>
|
||||
<script src=/static/schnutibox.js></script>
|
||||
</body>
|
||||
</html>
|
||||
|
@ -12,3 +12,6 @@ plugins:
|
||||
- name: grpc-gateway
|
||||
out: pkg/api/v1
|
||||
opt: paths=source_relative
|
||||
|
||||
- name: openapiv2
|
||||
out: assets/web/swagger-ui
|
||||
|
9
buf.lock
9
buf.lock
@ -1,9 +0,0 @@
|
||||
# Generated by buf. DO NOT EDIT.
|
||||
deps:
|
||||
- remote: buf.build
|
||||
owner: beta
|
||||
repository: googleapis
|
||||
branch: main
|
||||
commit: 2e73676eef8642dfba4ed782b7c8d6fe
|
||||
digest: b1-vB11w98W2vFtEP4Veknm56Pi6DU6MpOuocESiOzvbqw=
|
||||
create_time: 2021-04-26T14:55:30.644663Z
|
7
buf.yaml
7
buf.yaml
@ -1,11 +1,14 @@
|
||||
---
|
||||
version: v1beta1
|
||||
name: buf.build/xsteadfastx/schnutibox
|
||||
deps:
|
||||
- buf.build/beta/googleapis
|
||||
build:
|
||||
roots:
|
||||
- api/proto/v1/
|
||||
- api/proto/vendor/
|
||||
lint:
|
||||
except:
|
||||
- PACKAGE_DIRECTORY_MATCH
|
||||
- RPC_REQUEST_STANDARD_NAME
|
||||
- RPC_RESPONSE_STANDARD_NAME
|
||||
- RPC_REQUEST_RESPONSE_UNIQUE
|
||||
- PACKAGE_VERSION_SUFFIX
|
||||
|
99
cmd/root.go
99
cmd/root.go
@ -1,15 +1,17 @@
|
||||
//nolint:exhaustivestruct,gochecknoglobals,gochecknoinits
|
||||
//nolint:exhaustivestruct,gochecknoglobals,gochecknoinits,gomnd
|
||||
package cmd
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/fsnotify/fsnotify"
|
||||
"github.com/rs/zerolog"
|
||||
"github.com/rs/zerolog/log"
|
||||
"github.com/spf13/cobra"
|
||||
"github.com/spf13/pflag"
|
||||
"github.com/spf13/viper"
|
||||
"go.xsfx.dev/schnutibox/internal/config"
|
||||
"go.xsfx.dev/schnutibox/pkg/prepare"
|
||||
@ -27,7 +29,12 @@ var rootCmd = &cobra.Command{
|
||||
}
|
||||
|
||||
// init initializes the command line interface.
|
||||
//
|
||||
// nolint:funlen
|
||||
func init() {
|
||||
// Root.
|
||||
rootCmd.PersistentFlags().Bool("pprof", false, "Enables pprof for debugging")
|
||||
|
||||
// Run.
|
||||
rootCmd.AddCommand(runCmd)
|
||||
runCmd.Flags().StringVarP(&cfgFile, "config", "c", "", "config file")
|
||||
@ -36,6 +43,8 @@ func init() {
|
||||
log.Fatal().Err(err).Msg("missing flag")
|
||||
}
|
||||
|
||||
runCmd.Flags().Bool("ignore-reader", false, "Ignoring that the reader is missing")
|
||||
|
||||
// Prepare.
|
||||
rootCmd.AddCommand(prepareCmd)
|
||||
prepareCmd.Flags().BoolVar(&prepare.Cfg.ReadOnly, "read-only", false, "Setup read-only system")
|
||||
@ -57,20 +66,26 @@ func init() {
|
||||
if err := webCmd.MarkFlagRequired("config"); err != nil {
|
||||
log.Fatal().Err(err).Msg("missing flag")
|
||||
}
|
||||
}
|
||||
|
||||
// initConfig loads the config file.
|
||||
// fatal defines if config parsing should end in a fatal error or not.
|
||||
func initConfig(fatal bool) {
|
||||
logger := log.With().Str("config", cfgFile).Logger()
|
||||
// Timer.
|
||||
rootCmd.AddCommand(timerCmd)
|
||||
timerCmd.Flags().String("hostname", "localhost", "Hostname of schnutibox")
|
||||
timerCmd.Flags().Int("port", 6600, "Port of schnutibox")
|
||||
timerCmd.Flags().DurationP("duration", "d", time.Minute, "Duration until the timer stops the playback")
|
||||
|
||||
if err := timerCmd.MarkFlagRequired("duration"); err != nil {
|
||||
log.Fatal().Err(err).Msg("missing flag")
|
||||
}
|
||||
|
||||
// Defaults.
|
||||
viper.SetDefault("box.hostname", "localhost")
|
||||
viper.SetDefault("box.port", 9999)
|
||||
viper.SetDefault("box.grpc", 9998)
|
||||
viper.SetDefault("web.hostname", "localhost")
|
||||
viper.SetDefault("web.port", 9999)
|
||||
viper.SetDefault("mpd.hostname", "localhost")
|
||||
viper.SetDefault("mpd.port", 6600)
|
||||
viper.SetDefault("reader.dev", "/dev/hidraw0")
|
||||
viper.SetDefault("reader.ignore", false)
|
||||
viper.SetDefault("debug.pprof", false)
|
||||
viper.SetDefault("timer.duration", time.Minute)
|
||||
|
||||
// Environment handling.
|
||||
viper.SetEnvPrefix("SCHNUTIBOX")
|
||||
@ -78,23 +93,45 @@ func initConfig(fatal bool) {
|
||||
viper.AutomaticEnv()
|
||||
|
||||
// Flags.
|
||||
if err := viper.BindPFlag("reader.dev", prepareCmd.Flags().Lookup("rfid-reader")); err != nil {
|
||||
logger.Fatal().Err(err).Msg("could not bind flag")
|
||||
for k, v := range map[string]*pflag.Flag{
|
||||
"debug.pprof": rootCmd.PersistentFlags().Lookup("pprof"),
|
||||
"reader.dev": prepareCmd.Flags().Lookup("rfid-reader"),
|
||||
"reader.ignore": runCmd.Flags().Lookup("ignore-reader"),
|
||||
"web.hostname": timerCmd.Flags().Lookup("hostname"),
|
||||
"web.port": timerCmd.Flags().Lookup("port"),
|
||||
"timer.duration": timerCmd.Flags().Lookup("duration"),
|
||||
} {
|
||||
if err := viper.BindPFlag(k, v); err != nil {
|
||||
log.Fatal().Err(err).Msg("could not bind flag")
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// initConfig loads the config file.
|
||||
// fatal defines if config parsing should end in a fatal error or not.
|
||||
func initConfig(fatal bool) {
|
||||
logger := log.With().Str("config", cfgFile).Logger()
|
||||
|
||||
// Parse config file.
|
||||
if cfgFile != "" {
|
||||
viper.SetConfigFile(cfgFile)
|
||||
parseConfig(logger, fatal)
|
||||
} else {
|
||||
if cfgFile == "" && fatal {
|
||||
logger.Fatal().Msg("missing config file")
|
||||
} else if cfgFile == "" {
|
||||
logger.Warn().Msg("missing config file")
|
||||
}
|
||||
|
||||
viper.WatchConfig()
|
||||
viper.OnConfigChange(func(e fsnotify.Event) {
|
||||
logger.Info().Msg("config file changed")
|
||||
parseConfig(logger, false)
|
||||
})
|
||||
// Dont mind if there is no config file... viper also should populate
|
||||
// flags and environment variables.
|
||||
viper.SetConfigFile(cfgFile)
|
||||
parseConfig(logger, fatal)
|
||||
|
||||
// Configfile changes watch only enabled if there is a config file.
|
||||
if cfgFile != "" {
|
||||
viper.WatchConfig()
|
||||
viper.OnConfigChange(func(e fsnotify.Event) {
|
||||
logger.Info().Msg("config file changed")
|
||||
parseConfig(logger, false)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
// parseConfig parses the config and does some tests if required fields are there.
|
||||
@ -106,8 +143,6 @@ func parseConfig(logger zerolog.Logger, fatal bool) {
|
||||
}
|
||||
|
||||
logger.Error().Err(err).Msg("error loading config file")
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
if err := viper.Unmarshal(&config.Cfg); err != nil {
|
||||
@ -116,16 +151,22 @@ func parseConfig(logger zerolog.Logger, fatal bool) {
|
||||
}
|
||||
|
||||
logger.Error().Err(err).Msg("could not unmarshal config")
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
if err := config.Cfg.Require(); err != nil {
|
||||
if fatal {
|
||||
logger.Fatal().Err(err).Msg("missing config parts")
|
||||
}
|
||||
// Disabling require check if no config is set.
|
||||
// Not sure about this!
|
||||
if cfgFile != "" {
|
||||
if err := config.Cfg.Require(); err != nil {
|
||||
if fatal {
|
||||
logger.Fatal().Err(err).Msg("missing config parts")
|
||||
}
|
||||
|
||||
logger.Error().Err(err).Msg("missing config parts")
|
||||
logger.Error().Err(err).Msg("missing config parts")
|
||||
|
||||
return
|
||||
}
|
||||
} else {
|
||||
logger.Warn().Msg("doesnt do a config requirement check")
|
||||
|
||||
return
|
||||
}
|
||||
|
21
cmd/timer.go
Normal file
21
cmd/timer.go
Normal file
@ -0,0 +1,21 @@
|
||||
package cmd
|
||||
|
||||
import (
|
||||
"os"
|
||||
|
||||
"github.com/rs/zerolog"
|
||||
"github.com/rs/zerolog/log"
|
||||
"github.com/spf13/cobra"
|
||||
"go.xsfx.dev/schnutibox/pkg/timer"
|
||||
)
|
||||
|
||||
// nolint:gochecknoglobals
|
||||
var timerCmd = &cobra.Command{
|
||||
Use: "timer",
|
||||
Short: "Handling timer",
|
||||
Run: timer.Run,
|
||||
PreRun: func(cmd *cobra.Command, args []string) {
|
||||
log.Logger = log.Output(zerolog.ConsoleWriter{Out: os.Stderr})
|
||||
initConfig(false)
|
||||
},
|
||||
}
|
17
config.example.yaml
Normal file
17
config.example.yaml
Normal file
@ -0,0 +1,17 @@
|
||||
---
|
||||
Reader:
|
||||
Dev: /dev/hidraw4
|
||||
|
||||
Meta:
|
||||
Stop: "0000350934"
|
||||
|
||||
Tracks:
|
||||
"0000224543":
|
||||
name: scarlett begonias
|
||||
uris:
|
||||
- "https://ia902608.us.archive.org/3/items/gd1977-05-08.shure57.stevenson.29303.flac16/gd1977-05-08d02t04.ogg"
|
||||
|
||||
"0000195026":
|
||||
name: they love eachother
|
||||
uris:
|
||||
- "https://ia802608.us.archive.org/3/items/gd1977-05-08.shure57.stevenson.29303.flac16/gd1977-05-08d01t05.ogg"
|
10
go.mod
10
go.mod
@ -6,18 +6,24 @@ require (
|
||||
github.com/Microsoft/go-winio v0.4.16 // indirect
|
||||
github.com/bufbuild/buf v0.37.0
|
||||
github.com/containerd/continuity v0.0.0-20210208174643-50096c924a4e // indirect
|
||||
github.com/cosmtrek/air v1.27.3
|
||||
github.com/fhs/gompd/v2 v2.2.0
|
||||
github.com/fsnotify/fsnotify v1.4.7
|
||||
github.com/fsnotify/fsnotify v1.4.9
|
||||
github.com/golang/protobuf v1.5.2
|
||||
github.com/grpc-ecosystem/grpc-gateway/v2 v2.4.0
|
||||
github.com/helloyi/go-sshclient v1.0.0
|
||||
github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51 // indirect
|
||||
github.com/moby/term v0.0.0-20201216013528-df9cb8a40635 // indirect
|
||||
github.com/ory/dockertest/v3 v3.6.3
|
||||
github.com/philip-bui/grpc-zerolog v1.0.1
|
||||
github.com/prometheus/client_golang v0.9.3
|
||||
github.com/rs/zerolog v1.21.0
|
||||
github.com/rs/zerolog v1.22.0
|
||||
github.com/spf13/cobra v1.1.3
|
||||
github.com/spf13/pflag v1.0.5
|
||||
github.com/spf13/viper v1.7.0
|
||||
github.com/stretchr/testify v1.7.0
|
||||
github.com/tmc/scp v0.0.0-20170824174625-f7b48647feef
|
||||
go.xsfx.dev/don v1.0.0
|
||||
go.xsfx.dev/logginghandler v0.0.4
|
||||
golang.org/x/crypto v0.0.0-20201221181555-eec23a3978ad
|
||||
golang.org/x/net v0.0.0-20210316092652-d523dce5a7f4
|
||||
|
27
go.sum
27
go.sum
@ -77,6 +77,8 @@ github.com/coreos/etcd v3.3.13+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc
|
||||
github.com/coreos/go-semver v0.3.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk=
|
||||
github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
|
||||
github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA=
|
||||
github.com/cosmtrek/air v1.27.3 h1:laO93SnYnEiJsH0QIeXyso6FJ5maSNufE5d/MmHKBmk=
|
||||
github.com/cosmtrek/air v1.27.3/go.mod h1:vrGZm+zmL5htsEr6YjqLXyjSoelgDQIl/DuOtsWVLeU=
|
||||
github.com/cpuguy83/go-md2man/v2 v2.0.0/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU=
|
||||
github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
|
||||
github.com/creack/pty v1.1.11 h1:07n33Z8lZxZ2qwegKbObQohDhXDQxiMMz1NOUGYlesw=
|
||||
@ -98,10 +100,13 @@ github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.m
|
||||
github.com/envoyproxy/go-control-plane v0.9.9-0.20210217033140-668b12f5399d/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk=
|
||||
github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
|
||||
github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4=
|
||||
github.com/fatih/color v1.10.0 h1:s36xzo75JdqLaaWoiEHk767eHiwo0598uUxyfiPkDsg=
|
||||
github.com/fatih/color v1.10.0/go.mod h1:ELkj/draVOlAH/xkhN6mQ50Qd0MPOk5AAr3maGEBuJM=
|
||||
github.com/fhs/gompd/v2 v2.2.0 h1:zdSYAAOzQ5cCCgYa5CoXkL0Vr0Cqb/b5JmTobirLc90=
|
||||
github.com/fhs/gompd/v2 v2.2.0/go.mod h1:nNdZtcpD5VpmzZbRl5rV6RhxeMmAWTxEsSIMBkmMIy4=
|
||||
github.com/fsnotify/fsnotify v1.4.7 h1:IXs+QLmnXW2CcXuY+8Mzv/fWEsPGWxqefPtCP5CnV9I=
|
||||
github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
|
||||
github.com/fsnotify/fsnotify v1.4.9 h1:hsms1Qyu0jgnwNXIxa+/V/PDsU6CfLf6CNO8H7IWoS4=
|
||||
github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ=
|
||||
github.com/ghodss/yaml v1.0.0 h1:wQHKEahhL6wmXdzwWG11gIVCkOv05bNOh+Rxn0yngAk=
|
||||
github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
|
||||
github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU=
|
||||
@ -209,6 +214,8 @@ github.com/hashicorp/serf v0.8.2/go.mod h1:6hOLApaqBFA1NXqRQAsxw9QxuDEvNxSQRwA/J
|
||||
github.com/helloyi/go-sshclient v1.0.0 h1:pwDDc54wwyMlkwYbhszsX2UB1ajJM296WqLDtNDvcn8=
|
||||
github.com/helloyi/go-sshclient v1.0.0/go.mod h1:NrhRWsYJDjoQXTDWZ4YtVk84wZ4LK3NSM9jD2TZDAm8=
|
||||
github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=
|
||||
github.com/imdario/mergo v0.3.12 h1:b6R2BslTbIEToALKP7LxUvijTsNI9TAe80pLWN2g/HU=
|
||||
github.com/imdario/mergo v0.3.12/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA=
|
||||
github.com/inconshreveable/mousetrap v1.0.0 h1:Z8tu5sraLXCXIcARxBp/8cbvlwVa7Z1NHg9XEKhtSvM=
|
||||
github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8=
|
||||
github.com/jhump/protoreflect v1.8.1 h1:z7Ciiz3Bz37zSd485fbiTW8ABafIasyOWZI0N9EUUdo=
|
||||
@ -241,7 +248,11 @@ github.com/lib/pq v0.0.0-20180327071824-d34b9ff171c2/go.mod h1:5WUZQaWbwv1U+lTRe
|
||||
github.com/magiconair/properties v1.8.1 h1:ZC2Vc7/ZFkGmsVC9KvOjumD+G5lXy2RtTKyzRKO2BQ4=
|
||||
github.com/magiconair/properties v1.8.1/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ=
|
||||
github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU=
|
||||
github.com/mattn/go-colorable v0.1.8 h1:c1ghPdyEDarC70ftn0y+A/Ee++9zz8ljHG1b13eJ0s8=
|
||||
github.com/mattn/go-colorable v0.1.8/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc=
|
||||
github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4=
|
||||
github.com/mattn/go-isatty v0.0.12 h1:wuysRhFDzyxgEmMf5xjvJ2M9dZoWAXNNr5LSBS7uHXY=
|
||||
github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU=
|
||||
github.com/matttproud/golang_protobuf_extensions v1.0.1 h1:4hp9jkHxhMHkqkrB3Ix0jegS5sx/RkqARlsWZ6pIwiU=
|
||||
github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
|
||||
github.com/miekg/dns v1.0.14/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg=
|
||||
@ -272,8 +283,11 @@ github.com/opencontainers/runc v1.0.0-rc9/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rm
|
||||
github.com/ory/dockertest/v3 v3.6.3 h1:L8JWiGgR+fnj90AEOkTFIEp4j5uWAK72P3IUsYgn2cs=
|
||||
github.com/ory/dockertest/v3 v3.6.3/go.mod h1:EFLcVUOl8qCwp9NyDAcCDtq/QviLtYswW/VbWzUnTNE=
|
||||
github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc=
|
||||
github.com/pelletier/go-toml v1.2.0 h1:T5zMGML61Wp+FlcbWjRDT7yAxhJNAiPPLOFECq181zc=
|
||||
github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic=
|
||||
github.com/pelletier/go-toml v1.8.1 h1:1Nf83orprkJyknT6h7zbuEGUEjcyVlCxSUGTENmNCRM=
|
||||
github.com/pelletier/go-toml v1.8.1/go.mod h1:T2/BmBdy8dvIRq1a/8aqjN41wvWlN4lrapLU/GW4pbc=
|
||||
github.com/philip-bui/grpc-zerolog v1.0.1 h1:EMacvLRUd2O1K0eWod27ZP5CY1iTNkhBDLSN+Q4JEvA=
|
||||
github.com/philip-bui/grpc-zerolog v1.0.1/go.mod h1:qXbiq/2X4ZUMMshsqlWyTHOcw7ns+GZmlqZZN05ZHcQ=
|
||||
github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
|
||||
github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
|
||||
github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
|
||||
@ -302,8 +316,8 @@ github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6L
|
||||
github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
|
||||
github.com/rs/xid v1.2.1/go.mod h1:+uKXf+4Djp6Md1KODXJxgGQPKngRmWyn10oCKFzNHOQ=
|
||||
github.com/rs/zerolog v1.20.0/go.mod h1:IzD0RJ65iWH0w97OQQebJEvTZYvsCUm9WVLWBQrJRjo=
|
||||
github.com/rs/zerolog v1.21.0 h1:Q3vdXlfLNT+OftyBHsU0Y445MD+8m8axjKgf2si0QcM=
|
||||
github.com/rs/zerolog v1.21.0/go.mod h1:ZPhntP/xmq1nnND05hhpAh2QMhSsA4UN3MGZ6O2J3hM=
|
||||
github.com/rs/zerolog v1.22.0 h1:XrVUjV4K+izZpKXZHlPrYQiDtmdGiCylnT4i43AAWxg=
|
||||
github.com/rs/zerolog v1.22.0/go.mod h1:ZPhntP/xmq1nnND05hhpAh2QMhSsA4UN3MGZ6O2J3hM=
|
||||
github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
|
||||
github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts=
|
||||
github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc=
|
||||
@ -336,6 +350,7 @@ github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An
|
||||
github.com/spf13/viper v1.7.0 h1:xVKxvI7ouOI5I+U9s2eeiUfMaWBVoXA3AWskkrqK0VM=
|
||||
github.com/spf13/viper v1.7.0/go.mod h1:8WkrPz2fc9jxqZNCJI/76HCieCp4Q8HaLFoCha5qpdg=
|
||||
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
|
||||
github.com/stretchr/objx v0.1.1 h1:2vfRuCMp5sSVIDSqO8oNnWJq7mPa6KVP3iPIwFBuy8A=
|
||||
github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
|
||||
github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
|
||||
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
|
||||
@ -376,6 +391,8 @@ go.uber.org/tools v0.0.0-20190618225709-2cfd321de3ee/go.mod h1:vJERXedbb3MVM5f9E
|
||||
go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q=
|
||||
go.uber.org/zap v1.16.0 h1:uFRZXykJGK9lLY4HtgSw44DnIcAM+kRBP7x5m+NpAOM=
|
||||
go.uber.org/zap v1.16.0/go.mod h1:MA8QOfq0BHJwdXa996Y4dYkAqRKB8/1K1QMMZVaNZjQ=
|
||||
go.xsfx.dev/don v1.0.0 h1:gYyK1w23PFlqKCxhERlq3p59ugHsXcJeAWKQ/wI23+U=
|
||||
go.xsfx.dev/don v1.0.0/go.mod h1:eLqKA6S/io/qhqY1U8j/ErrXofu7qPZ8+4udedC9wU0=
|
||||
go.xsfx.dev/logginghandler v0.0.4 h1:WLV5DX3qHBFJAwI2Rwm12IEIjaxhiUZUxWlDb0uYg8U=
|
||||
go.xsfx.dev/logginghandler v0.0.4/go.mod h1:eBdnUeB7noknVfwTYrSiVl3hYYy0/5jfDSH4mwExGdc=
|
||||
golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
|
||||
@ -489,10 +506,12 @@ golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7w
|
||||
golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200113162924-86b910548bc1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200212091648-12a6c2dcc1e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
|
@ -12,18 +12,20 @@ import (
|
||||
|
||||
sshclient "github.com/helloyi/go-sshclient"
|
||||
"github.com/ory/dockertest/v3"
|
||||
dc "github.com/ory/dockertest/v3/docker"
|
||||
"github.com/rs/zerolog"
|
||||
"github.com/rs/zerolog/log"
|
||||
"github.com/tmc/scp"
|
||||
"go.xsfx.dev/don"
|
||||
"golang.org/x/crypto/ssh"
|
||||
)
|
||||
|
||||
const (
|
||||
sdcard = "/home/marv/tmp/2021-03-04-raspios-buster-armhf-lite.img"
|
||||
sdcard = "/SWAP/2021-05-07-raspios-buster-armhf-lite.img"
|
||||
sshUser = "pi"
|
||||
sshPass = "raspberry"
|
||||
sshHost = "localhost"
|
||||
containerTimeout = 5 * time.Minute
|
||||
sshHost = "docker"
|
||||
containerTimeout = 15 * time.Minute
|
||||
)
|
||||
|
||||
// Variables used for accessing shared resources in the test functions.
|
||||
@ -91,32 +93,18 @@ func copySchnutibox(user, pass, host string) error {
|
||||
}
|
||||
|
||||
// teardown removes temporary test resources.
|
||||
func teardown(img string, pool *dockertest.Pool, resource *dockertest.Resource) {
|
||||
func teardown(pool *dockertest.Pool, resource *dockertest.Resource) {
|
||||
log.Info().Msg("getting rid of container")
|
||||
|
||||
if err := pool.Purge(resource); err != nil {
|
||||
log.Fatal().Err(err).Msg("could not cleanup")
|
||||
}
|
||||
|
||||
log.Info().Str("img", img).Msg("deleting temp image")
|
||||
|
||||
if err := os.Remove(img); err != nil {
|
||||
log.Fatal().Err(err).Msg("could not delete temp image")
|
||||
}
|
||||
}
|
||||
|
||||
// nolint:funlen
|
||||
func TestMain(m *testing.M) {
|
||||
log.Logger = log.Output(zerolog.ConsoleWriter{Out: os.Stderr})
|
||||
|
||||
// Create tmp image.
|
||||
img, err := raspbianWorkCopy()
|
||||
if err != nil {
|
||||
log.Error().Err(err).Msg("could not create temp work image")
|
||||
}
|
||||
|
||||
log.Info().Str("img", img).Msg("created temp image file")
|
||||
|
||||
pool, err := dockertest.NewPool("")
|
||||
if err != nil {
|
||||
log.Fatal().Err(err).Msg("could not connect to docker")
|
||||
@ -132,8 +120,11 @@ func TestMain(m *testing.M) {
|
||||
&dockertest.RunOptions{
|
||||
Repository: "lukechilds/dockerpi",
|
||||
Tag: "vm",
|
||||
Mounts: []string{img + ":/sdcard/filesystem.img"},
|
||||
Mounts: []string{sdcard + ":/sdcard/filesystem.img"},
|
||||
ExposedPorts: []string{"5022/tcp"},
|
||||
PortBindings: map[dc.Port][]dc.PortBinding{
|
||||
"5022/tcp": {{HostIP: "0.0.0.0", HostPort: "5022"}},
|
||||
},
|
||||
})
|
||||
if err != nil {
|
||||
log.Fatal().Err(err).Msg("could not start resource")
|
||||
@ -144,37 +135,26 @@ func TestMain(m *testing.M) {
|
||||
|
||||
sshConn = sshHost + ":" + resource.GetPort("5022/tcp")
|
||||
|
||||
// Channels for checking readyness of the container.
|
||||
sshReady := make(chan struct{})
|
||||
sshError := make(chan error)
|
||||
|
||||
// Constant check for readyness of container.
|
||||
go func() {
|
||||
for {
|
||||
// Readiness.
|
||||
if err := don.Check(
|
||||
func() bool {
|
||||
client, err := sshclient.DialWithPasswd(sshConn, sshUser, sshPass)
|
||||
if err == nil {
|
||||
if err := client.Close(); err != nil {
|
||||
sshError <- err
|
||||
return false
|
||||
}
|
||||
sshReady <- struct{}{}
|
||||
|
||||
return true
|
||||
}
|
||||
|
||||
log.Debug().Msg("container not ready yet")
|
||||
time.Sleep(5 * time.Second)
|
||||
}
|
||||
}()
|
||||
|
||||
select {
|
||||
case err := <-sshError:
|
||||
log.Error().Err(err).Msg("could not connect to container via ssh")
|
||||
teardown(img, pool, resource)
|
||||
return false
|
||||
},
|
||||
20*time.Minute,
|
||||
15*time.Second,
|
||||
); err != nil {
|
||||
log.Error().Err(err).Msg("timeout. could not connect to container via ssh")
|
||||
teardown(pool, resource)
|
||||
os.Exit(1)
|
||||
case <-time.After(containerTimeout):
|
||||
log.Error().Msg("timeout. could not connect to container via ssh")
|
||||
teardown(img, pool, resource)
|
||||
os.Exit(1)
|
||||
case <-sshReady:
|
||||
log.Info().Msg("container is ready")
|
||||
}
|
||||
|
||||
// Connect via SSH.
|
||||
@ -183,7 +163,7 @@ func TestMain(m *testing.M) {
|
||||
client, err = sshclient.DialWithPasswd(sshConn, sshUser, sshPass)
|
||||
if err != nil {
|
||||
log.Error().Err(err).Msg("could not connect via ssh")
|
||||
teardown(img, pool, resource)
|
||||
teardown(pool, resource)
|
||||
os.Exit(1)
|
||||
}
|
||||
|
||||
@ -196,7 +176,7 @@ func TestMain(m *testing.M) {
|
||||
|
||||
if err := copySchnutibox(sshUser, sshPass, sshConn); err != nil {
|
||||
log.Error().Err(err).Msg("could not copy schnutibox")
|
||||
teardown(img, pool, resource)
|
||||
teardown(pool, resource)
|
||||
os.Exit(1)
|
||||
}
|
||||
|
||||
@ -208,7 +188,7 @@ func TestMain(m *testing.M) {
|
||||
SetStdio(os.Stdout, os.Stderr).
|
||||
Run(); err != nil {
|
||||
log.Error().Err(err).Msg("could not create /usr/local/bin on container")
|
||||
teardown(img, pool, resource)
|
||||
teardown(pool, resource)
|
||||
os.Exit(1)
|
||||
}
|
||||
|
||||
@ -219,7 +199,7 @@ func TestMain(m *testing.M) {
|
||||
code := m.Run()
|
||||
|
||||
// Removing container.
|
||||
teardown(img, pool, resource)
|
||||
teardown(pool, resource)
|
||||
|
||||
os.Exit(code)
|
||||
}
|
||||
|
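The rewritten readiness check above drops the hand-rolled goroutine/channel loop in favor of go.xsfx.dev/don. A minimal sketch of that pattern, assuming only the don.Check(func() bool, timeout, interval) error signature exactly as it is used in this diff; host, credentials, and durations are placeholders:

package readiness

import (
	"time"

	sshclient "github.com/helloyi/go-sshclient"
	"go.xsfx.dev/don"
)

// waitForSSH polls the container until an SSH login succeeds or the
// overall timeout expires; don retries the callback on the given interval.
func waitForSSH(addr, user, pass string) error {
	return don.Check(
		func() bool {
			c, err := sshclient.DialWithPasswd(addr, user, pass)
			if err != nil {
				return false // not reachable yet, don will retry
			}
			_ = c.Close()

			return true
		},
		20*time.Minute, // overall timeout
		15*time.Second, // retry interval
	)
}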
@ -7,19 +7,24 @@ import (
|
||||
api "go.xsfx.dev/schnutibox/pkg/api/v1"
|
||||
)
|
||||
|
||||
// Cfg stores a global config object.
|
||||
var Cfg Config
|
||||
|
||||
type Config struct {
|
||||
Debug struct {
|
||||
PPROF bool `mapstructure:"PPROF"`
|
||||
}
|
||||
|
||||
// Reader is used to configure the RFID Reader.
|
||||
Reader struct {
|
||||
Dev string `mapstructure:"Dev"`
|
||||
} `mapstructure:"Reader"`
|
||||
|
||||
// Box is used to configure a webinterface.
|
||||
Box struct {
|
||||
// Web is used to configure the webinterface.
|
||||
Web struct {
|
||||
Hostname string `mapstructure:"Hostname"`
|
||||
Port int `mapstructure:"Port"`
|
||||
} `mapstructure:"Box"`
|
||||
} `mapstructure:"Web"`
|
||||
|
||||
// MPD contains the connection details for the Music Player Daemon.
|
||||
MPD struct {
|
||||
|
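The nested struct above is what viper unmarshals the YAML config into via the mapstructure tags. A hedged sketch of that round trip: the struct below mirrors only the Box/Web slice shown here, the exact tag nesting is a plausible reading of the interleaved old/new diff lines, and the config file name is illustrative:

package main

import (
	"log"

	"github.com/spf13/viper"
)

// cfg mirrors only the Box.Web part of the Config struct above.
type cfg struct {
	Box struct {
		Web struct {
			Hostname string `mapstructure:"Hostname"`
			Port     int    `mapstructure:"Port"`
		} `mapstructure:"Web"`
	} `mapstructure:"Box"`
}

func main() {
	viper.SetConfigFile("config.example.yaml") // file name is an assumption

	if err := viper.ReadInConfig(); err != nil {
		log.Fatal(err)
	}

	var c cfg
	if err := viper.Unmarshal(&c); err != nil {
		log.Fatal(err)
	}

	log.Printf("webinterface on %s:%d", c.Box.Web.Hostname, c.Box.Web.Port)
}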
internal/grpcclient/grpcclient.go (new file, 18 lines)
@ -0,0 +1,18 @@
package grpcclient

import (
	"fmt"

	"google.golang.org/grpc"
)

// Conn creates a gRPC client connection to hostname:port without
// transport security.
func Conn(hostname string, port int) (*grpc.ClientConn, error) {
	var conn *grpc.ClientConn

	conn, err := grpc.Dial(fmt.Sprintf("%s:%d", hostname, port), grpc.WithInsecure())
	if err != nil {
		return nil, fmt.Errorf("could not connect: %w", err)
	}

	return conn, nil
}
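A short usage sketch for the new helper, as it would be called from another package inside the module; host and port are placeholders:

package main

import (
	"log"

	"go.xsfx.dev/schnutibox/internal/grpcclient"
)

func main() {
	// Conn dials without transport security (grpc.WithInsecure), so this
	// is only meant for local or otherwise trusted connections.
	conn, err := grpcclient.Conn("localhost", 9348) // port is a placeholder
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()

	// conn can now be passed to any generated gRPC client constructor
	// from pkg/api/v1.
}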
@ -4,18 +4,75 @@ package metrics
|
||||
import (
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
"github.com/prometheus/client_golang/prometheus/promauto"
|
||||
api "go.xsfx.dev/schnutibox/pkg/api/v1"
|
||||
)
|
||||
|
||||
var (
|
||||
TracksPlayed = promauto.NewCounterVec(
|
||||
prometheus.CounterOpts{
|
||||
Name: "schnutibox_played_tracks_total",
|
||||
},
|
||||
[]string{"rfid", "name"})
|
||||
// Plays is a map of tracked plays.
|
||||
// It's a map, so it's easier to check whether the metric is already initialized
|
||||
// and usable. The Key string is the RFID identification.
|
||||
var Plays = make(map[string]*api.IdentifyResponse)
|
||||
|
||||
BoxErrors = promauto.NewCounter(
|
||||
prometheus.CounterOpts{
|
||||
Name: "schnutbox_errors_total",
|
||||
},
|
||||
)
|
||||
// NewPlay initializes a new play metric.
|
||||
func NewPlay(rfid, name string, uris []string) {
|
||||
if _, ok := Plays[rfid]; !ok {
|
||||
Plays[rfid] = &api.IdentifyResponse{
|
||||
Name: name,
|
||||
Uris: uris,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Play is the play metric.
|
||||
var Play = promauto.NewGaugeVec(
|
||||
prometheus.GaugeOpts{
|
||||
Name: "schnutibox_plays",
|
||||
Help: "play metrics",
|
||||
},
|
||||
[]string{"rfid", "name"},
|
||||
)
|
||||
|
||||
// Seconds tracks the seconds a play was played.
|
||||
var Seconds = promauto.NewCounterVec(
|
||||
prometheus.CounterOpts{
|
||||
Name: "schnutibox_play_seconds_total",
|
||||
Help: "play seconds metrics",
|
||||
},
|
||||
[]string{"rfid", "name"},
|
||||
)
|
||||
|
||||
// BoxErrors counts schnutibox errors.
|
||||
var BoxErrors = promauto.NewCounter(
|
||||
prometheus.CounterOpts{
|
||||
Name: "schnutibox_errors_total",
|
||||
Help: "counter of errors",
|
||||
},
|
||||
)
|
||||
|
||||
// tracksEqual checks whether two uris slices are equal.
|
||||
// This is needed to search for the right play item.
|
||||
func tracksEqual(a, b []string) bool {
|
||||
if len(a) != len(b) {
|
||||
return false
|
||||
}
|
||||
|
||||
for i, v := range a {
|
||||
if v != b[i] {
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
||||
return true
|
||||
}
|
||||
|
||||
// Set sets `1` on the play gauge if the item is playing and `0` on every other play.
|
||||
// It also increments the played-seconds counter for the playing item.
|
||||
func Set(uris []string, state string) {
|
||||
for r, p := range Plays {
|
||||
if tracksEqual(uris, p.Uris) && state == "play" {
|
||||
Play.WithLabelValues(r, p.Name).Set(1)
|
||||
Seconds.WithLabelValues(r, p.Name).Inc()
|
||||
} else {
|
||||
Play.WithLabelValues(r, p.Name).Set(0)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
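Taken together, the metrics above are driven in two steps: NewPlay registers a tag once, and Set is then called with the currently queued URIs plus the MPD state. The wiring below is illustrative only (it is not part of this hunk), and the import path of the metrics package is assumed:

package main

import (
	"time"

	"go.xsfx.dev/schnutibox/pkg/metrics" // assumed import path
)

func main() {
	// Register a play once, keyed by the RFID tag (values taken from the
	// tracks config shown earlier in this diff).
	uris := []string{
		"https://ia802608.us.archive.org/3/items/gd1977-05-08.shure57.stevenson.29303.flac16/gd1977-05-08d01t05.ogg",
	}
	metrics.NewPlay("0000195026", "they love eachother", uris)

	// In the player loop: while these URIs are queued and MPD reports
	// "play", the gauge goes to 1 and the seconds counter keeps rising.
	for range time.Tick(time.Second) {
		metrics.Set(uris, "play")
	}
}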
main.go (2 changed lines)
@ -12,7 +12,7 @@ import (

func main() {
	sselog.Log = sselog.NewSSELog()
	log.Logger = zerolog.New(io.MultiWriter(os.Stderr, sselog.Log)).With().Caller().Logger()
	log.Logger = zerolog.New(io.MultiWriter(zerolog.ConsoleWriter{Out: os.Stderr}, sselog.Log)).With().Caller().Logger()

	cmd.Execute()
}
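The change above keeps the SSE log stream intact while making the stderr copy human-readable. The same combination in isolation, with a plain buffer standing in for the sselog writer:

package main

import (
	"bytes"
	"io"
	"os"

	"github.com/rs/zerolog"
	"github.com/rs/zerolog/log"
)

func main() {
	var stream bytes.Buffer // stand-in for sselog.Log

	// zerolog still emits JSON; ConsoleWriter pretty-prints it on stderr,
	// while the second writer receives the raw JSON lines.
	log.Logger = zerolog.New(io.MultiWriter(
		zerolog.ConsoleWriter{Out: os.Stderr},
		&stream,
	)).With().Caller().Logger()

	log.Info().Msg("hello from schnutibox")
}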
pkg/api/v1/google/api/annotations.pb.go (new file, 118 lines)
@ -0,0 +1,118 @@
|
||||
// Copyright 2015 Google LLC
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
// Code generated by protoc-gen-go. DO NOT EDIT.
|
||||
// versions:
|
||||
// protoc-gen-go v1.26.0
|
||||
// protoc v3.13.0
|
||||
// source: google/api/annotations.proto
|
||||
|
||||
package annotations
|
||||
|
||||
import (
|
||||
descriptor "github.com/golang/protobuf/protoc-gen-go/descriptor"
|
||||
protoreflect "google.golang.org/protobuf/reflect/protoreflect"
|
||||
protoimpl "google.golang.org/protobuf/runtime/protoimpl"
|
||||
reflect "reflect"
|
||||
)
|
||||
|
||||
const (
|
||||
// Verify that this generated code is sufficiently up-to-date.
|
||||
_ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
|
||||
// Verify that runtime/protoimpl is sufficiently up-to-date.
|
||||
_ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
|
||||
)
|
||||
|
||||
var file_google_api_annotations_proto_extTypes = []protoimpl.ExtensionInfo{
|
||||
{
|
||||
ExtendedType: (*descriptor.MethodOptions)(nil),
|
||||
ExtensionType: (*HttpRule)(nil),
|
||||
Field: 72295728,
|
||||
Name: "google.api.http",
|
||||
Tag: "bytes,72295728,opt,name=http",
|
||||
Filename: "google/api/annotations.proto",
|
||||
},
|
||||
}
|
||||
|
||||
// Extension fields to descriptor.MethodOptions.
|
||||
var (
|
||||
// See `HttpRule`.
|
||||
//
|
||||
// optional google.api.HttpRule http = 72295728;
|
||||
E_Http = &file_google_api_annotations_proto_extTypes[0]
|
||||
)
|
||||
|
||||
var File_google_api_annotations_proto protoreflect.FileDescriptor
|
||||
|
||||
var file_google_api_annotations_proto_rawDesc = []byte{
|
||||
0x0a, 0x1c, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x61, 0x6e, 0x6e,
|
||||
0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0a,
|
||||
0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x1a, 0x15, 0x67, 0x6f, 0x6f, 0x67,
|
||||
0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x68, 0x74, 0x74, 0x70, 0x2e, 0x70, 0x72, 0x6f, 0x74,
|
||||
0x6f, 0x1a, 0x20, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62,
|
||||
0x75, 0x66, 0x2f, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x2e, 0x70, 0x72,
|
||||
0x6f, 0x74, 0x6f, 0x3a, 0x4b, 0x0a, 0x04, 0x68, 0x74, 0x74, 0x70, 0x12, 0x1e, 0x2e, 0x67, 0x6f,
|
||||
0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4d, 0x65,
|
||||
0x74, 0x68, 0x6f, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0xb0, 0xca, 0xbc, 0x22,
|
||||
0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70,
|
||||
0x69, 0x2e, 0x48, 0x74, 0x74, 0x70, 0x52, 0x75, 0x6c, 0x65, 0x52, 0x04, 0x68, 0x74, 0x74, 0x70,
|
||||
0x42, 0x6e, 0x0a, 0x0e, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61,
|
||||
0x70, 0x69, 0x42, 0x10, 0x41, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x50,
|
||||
0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x41, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67,
|
||||
0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x67, 0x65, 0x6e, 0x70, 0x72, 0x6f,
|
||||
0x74, 0x6f, 0x2f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2f, 0x61, 0x70,
|
||||
0x69, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x3b, 0x61, 0x6e,
|
||||
0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0xa2, 0x02, 0x04, 0x47, 0x41, 0x50, 0x49,
|
||||
0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
|
||||
}
|
||||
|
||||
var file_google_api_annotations_proto_goTypes = []interface{}{
|
||||
(*descriptor.MethodOptions)(nil), // 0: google.protobuf.MethodOptions
|
||||
(*HttpRule)(nil), // 1: google.api.HttpRule
|
||||
}
|
||||
var file_google_api_annotations_proto_depIdxs = []int32{
|
||||
0, // 0: google.api.http:extendee -> google.protobuf.MethodOptions
|
||||
1, // 1: google.api.http:type_name -> google.api.HttpRule
|
||||
2, // [2:2] is the sub-list for method output_type
|
||||
2, // [2:2] is the sub-list for method input_type
|
||||
1, // [1:2] is the sub-list for extension type_name
|
||||
0, // [0:1] is the sub-list for extension extendee
|
||||
0, // [0:0] is the sub-list for field type_name
|
||||
}
|
||||
|
||||
func init() { file_google_api_annotations_proto_init() }
|
||||
func file_google_api_annotations_proto_init() {
|
||||
if File_google_api_annotations_proto != nil {
|
||||
return
|
||||
}
|
||||
file_google_api_http_proto_init()
|
||||
type x struct{}
|
||||
out := protoimpl.TypeBuilder{
|
||||
File: protoimpl.DescBuilder{
|
||||
GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
|
||||
RawDescriptor: file_google_api_annotations_proto_rawDesc,
|
||||
NumEnums: 0,
|
||||
NumMessages: 0,
|
||||
NumExtensions: 1,
|
||||
NumServices: 0,
|
||||
},
|
||||
GoTypes: file_google_api_annotations_proto_goTypes,
|
||||
DependencyIndexes: file_google_api_annotations_proto_depIdxs,
|
||||
ExtensionInfos: file_google_api_annotations_proto_extTypes,
|
||||
}.Build()
|
||||
File_google_api_annotations_proto = out.File
|
||||
file_google_api_annotations_proto_rawDesc = nil
|
||||
file_google_api_annotations_proto_goTypes = nil
|
||||
file_google_api_annotations_proto_depIdxs = nil
|
||||
}
|
pkg/api/v1/google/api/http.pb.go (new file, 777 lines)
@ -0,0 +1,777 @@
|
||||
// Copyright 2015 Google LLC
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
// Code generated by protoc-gen-go. DO NOT EDIT.
|
||||
// versions:
|
||||
// protoc-gen-go v1.26.0
|
||||
// protoc v3.13.0
|
||||
// source: google/api/http.proto
|
||||
|
||||
package annotations
|
||||
|
||||
import (
|
||||
protoreflect "google.golang.org/protobuf/reflect/protoreflect"
|
||||
protoimpl "google.golang.org/protobuf/runtime/protoimpl"
|
||||
reflect "reflect"
|
||||
sync "sync"
|
||||
)
|
||||
|
||||
const (
|
||||
// Verify that this generated code is sufficiently up-to-date.
|
||||
_ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
|
||||
// Verify that runtime/protoimpl is sufficiently up-to-date.
|
||||
_ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
|
||||
)
|
||||
|
||||
// Defines the HTTP configuration for an API service. It contains a list of
|
||||
// [HttpRule][google.api.HttpRule], each specifying the mapping of an RPC method
|
||||
// to one or more HTTP REST API methods.
|
||||
type Http struct {
|
||||
state protoimpl.MessageState
|
||||
sizeCache protoimpl.SizeCache
|
||||
unknownFields protoimpl.UnknownFields
|
||||
|
||||
// A list of HTTP configuration rules that apply to individual API methods.
|
||||
//
|
||||
// **NOTE:** All service configuration rules follow "last one wins" order.
|
||||
Rules []*HttpRule `protobuf:"bytes,1,rep,name=rules,proto3" json:"rules,omitempty"`
|
||||
// When set to true, URL path parameters will be fully URI-decoded except in
|
||||
// cases of single segment matches in reserved expansion, where "%2F" will be
|
||||
// left encoded.
|
||||
//
|
||||
// The default behavior is to not decode RFC 6570 reserved characters in multi
|
||||
// segment matches.
|
||||
FullyDecodeReservedExpansion bool `protobuf:"varint,2,opt,name=fully_decode_reserved_expansion,json=fullyDecodeReservedExpansion,proto3" json:"fully_decode_reserved_expansion,omitempty"`
|
||||
}
|
||||
|
||||
func (x *Http) Reset() {
|
||||
*x = Http{}
|
||||
if protoimpl.UnsafeEnabled {
|
||||
mi := &file_google_api_http_proto_msgTypes[0]
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
}
|
||||
|
||||
func (x *Http) String() string {
|
||||
return protoimpl.X.MessageStringOf(x)
|
||||
}
|
||||
|
||||
func (*Http) ProtoMessage() {}
|
||||
|
||||
func (x *Http) ProtoReflect() protoreflect.Message {
|
||||
mi := &file_google_api_http_proto_msgTypes[0]
|
||||
if protoimpl.UnsafeEnabled && x != nil {
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
if ms.LoadMessageInfo() == nil {
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
return ms
|
||||
}
|
||||
return mi.MessageOf(x)
|
||||
}
|
||||
|
||||
// Deprecated: Use Http.ProtoReflect.Descriptor instead.
|
||||
func (*Http) Descriptor() ([]byte, []int) {
|
||||
return file_google_api_http_proto_rawDescGZIP(), []int{0}
|
||||
}
|
||||
|
||||
func (x *Http) GetRules() []*HttpRule {
|
||||
if x != nil {
|
||||
return x.Rules
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (x *Http) GetFullyDecodeReservedExpansion() bool {
|
||||
if x != nil {
|
||||
return x.FullyDecodeReservedExpansion
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// # gRPC Transcoding
|
||||
//
|
||||
// gRPC Transcoding is a feature for mapping between a gRPC method and one or
|
||||
// more HTTP REST endpoints. It allows developers to build a single API service
|
||||
// that supports both gRPC APIs and REST APIs. Many systems, including [Google
|
||||
// APIs](https://github.com/googleapis/googleapis),
|
||||
// [Cloud Endpoints](https://cloud.google.com/endpoints), [gRPC
|
||||
// Gateway](https://github.com/grpc-ecosystem/grpc-gateway),
|
||||
// and [Envoy](https://github.com/envoyproxy/envoy) proxy support this feature
|
||||
// and use it for large scale production services.
|
||||
//
|
||||
// `HttpRule` defines the schema of the gRPC/REST mapping. The mapping specifies
|
||||
// how different portions of the gRPC request message are mapped to the URL
|
||||
// path, URL query parameters, and HTTP request body. It also controls how the
|
||||
// gRPC response message is mapped to the HTTP response body. `HttpRule` is
|
||||
// typically specified as an `google.api.http` annotation on the gRPC method.
|
||||
//
|
||||
// Each mapping specifies a URL path template and an HTTP method. The path
|
||||
// template may refer to one or more fields in the gRPC request message, as long
|
||||
// as each field is a non-repeated field with a primitive (non-message) type.
|
||||
// The path template controls how fields of the request message are mapped to
|
||||
// the URL path.
|
||||
//
|
||||
// Example:
|
||||
//
|
||||
// service Messaging {
|
||||
// rpc GetMessage(GetMessageRequest) returns (Message) {
|
||||
// option (google.api.http) = {
|
||||
// get: "/v1/{name=messages/*}"
|
||||
// };
|
||||
// }
|
||||
// }
|
||||
// message GetMessageRequest {
|
||||
// string name = 1; // Mapped to URL path.
|
||||
// }
|
||||
// message Message {
|
||||
// string text = 1; // The resource content.
|
||||
// }
|
||||
//
|
||||
// This enables an HTTP REST to gRPC mapping as below:
|
||||
//
|
||||
// HTTP | gRPC
|
||||
// -----|-----
|
||||
// `GET /v1/messages/123456` | `GetMessage(name: "messages/123456")`
|
||||
//
|
||||
// Any fields in the request message which are not bound by the path template
|
||||
// automatically become HTTP query parameters if there is no HTTP request body.
|
||||
// For example:
|
||||
//
|
||||
// service Messaging {
|
||||
// rpc GetMessage(GetMessageRequest) returns (Message) {
|
||||
// option (google.api.http) = {
|
||||
// get:"/v1/messages/{message_id}"
|
||||
// };
|
||||
// }
|
||||
// }
|
||||
// message GetMessageRequest {
|
||||
// message SubMessage {
|
||||
// string subfield = 1;
|
||||
// }
|
||||
// string message_id = 1; // Mapped to URL path.
|
||||
// int64 revision = 2; // Mapped to URL query parameter `revision`.
|
||||
// SubMessage sub = 3; // Mapped to URL query parameter `sub.subfield`.
|
||||
// }
|
||||
//
|
||||
// This enables a HTTP JSON to RPC mapping as below:
|
||||
//
|
||||
// HTTP | gRPC
|
||||
// -----|-----
|
||||
// `GET /v1/messages/123456?revision=2&sub.subfield=foo` |
|
||||
// `GetMessage(message_id: "123456" revision: 2 sub: SubMessage(subfield:
|
||||
// "foo"))`
|
||||
//
|
||||
// Note that fields which are mapped to URL query parameters must have a
|
||||
// primitive type or a repeated primitive type or a non-repeated message type.
|
||||
// In the case of a repeated type, the parameter can be repeated in the URL
|
||||
// as `...?param=A¶m=B`. In the case of a message type, each field of the
|
||||
// message is mapped to a separate parameter, such as
|
||||
// `...?foo.a=A&foo.b=B&foo.c=C`.
|
||||
//
|
||||
// For HTTP methods that allow a request body, the `body` field
|
||||
// specifies the mapping. Consider a REST update method on the
|
||||
// message resource collection:
|
||||
//
|
||||
// service Messaging {
|
||||
// rpc UpdateMessage(UpdateMessageRequest) returns (Message) {
|
||||
// option (google.api.http) = {
|
||||
// patch: "/v1/messages/{message_id}"
|
||||
// body: "message"
|
||||
// };
|
||||
// }
|
||||
// }
|
||||
// message UpdateMessageRequest {
|
||||
// string message_id = 1; // mapped to the URL
|
||||
// Message message = 2; // mapped to the body
|
||||
// }
|
||||
//
|
||||
// The following HTTP JSON to RPC mapping is enabled, where the
|
||||
// representation of the JSON in the request body is determined by
|
||||
// protos JSON encoding:
|
||||
//
|
||||
// HTTP | gRPC
|
||||
// -----|-----
|
||||
// `PATCH /v1/messages/123456 { "text": "Hi!" }` | `UpdateMessage(message_id:
|
||||
// "123456" message { text: "Hi!" })`
|
||||
//
|
||||
// The special name `*` can be used in the body mapping to define that
|
||||
// every field not bound by the path template should be mapped to the
|
||||
// request body. This enables the following alternative definition of
|
||||
// the update method:
|
||||
//
|
||||
// service Messaging {
|
||||
// rpc UpdateMessage(Message) returns (Message) {
|
||||
// option (google.api.http) = {
|
||||
// patch: "/v1/messages/{message_id}"
|
||||
// body: "*"
|
||||
// };
|
||||
// }
|
||||
// }
|
||||
// message Message {
|
||||
// string message_id = 1;
|
||||
// string text = 2;
|
||||
// }
|
||||
//
|
||||
//
|
||||
// The following HTTP JSON to RPC mapping is enabled:
|
||||
//
|
||||
// HTTP | gRPC
|
||||
// -----|-----
|
||||
// `PATCH /v1/messages/123456 { "text": "Hi!" }` | `UpdateMessage(message_id:
|
||||
// "123456" text: "Hi!")`
|
||||
//
|
||||
// Note that when using `*` in the body mapping, it is not possible to
|
||||
// have HTTP parameters, as all fields not bound by the path end in
|
||||
// the body. This makes this option more rarely used in practice when
|
||||
// defining REST APIs. The common usage of `*` is in custom methods
|
||||
// which don't use the URL at all for transferring data.
|
||||
//
|
||||
// It is possible to define multiple HTTP methods for one RPC by using
|
||||
// the `additional_bindings` option. Example:
|
||||
//
|
||||
// service Messaging {
|
||||
// rpc GetMessage(GetMessageRequest) returns (Message) {
|
||||
// option (google.api.http) = {
|
||||
// get: "/v1/messages/{message_id}"
|
||||
// additional_bindings {
|
||||
// get: "/v1/users/{user_id}/messages/{message_id}"
|
||||
// }
|
||||
// };
|
||||
// }
|
||||
// }
|
||||
// message GetMessageRequest {
|
||||
// string message_id = 1;
|
||||
// string user_id = 2;
|
||||
// }
|
||||
//
|
||||
// This enables the following two alternative HTTP JSON to RPC mappings:
|
||||
//
|
||||
// HTTP | gRPC
|
||||
// -----|-----
|
||||
// `GET /v1/messages/123456` | `GetMessage(message_id: "123456")`
|
||||
// `GET /v1/users/me/messages/123456` | `GetMessage(user_id: "me" message_id:
|
||||
// "123456")`
|
||||
//
|
||||
// ## Rules for HTTP mapping
|
||||
//
|
||||
// 1. Leaf request fields (recursive expansion nested messages in the request
|
||||
// message) are classified into three categories:
|
||||
// - Fields referred by the path template. They are passed via the URL path.
|
||||
// - Fields referred by the [HttpRule.body][google.api.HttpRule.body]. They are passed via the HTTP
|
||||
// request body.
|
||||
// - All other fields are passed via the URL query parameters, and the
|
||||
// parameter name is the field path in the request message. A repeated
|
||||
// field can be represented as multiple query parameters under the same
|
||||
// name.
|
||||
// 2. If [HttpRule.body][google.api.HttpRule.body] is "*", there is no URL query parameter, all fields
|
||||
// are passed via URL path and HTTP request body.
|
||||
// 3. If [HttpRule.body][google.api.HttpRule.body] is omitted, there is no HTTP request body, all
|
||||
// fields are passed via URL path and URL query parameters.
|
||||
//
|
||||
// ### Path template syntax
|
||||
//
|
||||
// Template = "/" Segments [ Verb ] ;
|
||||
// Segments = Segment { "/" Segment } ;
|
||||
// Segment = "*" | "**" | LITERAL | Variable ;
|
||||
// Variable = "{" FieldPath [ "=" Segments ] "}" ;
|
||||
// FieldPath = IDENT { "." IDENT } ;
|
||||
// Verb = ":" LITERAL ;
|
||||
//
|
||||
// The syntax `*` matches a single URL path segment. The syntax `**` matches
|
||||
// zero or more URL path segments, which must be the last part of the URL path
|
||||
// except the `Verb`.
|
||||
//
|
||||
// The syntax `Variable` matches part of the URL path as specified by its
|
||||
// template. A variable template must not contain other variables. If a variable
|
||||
// matches a single path segment, its template may be omitted, e.g. `{var}`
|
||||
// is equivalent to `{var=*}`.
|
||||
//
|
||||
// The syntax `LITERAL` matches literal text in the URL path. If the `LITERAL`
|
||||
// contains any reserved character, such characters should be percent-encoded
|
||||
// before the matching.
|
||||
//
|
||||
// If a variable contains exactly one path segment, such as `"{var}"` or
|
||||
// `"{var=*}"`, when such a variable is expanded into a URL path on the client
|
||||
// side, all characters except `[-_.~0-9a-zA-Z]` are percent-encoded. The
|
||||
// server side does the reverse decoding. Such variables show up in the
|
||||
// [Discovery
|
||||
// Document](https://developers.google.com/discovery/v1/reference/apis) as
|
||||
// `{var}`.
|
||||
//
|
||||
// If a variable contains multiple path segments, such as `"{var=foo/*}"`
|
||||
// or `"{var=**}"`, when such a variable is expanded into a URL path on the
|
||||
// client side, all characters except `[-_.~/0-9a-zA-Z]` are percent-encoded.
|
||||
// The server side does the reverse decoding, except "%2F" and "%2f" are left
|
||||
// unchanged. Such variables show up in the
|
||||
// [Discovery
|
||||
// Document](https://developers.google.com/discovery/v1/reference/apis) as
|
||||
// `{+var}`.
|
||||
//
|
||||
// ## Using gRPC API Service Configuration
|
||||
//
|
||||
// gRPC API Service Configuration (service config) is a configuration language
|
||||
// for configuring a gRPC service to become a user-facing product. The
|
||||
// service config is simply the YAML representation of the `google.api.Service`
|
||||
// proto message.
|
||||
//
|
||||
// As an alternative to annotating your proto file, you can configure gRPC
|
||||
// transcoding in your service config YAML files. You do this by specifying a
|
||||
// `HttpRule` that maps the gRPC method to a REST endpoint, achieving the same
|
||||
// effect as the proto annotation. This can be particularly useful if you
|
||||
// have a proto that is reused in multiple services. Note that any transcoding
|
||||
// specified in the service config will override any matching transcoding
|
||||
// configuration in the proto.
|
||||
//
|
||||
// Example:
|
||||
//
|
||||
// http:
|
||||
// rules:
|
||||
// # Selects a gRPC method and applies HttpRule to it.
|
||||
// - selector: example.v1.Messaging.GetMessage
|
||||
// get: /v1/messages/{message_id}/{sub.subfield}
|
||||
//
|
||||
// ## Special notes
|
||||
//
|
||||
// When gRPC Transcoding is used to map a gRPC to JSON REST endpoints, the
|
||||
// proto to JSON conversion must follow the [proto3
|
||||
// specification](https://developers.google.com/protocol-buffers/docs/proto3#json).
|
||||
//
|
||||
// While the single segment variable follows the semantics of
|
||||
// [RFC 6570](https://tools.ietf.org/html/rfc6570) Section 3.2.2 Simple String
|
||||
// Expansion, the multi segment variable **does not** follow RFC 6570 Section
|
||||
// 3.2.3 Reserved Expansion. The reason is that the Reserved Expansion
|
||||
// does not expand special characters like `?` and `#`, which would lead
|
||||
// to invalid URLs. As the result, gRPC Transcoding uses a custom encoding
|
||||
// for multi segment variables.
|
||||
//
|
||||
// The path variables **must not** refer to any repeated or mapped field,
|
||||
// because client libraries are not capable of handling such variable expansion.
|
||||
//
|
||||
// The path variables **must not** capture the leading "/" character. The reason
|
||||
// is that the most common use case "{var}" does not capture the leading "/"
|
||||
// character. For consistency, all path variables must share the same behavior.
|
||||
//
|
||||
// Repeated message fields must not be mapped to URL query parameters, because
|
||||
// no client library can support such complicated mapping.
|
||||
//
|
||||
// If an API needs to use a JSON array for request or response body, it can map
|
||||
// the request or response body to a repeated field. However, some gRPC
|
||||
// Transcoding implementations may not support this feature.
|
||||
type HttpRule struct {
|
||||
state protoimpl.MessageState
|
||||
sizeCache protoimpl.SizeCache
|
||||
unknownFields protoimpl.UnknownFields
|
||||
|
||||
// Selects a method to which this rule applies.
|
||||
//
|
||||
// Refer to [selector][google.api.DocumentationRule.selector] for syntax details.
|
||||
Selector string `protobuf:"bytes,1,opt,name=selector,proto3" json:"selector,omitempty"`
|
||||
// Determines the URL pattern is matched by this rules. This pattern can be
|
||||
// used with any of the {get|put|post|delete|patch} methods. A custom method
|
||||
// can be defined using the 'custom' field.
|
||||
//
|
||||
// Types that are assignable to Pattern:
|
||||
// *HttpRule_Get
|
||||
// *HttpRule_Put
|
||||
// *HttpRule_Post
|
||||
// *HttpRule_Delete
|
||||
// *HttpRule_Patch
|
||||
// *HttpRule_Custom
|
||||
Pattern isHttpRule_Pattern `protobuf_oneof:"pattern"`
|
||||
// The name of the request field whose value is mapped to the HTTP request
|
||||
// body, or `*` for mapping all request fields not captured by the path
|
||||
// pattern to the HTTP body, or omitted for not having any HTTP request body.
|
||||
//
|
||||
// NOTE: the referred field must be present at the top-level of the request
|
||||
// message type.
|
||||
Body string `protobuf:"bytes,7,opt,name=body,proto3" json:"body,omitempty"`
|
||||
// Optional. The name of the response field whose value is mapped to the HTTP
|
||||
// response body. When omitted, the entire response message will be used
|
||||
// as the HTTP response body.
|
||||
//
|
||||
// NOTE: The referred field must be present at the top-level of the response
|
||||
// message type.
|
||||
ResponseBody string `protobuf:"bytes,12,opt,name=response_body,json=responseBody,proto3" json:"response_body,omitempty"`
|
||||
// Additional HTTP bindings for the selector. Nested bindings must
|
||||
// not contain an `additional_bindings` field themselves (that is,
|
||||
// the nesting may only be one level deep).
|
||||
AdditionalBindings []*HttpRule `protobuf:"bytes,11,rep,name=additional_bindings,json=additionalBindings,proto3" json:"additional_bindings,omitempty"`
|
||||
}
|
||||
|
||||
func (x *HttpRule) Reset() {
|
||||
*x = HttpRule{}
|
||||
if protoimpl.UnsafeEnabled {
|
||||
mi := &file_google_api_http_proto_msgTypes[1]
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
}
|
||||
|
||||
func (x *HttpRule) String() string {
|
||||
return protoimpl.X.MessageStringOf(x)
|
||||
}
|
||||
|
||||
func (*HttpRule) ProtoMessage() {}
|
||||
|
||||
func (x *HttpRule) ProtoReflect() protoreflect.Message {
|
||||
mi := &file_google_api_http_proto_msgTypes[1]
|
||||
if protoimpl.UnsafeEnabled && x != nil {
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
if ms.LoadMessageInfo() == nil {
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
return ms
|
||||
}
|
||||
return mi.MessageOf(x)
|
||||
}
|
||||
|
||||
// Deprecated: Use HttpRule.ProtoReflect.Descriptor instead.
|
||||
func (*HttpRule) Descriptor() ([]byte, []int) {
|
||||
return file_google_api_http_proto_rawDescGZIP(), []int{1}
|
||||
}
|
||||
|
||||
func (x *HttpRule) GetSelector() string {
|
||||
if x != nil {
|
||||
return x.Selector
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
func (m *HttpRule) GetPattern() isHttpRule_Pattern {
|
||||
if m != nil {
|
||||
return m.Pattern
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (x *HttpRule) GetGet() string {
|
||||
if x, ok := x.GetPattern().(*HttpRule_Get); ok {
|
||||
return x.Get
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
func (x *HttpRule) GetPut() string {
|
||||
if x, ok := x.GetPattern().(*HttpRule_Put); ok {
|
||||
return x.Put
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
func (x *HttpRule) GetPost() string {
|
||||
if x, ok := x.GetPattern().(*HttpRule_Post); ok {
|
||||
return x.Post
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
func (x *HttpRule) GetDelete() string {
|
||||
if x, ok := x.GetPattern().(*HttpRule_Delete); ok {
|
||||
return x.Delete
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
func (x *HttpRule) GetPatch() string {
|
||||
if x, ok := x.GetPattern().(*HttpRule_Patch); ok {
|
||||
return x.Patch
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
func (x *HttpRule) GetCustom() *CustomHttpPattern {
|
||||
if x, ok := x.GetPattern().(*HttpRule_Custom); ok {
|
||||
return x.Custom
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (x *HttpRule) GetBody() string {
|
||||
if x != nil {
|
||||
return x.Body
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
func (x *HttpRule) GetResponseBody() string {
|
||||
if x != nil {
|
||||
return x.ResponseBody
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
func (x *HttpRule) GetAdditionalBindings() []*HttpRule {
|
||||
if x != nil {
|
||||
return x.AdditionalBindings
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
type isHttpRule_Pattern interface {
|
||||
isHttpRule_Pattern()
|
||||
}
|
||||
|
||||
type HttpRule_Get struct {
|
||||
// Maps to HTTP GET. Used for listing and getting information about
|
||||
// resources.
|
||||
Get string `protobuf:"bytes,2,opt,name=get,proto3,oneof"`
|
||||
}
|
||||
|
||||
type HttpRule_Put struct {
|
||||
// Maps to HTTP PUT. Used for replacing a resource.
|
||||
Put string `protobuf:"bytes,3,opt,name=put,proto3,oneof"`
|
||||
}
|
||||
|
||||
type HttpRule_Post struct {
|
||||
// Maps to HTTP POST. Used for creating a resource or performing an action.
|
||||
Post string `protobuf:"bytes,4,opt,name=post,proto3,oneof"`
|
||||
}
|
||||
|
||||
type HttpRule_Delete struct {
|
||||
// Maps to HTTP DELETE. Used for deleting a resource.
|
||||
Delete string `protobuf:"bytes,5,opt,name=delete,proto3,oneof"`
|
||||
}
|
||||
|
||||
type HttpRule_Patch struct {
|
||||
// Maps to HTTP PATCH. Used for updating a resource.
|
||||
Patch string `protobuf:"bytes,6,opt,name=patch,proto3,oneof"`
|
||||
}
|
||||
|
||||
type HttpRule_Custom struct {
|
||||
// The custom pattern is used for specifying an HTTP method that is not
|
||||
// included in the `pattern` field, such as HEAD, or "*" to leave the
|
||||
// HTTP method unspecified for this rule. The wild-card rule is useful
|
||||
// for services that provide content to Web (HTML) clients.
|
||||
Custom *CustomHttpPattern `protobuf:"bytes,8,opt,name=custom,proto3,oneof"`
|
||||
}
|
||||
|
||||
func (*HttpRule_Get) isHttpRule_Pattern() {}
|
||||
|
||||
func (*HttpRule_Put) isHttpRule_Pattern() {}
|
||||
|
||||
func (*HttpRule_Post) isHttpRule_Pattern() {}
|
||||
|
||||
func (*HttpRule_Delete) isHttpRule_Pattern() {}
|
||||
|
||||
func (*HttpRule_Patch) isHttpRule_Pattern() {}
|
||||
|
||||
func (*HttpRule_Custom) isHttpRule_Pattern() {}
|
||||
|
||||
// A custom pattern is used for defining custom HTTP verb.
|
||||
type CustomHttpPattern struct {
|
||||
state protoimpl.MessageState
|
||||
sizeCache protoimpl.SizeCache
|
||||
unknownFields protoimpl.UnknownFields
|
||||
|
||||
// The name of this custom HTTP verb.
|
||||
Kind string `protobuf:"bytes,1,opt,name=kind,proto3" json:"kind,omitempty"`
|
||||
// The path matched by this custom verb.
|
||||
Path string `protobuf:"bytes,2,opt,name=path,proto3" json:"path,omitempty"`
|
||||
}
|
||||
|
||||
func (x *CustomHttpPattern) Reset() {
|
||||
*x = CustomHttpPattern{}
|
||||
if protoimpl.UnsafeEnabled {
|
||||
mi := &file_google_api_http_proto_msgTypes[2]
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
}
|
||||
|
||||
func (x *CustomHttpPattern) String() string {
|
||||
return protoimpl.X.MessageStringOf(x)
|
||||
}
|
||||
|
||||
func (*CustomHttpPattern) ProtoMessage() {}
|
||||
|
||||
func (x *CustomHttpPattern) ProtoReflect() protoreflect.Message {
|
||||
mi := &file_google_api_http_proto_msgTypes[2]
|
||||
if protoimpl.UnsafeEnabled && x != nil {
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
if ms.LoadMessageInfo() == nil {
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
return ms
|
||||
}
|
||||
return mi.MessageOf(x)
|
||||
}
|
||||
|
||||
// Deprecated: Use CustomHttpPattern.ProtoReflect.Descriptor instead.
|
||||
func (*CustomHttpPattern) Descriptor() ([]byte, []int) {
|
||||
return file_google_api_http_proto_rawDescGZIP(), []int{2}
|
||||
}
|
||||
|
||||
func (x *CustomHttpPattern) GetKind() string {
|
||||
if x != nil {
|
||||
return x.Kind
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
func (x *CustomHttpPattern) GetPath() string {
|
||||
if x != nil {
|
||||
return x.Path
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
var File_google_api_http_proto protoreflect.FileDescriptor
|
||||
|
||||
var file_google_api_http_proto_rawDesc = []byte{
|
||||
0x0a, 0x15, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x68, 0x74, 0x74,
|
||||
0x70, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0a, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e,
|
||||
0x61, 0x70, 0x69, 0x22, 0x79, 0x0a, 0x04, 0x48, 0x74, 0x74, 0x70, 0x12, 0x2a, 0x0a, 0x05, 0x72,
|
||||
0x75, 0x6c, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x67, 0x6f, 0x6f,
|
||||
0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x48, 0x74, 0x74, 0x70, 0x52, 0x75, 0x6c, 0x65,
|
||||
0x52, 0x05, 0x72, 0x75, 0x6c, 0x65, 0x73, 0x12, 0x45, 0x0a, 0x1f, 0x66, 0x75, 0x6c, 0x6c, 0x79,
|
||||
0x5f, 0x64, 0x65, 0x63, 0x6f, 0x64, 0x65, 0x5f, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64,
|
||||
0x5f, 0x65, 0x78, 0x70, 0x61, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08,
|
||||
0x52, 0x1c, 0x66, 0x75, 0x6c, 0x6c, 0x79, 0x44, 0x65, 0x63, 0x6f, 0x64, 0x65, 0x52, 0x65, 0x73,
|
||||
0x65, 0x72, 0x76, 0x65, 0x64, 0x45, 0x78, 0x70, 0x61, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x22, 0xda,
|
||||
0x02, 0x0a, 0x08, 0x48, 0x74, 0x74, 0x70, 0x52, 0x75, 0x6c, 0x65, 0x12, 0x1a, 0x0a, 0x08, 0x73,
|
||||
0x65, 0x6c, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x73,
|
||||
0x65, 0x6c, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x12, 0x12, 0x0a, 0x03, 0x67, 0x65, 0x74, 0x18, 0x02,
|
||||
0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x03, 0x67, 0x65, 0x74, 0x12, 0x12, 0x0a, 0x03, 0x70,
|
||||
0x75, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x03, 0x70, 0x75, 0x74, 0x12,
|
||||
0x14, 0x0a, 0x04, 0x70, 0x6f, 0x73, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52,
|
||||
0x04, 0x70, 0x6f, 0x73, 0x74, 0x12, 0x18, 0x0a, 0x06, 0x64, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x18,
|
||||
0x05, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x06, 0x64, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x12,
|
||||
0x16, 0x0a, 0x05, 0x70, 0x61, 0x74, 0x63, 0x68, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00,
|
||||
0x52, 0x05, 0x70, 0x61, 0x74, 0x63, 0x68, 0x12, 0x37, 0x0a, 0x06, 0x63, 0x75, 0x73, 0x74, 0x6f,
|
||||
0x6d, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
|
||||
0x2e, 0x61, 0x70, 0x69, 0x2e, 0x43, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x48, 0x74, 0x74, 0x70, 0x50,
|
||||
0x61, 0x74, 0x74, 0x65, 0x72, 0x6e, 0x48, 0x00, 0x52, 0x06, 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d,
|
||||
0x12, 0x12, 0x0a, 0x04, 0x62, 0x6f, 0x64, 0x79, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04,
|
||||
0x62, 0x6f, 0x64, 0x79, 0x12, 0x23, 0x0a, 0x0d, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65,
|
||||
0x5f, 0x62, 0x6f, 0x64, 0x79, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x72, 0x65, 0x73,
|
||||
0x70, 0x6f, 0x6e, 0x73, 0x65, 0x42, 0x6f, 0x64, 0x79, 0x12, 0x45, 0x0a, 0x13, 0x61, 0x64, 0x64,
|
||||
0x69, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x5f, 0x62, 0x69, 0x6e, 0x64, 0x69, 0x6e, 0x67, 0x73,
|
||||
0x18, 0x0b, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e,
|
||||
0x61, 0x70, 0x69, 0x2e, 0x48, 0x74, 0x74, 0x70, 0x52, 0x75, 0x6c, 0x65, 0x52, 0x12, 0x61, 0x64,
|
||||
0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x42, 0x69, 0x6e, 0x64, 0x69, 0x6e, 0x67, 0x73,
|
||||
0x42, 0x09, 0x0a, 0x07, 0x70, 0x61, 0x74, 0x74, 0x65, 0x72, 0x6e, 0x22, 0x3b, 0x0a, 0x11, 0x43,
|
||||
0x75, 0x73, 0x74, 0x6f, 0x6d, 0x48, 0x74, 0x74, 0x70, 0x50, 0x61, 0x74, 0x74, 0x65, 0x72, 0x6e,
|
||||
0x12, 0x12, 0x0a, 0x04, 0x6b, 0x69, 0x6e, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04,
|
||||
0x6b, 0x69, 0x6e, 0x64, 0x12, 0x12, 0x0a, 0x04, 0x70, 0x61, 0x74, 0x68, 0x18, 0x02, 0x20, 0x01,
|
||||
0x28, 0x09, 0x52, 0x04, 0x70, 0x61, 0x74, 0x68, 0x42, 0x6a, 0x0a, 0x0e, 0x63, 0x6f, 0x6d, 0x2e,
|
||||
0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x42, 0x09, 0x48, 0x74, 0x74, 0x70,
|
||||
0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x41, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e,
|
||||
0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x67, 0x65, 0x6e, 0x70, 0x72,
|
||||
0x6f, 0x74, 0x6f, 0x2f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2f, 0x61,
|
||||
0x70, 0x69, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x3b, 0x61,
|
||||
0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0xf8, 0x01, 0x01, 0xa2, 0x02, 0x04,
|
||||
0x47, 0x41, 0x50, 0x49, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
|
||||
}
|
||||
|
||||
var (
|
||||
file_google_api_http_proto_rawDescOnce sync.Once
|
||||
file_google_api_http_proto_rawDescData = file_google_api_http_proto_rawDesc
|
||||
)
|
||||
|
||||
func file_google_api_http_proto_rawDescGZIP() []byte {
|
||||
file_google_api_http_proto_rawDescOnce.Do(func() {
|
||||
file_google_api_http_proto_rawDescData = protoimpl.X.CompressGZIP(file_google_api_http_proto_rawDescData)
|
||||
})
|
||||
return file_google_api_http_proto_rawDescData
|
||||
}
|
||||
|
||||
var file_google_api_http_proto_msgTypes = make([]protoimpl.MessageInfo, 3)
|
||||
var file_google_api_http_proto_goTypes = []interface{}{
|
||||
(*Http)(nil), // 0: google.api.Http
|
||||
(*HttpRule)(nil), // 1: google.api.HttpRule
|
||||
(*CustomHttpPattern)(nil), // 2: google.api.CustomHttpPattern
|
||||
}
|
||||
var file_google_api_http_proto_depIdxs = []int32{
|
||||
1, // 0: google.api.Http.rules:type_name -> google.api.HttpRule
|
||||
2, // 1: google.api.HttpRule.custom:type_name -> google.api.CustomHttpPattern
|
||||
1, // 2: google.api.HttpRule.additional_bindings:type_name -> google.api.HttpRule
|
||||
3, // [3:3] is the sub-list for method output_type
|
||||
3, // [3:3] is the sub-list for method input_type
|
||||
3, // [3:3] is the sub-list for extension type_name
|
||||
3, // [3:3] is the sub-list for extension extendee
|
||||
0, // [0:3] is the sub-list for field type_name
|
||||
}
|
||||
|
||||
func init() { file_google_api_http_proto_init() }
|
||||
func file_google_api_http_proto_init() {
|
||||
if File_google_api_http_proto != nil {
|
||||
return
|
||||
}
|
||||
if !protoimpl.UnsafeEnabled {
|
||||
file_google_api_http_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
|
||||
switch v := v.(*Http); i {
|
||||
case 0:
|
||||
return &v.state
|
||||
case 1:
|
||||
return &v.sizeCache
|
||||
case 2:
|
||||
return &v.unknownFields
|
||||
default:
|
||||
return nil
|
||||
}
|
||||
}
|
||||
file_google_api_http_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} {
|
||||
switch v := v.(*HttpRule); i {
|
||||
case 0:
|
||||
return &v.state
|
||||
case 1:
|
||||
return &v.sizeCache
|
||||
case 2:
|
||||
return &v.unknownFields
|
||||
default:
|
||||
return nil
|
||||
}
|
||||
}
|
||||
file_google_api_http_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} {
|
||||
switch v := v.(*CustomHttpPattern); i {
|
||||
case 0:
|
||||
return &v.state
|
||||
case 1:
|
||||
return &v.sizeCache
|
||||
case 2:
|
||||
return &v.unknownFields
|
||||
default:
|
||||
return nil
|
||||
}
|
||||
}
|
||||
}
|
||||
file_google_api_http_proto_msgTypes[1].OneofWrappers = []interface{}{
|
||||
(*HttpRule_Get)(nil),
|
||||
(*HttpRule_Put)(nil),
|
||||
(*HttpRule_Post)(nil),
|
||||
(*HttpRule_Delete)(nil),
|
||||
(*HttpRule_Patch)(nil),
|
||||
(*HttpRule_Custom)(nil),
|
||||
}
|
||||
type x struct{}
|
||||
out := protoimpl.TypeBuilder{
|
||||
File: protoimpl.DescBuilder{
|
||||
GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
|
||||
RawDescriptor: file_google_api_http_proto_rawDesc,
|
||||
NumEnums: 0,
|
||||
NumMessages: 3,
|
||||
NumExtensions: 0,
|
||||
NumServices: 0,
|
||||
},
|
||||
GoTypes: file_google_api_http_proto_goTypes,
|
||||
DependencyIndexes: file_google_api_http_proto_depIdxs,
|
||||
MessageInfos: file_google_api_http_proto_msgTypes,
|
||||
}.Build()
|
||||
File_google_api_http_proto = out.File
|
||||
file_google_api_http_proto_rawDesc = nil
|
||||
file_google_api_http_proto_goTypes = nil
|
||||
file_google_api_http_proto_depIdxs = nil
|
||||
}
|
@ -7,6 +7,7 @@
|
||||
package v1
|
||||
|
||||
import (
|
||||
duration "github.com/golang/protobuf/ptypes/duration"
|
||||
_ "google.golang.org/genproto/googleapis/api/annotations"
|
||||
protoreflect "google.golang.org/protobuf/reflect/protoreflect"
|
||||
protoimpl "google.golang.org/protobuf/runtime/protoimpl"
|
||||
@ -123,30 +124,144 @@ func (x *IdentifyResponse) GetUris() []string {
|
||||
return nil
|
||||
}
|
||||
|
||||
type Timer struct {
|
||||
state protoimpl.MessageState
|
||||
sizeCache protoimpl.SizeCache
|
||||
unknownFields protoimpl.UnknownFields
|
||||
|
||||
Duration *duration.Duration `protobuf:"bytes,1,opt,name=duration,proto3" json:"duration,omitempty"`
|
||||
Current *duration.Duration `protobuf:"bytes,2,opt,name=current,proto3" json:"current,omitempty"`
|
||||
}
|
||||
|
||||
func (x *Timer) Reset() {
|
||||
*x = Timer{}
|
||||
if protoimpl.UnsafeEnabled {
|
||||
mi := &file_schnutibox_proto_msgTypes[2]
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
}
|
||||
|
||||
func (x *Timer) String() string {
|
||||
return protoimpl.X.MessageStringOf(x)
|
||||
}
|
||||
|
||||
func (*Timer) ProtoMessage() {}
|
||||
|
||||
func (x *Timer) ProtoReflect() protoreflect.Message {
|
||||
mi := &file_schnutibox_proto_msgTypes[2]
|
||||
if protoimpl.UnsafeEnabled && x != nil {
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
if ms.LoadMessageInfo() == nil {
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
return ms
|
||||
}
|
||||
return mi.MessageOf(x)
|
||||
}
|
||||
|
||||
// Deprecated: Use Timer.ProtoReflect.Descriptor instead.
|
||||
func (*Timer) Descriptor() ([]byte, []int) {
|
||||
return file_schnutibox_proto_rawDescGZIP(), []int{2}
|
||||
}
|
||||
|
||||
func (x *Timer) GetDuration() *duration.Duration {
|
||||
if x != nil {
|
||||
return x.Duration
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (x *Timer) GetCurrent() *duration.Duration {
|
||||
if x != nil {
|
||||
return x.Current
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
type TimerEmpty struct {
|
||||
state protoimpl.MessageState
|
||||
sizeCache protoimpl.SizeCache
|
||||
unknownFields protoimpl.UnknownFields
|
||||
}
|
||||
|
||||
func (x *TimerEmpty) Reset() {
|
||||
*x = TimerEmpty{}
|
||||
if protoimpl.UnsafeEnabled {
|
||||
mi := &file_schnutibox_proto_msgTypes[3]
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
}
|
||||
|
||||
func (x *TimerEmpty) String() string {
|
||||
return protoimpl.X.MessageStringOf(x)
|
||||
}
|
||||
|
||||
func (*TimerEmpty) ProtoMessage() {}
|
||||
|
||||
func (x *TimerEmpty) ProtoReflect() protoreflect.Message {
|
||||
mi := &file_schnutibox_proto_msgTypes[3]
|
||||
if protoimpl.UnsafeEnabled && x != nil {
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
if ms.LoadMessageInfo() == nil {
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
return ms
|
||||
}
|
||||
return mi.MessageOf(x)
|
||||
}
|
||||
|
||||
// Deprecated: Use TimerEmpty.ProtoReflect.Descriptor instead.
|
||||
func (*TimerEmpty) Descriptor() ([]byte, []int) {
|
||||
return file_schnutibox_proto_rawDescGZIP(), []int{3}
|
||||
}
|
||||
|
||||
var File_schnutibox_proto protoreflect.FileDescriptor
|
||||
|
||||
var file_schnutibox_proto_rawDesc = []byte{
|
||||
0x0a, 0x10, 0x73, 0x63, 0x68, 0x6e, 0x75, 0x74, 0x69, 0x62, 0x6f, 0x78, 0x2e, 0x70, 0x72, 0x6f,
|
||||
0x74, 0x6f, 0x12, 0x0d, 0x73, 0x63, 0x68, 0x6e, 0x75, 0x74, 0x69, 0x62, 0x6f, 0x78, 0x2e, 0x76,
|
||||
0x31, 0x1a, 0x1c, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x61, 0x6e,
|
||||
0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22,
|
||||
0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a,
|
||||
0x1e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66,
|
||||
0x2f, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22,
|
||||
0x21, 0x0a, 0x0f, 0x49, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x66, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65,
|
||||
0x73, 0x74, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x02,
|
||||
0x69, 0x64, 0x22, 0x3a, 0x0a, 0x10, 0x49, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x66, 0x79, 0x52, 0x65,
|
||||
0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01,
|
||||
0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x75, 0x72,
|
||||
0x69, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x09, 0x52, 0x04, 0x75, 0x72, 0x69, 0x73, 0x32, 0x79,
|
||||
0x0a, 0x11, 0x49, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x66, 0x69, 0x65, 0x72, 0x53, 0x65, 0x72, 0x76,
|
||||
0x69, 0x63, 0x65, 0x12, 0x64, 0x0a, 0x08, 0x49, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x66, 0x79, 0x12,
|
||||
0x1e, 0x2e, 0x73, 0x63, 0x68, 0x6e, 0x75, 0x74, 0x69, 0x62, 0x6f, 0x78, 0x2e, 0x76, 0x31, 0x2e,
|
||||
0x49, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x66, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a,
|
||||
0x1f, 0x2e, 0x73, 0x63, 0x68, 0x6e, 0x75, 0x74, 0x69, 0x62, 0x6f, 0x78, 0x2e, 0x76, 0x31, 0x2e,
|
||||
0x49, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x66, 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65,
|
||||
0x22, 0x17, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x11, 0x22, 0x0c, 0x2f, 0x76, 0x31, 0x2f, 0x69, 0x64,
|
||||
0x65, 0x6e, 0x74, 0x69, 0x66, 0x79, 0x3a, 0x01, 0x2a, 0x42, 0x23, 0x5a, 0x21, 0x67, 0x6f, 0x2e,
|
||||
0x78, 0x73, 0x66, 0x78, 0x2e, 0x64, 0x65, 0x76, 0x2f, 0x73, 0x63, 0x68, 0x6e, 0x75, 0x74, 0x69,
|
||||
0x62, 0x6f, 0x78, 0x2f, 0x70, 0x6b, 0x67, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x76, 0x31, 0x62, 0x06,
|
||||
0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
|
||||
0x69, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x09, 0x52, 0x04, 0x75, 0x72, 0x69, 0x73, 0x22, 0x73,
|
||||
0x0a, 0x05, 0x54, 0x69, 0x6d, 0x65, 0x72, 0x12, 0x35, 0x0a, 0x08, 0x64, 0x75, 0x72, 0x61, 0x74,
|
||||
0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67,
|
||||
0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61,
|
||||
0x74, 0x69, 0x6f, 0x6e, 0x52, 0x08, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x33,
|
||||
0x0a, 0x07, 0x63, 0x75, 0x72, 0x72, 0x65, 0x6e, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32,
|
||||
0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75,
|
||||
0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x07, 0x63, 0x75, 0x72, 0x72,
|
||||
0x65, 0x6e, 0x74, 0x22, 0x0c, 0x0a, 0x0a, 0x54, 0x69, 0x6d, 0x65, 0x72, 0x45, 0x6d, 0x70, 0x74,
|
||||
0x79, 0x32, 0x7d, 0x0a, 0x11, 0x49, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x66, 0x69, 0x65, 0x72, 0x53,
|
||||
0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x68, 0x0a, 0x08, 0x49, 0x64, 0x65, 0x6e, 0x74, 0x69,
|
||||
0x66, 0x79, 0x12, 0x1e, 0x2e, 0x73, 0x63, 0x68, 0x6e, 0x75, 0x74, 0x69, 0x62, 0x6f, 0x78, 0x2e,
|
||||
0x76, 0x31, 0x2e, 0x49, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x66, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65,
|
||||
0x73, 0x74, 0x1a, 0x1f, 0x2e, 0x73, 0x63, 0x68, 0x6e, 0x75, 0x74, 0x69, 0x62, 0x6f, 0x78, 0x2e,
|
||||
0x76, 0x31, 0x2e, 0x49, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x66, 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f,
|
||||
0x6e, 0x73, 0x65, 0x22, 0x1b, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x15, 0x22, 0x10, 0x2f, 0x61, 0x70,
|
||||
0x69, 0x2f, 0x76, 0x31, 0x2f, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x66, 0x79, 0x3a, 0x01, 0x2a,
|
||||
0x32, 0xb0, 0x01, 0x0a, 0x0c, 0x54, 0x69, 0x6d, 0x65, 0x72, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63,
|
||||
0x65, 0x12, 0x4e, 0x0a, 0x06, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x12, 0x14, 0x2e, 0x73, 0x63,
|
||||
0x68, 0x6e, 0x75, 0x74, 0x69, 0x62, 0x6f, 0x78, 0x2e, 0x76, 0x31, 0x2e, 0x54, 0x69, 0x6d, 0x65,
|
||||
0x72, 0x1a, 0x14, 0x2e, 0x73, 0x63, 0x68, 0x6e, 0x75, 0x74, 0x69, 0x62, 0x6f, 0x78, 0x2e, 0x76,
|
||||
0x31, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x72, 0x22, 0x18, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x12, 0x22,
|
||||
0x0d, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x76, 0x31, 0x2f, 0x74, 0x69, 0x6d, 0x65, 0x72, 0x3a, 0x01,
|
||||
0x2a, 0x12, 0x50, 0x0a, 0x03, 0x47, 0x65, 0x74, 0x12, 0x19, 0x2e, 0x73, 0x63, 0x68, 0x6e, 0x75,
|
||||
0x74, 0x69, 0x62, 0x6f, 0x78, 0x2e, 0x76, 0x31, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x72, 0x45, 0x6d,
|
||||
0x70, 0x74, 0x79, 0x1a, 0x14, 0x2e, 0x73, 0x63, 0x68, 0x6e, 0x75, 0x74, 0x69, 0x62, 0x6f, 0x78,
|
||||
0x2e, 0x76, 0x31, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x72, 0x22, 0x18, 0x82, 0xd3, 0xe4, 0x93, 0x02,
|
||||
0x12, 0x12, 0x0d, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x76, 0x31, 0x2f, 0x74, 0x69, 0x6d, 0x65, 0x72,
|
||||
0x3a, 0x01, 0x2a, 0x42, 0x23, 0x5a, 0x21, 0x67, 0x6f, 0x2e, 0x78, 0x73, 0x66, 0x78, 0x2e, 0x64,
|
||||
0x65, 0x76, 0x2f, 0x73, 0x63, 0x68, 0x6e, 0x75, 0x74, 0x69, 0x62, 0x6f, 0x78, 0x2f, 0x70, 0x6b,
|
||||
0x67, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x76, 0x31, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
|
||||
}
|
||||
|
||||
var (
|
||||
@ -161,19 +276,28 @@ func file_schnutibox_proto_rawDescGZIP() []byte {
|
||||
return file_schnutibox_proto_rawDescData
|
||||
}
|
||||
|
||||
var file_schnutibox_proto_msgTypes = make([]protoimpl.MessageInfo, 2)
|
||||
var file_schnutibox_proto_msgTypes = make([]protoimpl.MessageInfo, 4)
|
||||
var file_schnutibox_proto_goTypes = []interface{}{
|
||||
(*IdentifyRequest)(nil), // 0: schnutibox.v1.IdentifyRequest
|
||||
(*IdentifyResponse)(nil), // 1: schnutibox.v1.IdentifyResponse
|
||||
(*IdentifyRequest)(nil), // 0: schnutibox.v1.IdentifyRequest
|
||||
(*IdentifyResponse)(nil), // 1: schnutibox.v1.IdentifyResponse
|
||||
(*Timer)(nil), // 2: schnutibox.v1.Timer
|
||||
(*TimerEmpty)(nil), // 3: schnutibox.v1.TimerEmpty
|
||||
(*duration.Duration)(nil), // 4: google.protobuf.Duration
|
||||
}
|
||||
var file_schnutibox_proto_depIdxs = []int32{
|
||||
0, // 0: schnutibox.v1.IdentifierService.Identify:input_type -> schnutibox.v1.IdentifyRequest
|
||||
1, // 1: schnutibox.v1.IdentifierService.Identify:output_type -> schnutibox.v1.IdentifyResponse
|
||||
1, // [1:2] is the sub-list for method output_type
|
||||
0, // [0:1] is the sub-list for method input_type
|
||||
0, // [0:0] is the sub-list for extension type_name
|
||||
0, // [0:0] is the sub-list for extension extendee
|
||||
0, // [0:0] is the sub-list for field type_name
|
||||
4, // 0: schnutibox.v1.Timer.duration:type_name -> google.protobuf.Duration
|
||||
4, // 1: schnutibox.v1.Timer.current:type_name -> google.protobuf.Duration
|
||||
0, // 2: schnutibox.v1.IdentifierService.Identify:input_type -> schnutibox.v1.IdentifyRequest
|
||||
2, // 3: schnutibox.v1.TimerService.Create:input_type -> schnutibox.v1.Timer
|
||||
3, // 4: schnutibox.v1.TimerService.Get:input_type -> schnutibox.v1.TimerEmpty
|
||||
1, // 5: schnutibox.v1.IdentifierService.Identify:output_type -> schnutibox.v1.IdentifyResponse
|
||||
2, // 6: schnutibox.v1.TimerService.Create:output_type -> schnutibox.v1.Timer
|
||||
2, // 7: schnutibox.v1.TimerService.Get:output_type -> schnutibox.v1.Timer
|
||||
5, // [5:8] is the sub-list for method output_type
|
||||
2, // [2:5] is the sub-list for method input_type
|
||||
2, // [2:2] is the sub-list for extension type_name
|
||||
2, // [2:2] is the sub-list for extension extendee
|
||||
0, // [0:2] is the sub-list for field type_name
|
||||
}
|
||||
|
||||
func init() { file_schnutibox_proto_init() }
|
||||
@ -206,6 +330,30 @@ func file_schnutibox_proto_init() {
|
||||
return nil
|
||||
}
|
||||
}
|
||||
file_schnutibox_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} {
|
||||
switch v := v.(*Timer); i {
|
||||
case 0:
|
||||
return &v.state
|
||||
case 1:
|
||||
return &v.sizeCache
|
||||
case 2:
|
||||
return &v.unknownFields
|
||||
default:
|
||||
return nil
|
||||
}
|
||||
}
|
||||
file_schnutibox_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} {
|
||||
switch v := v.(*TimerEmpty); i {
|
||||
case 0:
|
||||
return &v.state
|
||||
case 1:
|
||||
return &v.sizeCache
|
||||
case 2:
|
||||
return &v.unknownFields
|
||||
default:
|
||||
return nil
|
||||
}
|
||||
}
|
||||
}
|
||||
type x struct{}
|
||||
out := protoimpl.TypeBuilder{
|
||||
@ -213,9 +361,9 @@ func file_schnutibox_proto_init() {
|
||||
GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
|
||||
RawDescriptor: file_schnutibox_proto_rawDesc,
|
||||
NumEnums: 0,
|
||||
NumMessages: 2,
|
||||
NumMessages: 4,
|
||||
NumExtensions: 0,
|
||||
NumServices: 1,
|
||||
NumServices: 2,
|
||||
},
|
||||
GoTypes: file_schnutibox_proto_goTypes,
|
||||
DependencyIndexes: file_schnutibox_proto_depIdxs,
|
||||
|
@ -65,6 +65,58 @@ func local_request_IdentifierService_Identify_0(ctx context.Context, marshaler r
|
||||
|
||||
}
|
||||
|
||||
func request_TimerService_Create_0(ctx context.Context, marshaler runtime.Marshaler, client TimerServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
|
||||
var protoReq Timer
|
||||
var metadata runtime.ServerMetadata
|
||||
|
||||
newReader, berr := utilities.IOReaderFactory(req.Body)
|
||||
if berr != nil {
|
||||
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr)
|
||||
}
|
||||
if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF {
|
||||
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
|
||||
}
|
||||
|
||||
msg, err := client.Create(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
|
||||
return msg, metadata, err
|
||||
|
||||
}
|
||||
|
||||
func local_request_TimerService_Create_0(ctx context.Context, marshaler runtime.Marshaler, server TimerServiceServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
|
||||
var protoReq Timer
|
||||
var metadata runtime.ServerMetadata
|
||||
|
||||
newReader, berr := utilities.IOReaderFactory(req.Body)
|
||||
if berr != nil {
|
||||
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr)
|
||||
}
|
||||
if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF {
|
||||
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
|
||||
}
|
||||
|
||||
msg, err := server.Create(ctx, &protoReq)
|
||||
return msg, metadata, err
|
||||
|
||||
}
|
||||
|
||||
func request_TimerService_Get_0(ctx context.Context, marshaler runtime.Marshaler, client TimerServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
|
||||
var protoReq TimerEmpty
|
||||
var metadata runtime.ServerMetadata
|
||||
|
||||
msg, err := client.Get(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
|
||||
return msg, metadata, err
|
||||
|
||||
}
|
||||
|
||||
func local_request_TimerService_Get_0(ctx context.Context, marshaler runtime.Marshaler, server TimerServiceServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
|
||||
var protoReq TimerEmpty
|
||||
var metadata runtime.ServerMetadata
|
||||
|
||||
msg, err := server.Get(ctx, &protoReq)
|
||||
return msg, metadata, err
|
||||
|
||||
}
|
||||
|
||||
// RegisterIdentifierServiceHandlerServer registers the http handlers for service IdentifierService to "mux".
|
||||
// UnaryRPC :call IdentifierServiceServer directly.
|
||||
// StreamingRPC :currently unsupported pending https://github.com/grpc/grpc-go/issues/906.
|
||||
@ -97,6 +149,61 @@ func RegisterIdentifierServiceHandlerServer(ctx context.Context, mux *runtime.Se
|
||||
return nil
|
||||
}
|
||||
|
||||
// RegisterTimerServiceHandlerServer registers the http handlers for service TimerService to "mux".
|
||||
// UnaryRPC :call TimerServiceServer directly.
|
||||
// StreamingRPC :currently unsupported pending https://github.com/grpc/grpc-go/issues/906.
|
||||
// Note that using this registration option will cause many gRPC library features to stop working. Consider using RegisterTimerServiceHandlerFromEndpoint instead.
|
||||
func RegisterTimerServiceHandlerServer(ctx context.Context, mux *runtime.ServeMux, server TimerServiceServer) error {
|
||||
|
||||
mux.Handle("POST", pattern_TimerService_Create_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
|
||||
ctx, cancel := context.WithCancel(req.Context())
|
||||
defer cancel()
|
||||
var stream runtime.ServerTransportStream
|
||||
ctx = grpc.NewContextWithServerTransportStream(ctx, &stream)
|
||||
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
|
||||
rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req, "/schnutibox.v1.TimerService/Create")
|
||||
if err != nil {
|
||||
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
|
||||
return
|
||||
}
|
||||
resp, md, err := local_request_TimerService_Create_0(rctx, inboundMarshaler, server, req, pathParams)
|
||||
md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer())
|
||||
ctx = runtime.NewServerMetadataContext(ctx, md)
|
||||
if err != nil {
|
||||
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
|
||||
return
|
||||
}
|
||||
|
||||
forward_TimerService_Create_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
|
||||
|
||||
})
|
||||
|
||||
mux.Handle("GET", pattern_TimerService_Get_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
|
||||
ctx, cancel := context.WithCancel(req.Context())
|
||||
defer cancel()
|
||||
var stream runtime.ServerTransportStream
|
||||
ctx = grpc.NewContextWithServerTransportStream(ctx, &stream)
|
||||
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
|
||||
rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req, "/schnutibox.v1.TimerService/Get")
|
||||
if err != nil {
|
||||
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
|
||||
return
|
||||
}
|
||||
resp, md, err := local_request_TimerService_Get_0(rctx, inboundMarshaler, server, req, pathParams)
|
||||
md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer())
|
||||
ctx = runtime.NewServerMetadataContext(ctx, md)
|
||||
if err != nil {
|
||||
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
|
||||
return
|
||||
}
|
||||
|
||||
forward_TimerService_Get_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
|
||||
|
||||
})
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// RegisterIdentifierServiceHandlerFromEndpoint is same as RegisterIdentifierServiceHandler but
|
||||
// automatically dials to "endpoint" and closes the connection when "ctx" gets done.
|
||||
func RegisterIdentifierServiceHandlerFromEndpoint(ctx context.Context, mux *runtime.ServeMux, endpoint string, opts []grpc.DialOption) (err error) {
|
||||
@ -159,9 +266,102 @@ func RegisterIdentifierServiceHandlerClient(ctx context.Context, mux *runtime.Se
|
||||
}
|
||||
|
||||
var (
|
||||
pattern_IdentifierService_Identify_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1}, []string{"v1", "identify"}, ""))
|
||||
pattern_IdentifierService_Identify_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"api", "v1", "identify"}, ""))
|
||||
)
|
||||
|
||||
var (
|
||||
forward_IdentifierService_Identify_0 = runtime.ForwardResponseMessage
|
||||
)
|
||||
|
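The HTTP pattern for IdentifierService.Identify moves from /v1/identify to /api/v1/identify in this change. A minimal sketch of calling that gateway route over plain HTTP; the address is a placeholder (the real one comes from config.Cfg.Web.Hostname and config.Cfg.Web.Port) and the tag ID is invented:

package main

import (
	"bytes"
	"fmt"
	"io"
	"net/http"
)

func main() {
	// Placeholder address; the server from this change listens on config.Cfg.Web.
	body := bytes.NewBufferString(`{"id": "04B224EA"}`)

	resp, err := http.Post("http://localhost:8080/api/v1/identify", "application/json", body)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	// The response carries the configured name and URIs for the tag, if any.
	b, _ := io.ReadAll(resp.Body)
	fmt.Println(resp.Status, string(b))
}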
||||
// RegisterTimerServiceHandlerFromEndpoint is same as RegisterTimerServiceHandler but
|
||||
// automatically dials to "endpoint" and closes the connection when "ctx" gets done.
|
||||
func RegisterTimerServiceHandlerFromEndpoint(ctx context.Context, mux *runtime.ServeMux, endpoint string, opts []grpc.DialOption) (err error) {
|
||||
conn, err := grpc.Dial(endpoint, opts...)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer func() {
|
||||
if err != nil {
|
||||
if cerr := conn.Close(); cerr != nil {
|
||||
grpclog.Infof("Failed to close conn to %s: %v", endpoint, cerr)
|
||||
}
|
||||
return
|
||||
}
|
||||
go func() {
|
||||
<-ctx.Done()
|
||||
if cerr := conn.Close(); cerr != nil {
|
||||
grpclog.Infof("Failed to close conn to %s: %v", endpoint, cerr)
|
||||
}
|
||||
}()
|
||||
}()
|
||||
|
||||
return RegisterTimerServiceHandler(ctx, mux, conn)
|
||||
}
|
||||
|
||||
// RegisterTimerServiceHandler registers the http handlers for service TimerService to "mux".
|
||||
// The handlers forward requests to the grpc endpoint over "conn".
|
||||
func RegisterTimerServiceHandler(ctx context.Context, mux *runtime.ServeMux, conn *grpc.ClientConn) error {
|
||||
return RegisterTimerServiceHandlerClient(ctx, mux, NewTimerServiceClient(conn))
|
||||
}
|
||||
|
||||
// RegisterTimerServiceHandlerClient registers the http handlers for service TimerService
|
||||
// to "mux". The handlers forward requests to the grpc endpoint over the given implementation of "TimerServiceClient".
|
||||
// Note: the gRPC framework executes interceptors within the gRPC handler. If the passed in "TimerServiceClient"
|
||||
// doesn't go through the normal gRPC flow (creating a gRPC client etc.) then it will be up to the passed in
|
||||
// "TimerServiceClient" to call the correct interceptors.
|
||||
func RegisterTimerServiceHandlerClient(ctx context.Context, mux *runtime.ServeMux, client TimerServiceClient) error {
|
||||
|
||||
mux.Handle("POST", pattern_TimerService_Create_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
|
||||
ctx, cancel := context.WithCancel(req.Context())
|
||||
defer cancel()
|
||||
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
|
||||
rctx, err := runtime.AnnotateContext(ctx, mux, req, "/schnutibox.v1.TimerService/Create")
|
||||
if err != nil {
|
||||
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
|
||||
return
|
||||
}
|
||||
resp, md, err := request_TimerService_Create_0(rctx, inboundMarshaler, client, req, pathParams)
|
||||
ctx = runtime.NewServerMetadataContext(ctx, md)
|
||||
if err != nil {
|
||||
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
|
||||
return
|
||||
}
|
||||
|
||||
forward_TimerService_Create_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
|
||||
|
||||
})
|
||||
|
||||
mux.Handle("GET", pattern_TimerService_Get_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
|
||||
ctx, cancel := context.WithCancel(req.Context())
|
||||
defer cancel()
|
||||
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
|
||||
rctx, err := runtime.AnnotateContext(ctx, mux, req, "/schnutibox.v1.TimerService/Get")
|
||||
if err != nil {
|
||||
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
|
||||
return
|
||||
}
|
||||
resp, md, err := request_TimerService_Get_0(rctx, inboundMarshaler, client, req, pathParams)
|
||||
ctx = runtime.NewServerMetadataContext(ctx, md)
|
||||
if err != nil {
|
||||
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
|
||||
return
|
||||
}
|
||||
|
||||
forward_TimerService_Get_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
|
||||
|
||||
})
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
var (
|
||||
pattern_TimerService_Create_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"api", "v1", "timer"}, ""))
|
||||
|
||||
pattern_TimerService_Get_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"api", "v1", "timer"}, ""))
|
||||
)
|
||||
|
||||
var (
|
||||
forward_TimerService_Create_0 = runtime.ForwardResponseMessage
|
||||
|
||||
forward_TimerService_Get_0 = runtime.ForwardResponseMessage
|
||||
)
|
||||
|
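The two patterns above map TimerService.Create to POST /api/v1/timer and TimerService.Get to GET /api/v1/timer. A rough sketch of exercising both through the gateway; the address is again a placeholder, and the duration uses the JSON string encoding of google.protobuf.Duration:

package main

import (
	"bytes"
	"fmt"
	"io"
	"net/http"
)

func main() {
	base := "http://localhost:8080" // placeholder; the real address comes from config.Cfg.Web

	// Create a ten minute timer; google.protobuf.Duration is encoded as a string in JSON.
	resp, err := http.Post(base+"/api/v1/timer", "application/json",
		bytes.NewBufferString(`{"duration": "600s"}`))
	if err != nil {
		panic(err)
	}
	resp.Body.Close()

	// Read the timer state back.
	resp, err = http.Get(base + "/api/v1/timer")
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	b, _ := io.ReadAll(resp.Body)
	fmt.Println(string(b))
}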
@ -97,3 +97,123 @@ var IdentifierService_ServiceDesc = grpc.ServiceDesc{
|
||||
Streams: []grpc.StreamDesc{},
|
||||
Metadata: "schnutibox.proto",
|
||||
}
|
||||
|
||||
// TimerServiceClient is the client API for TimerService service.
|
||||
//
|
||||
// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream.
|
||||
type TimerServiceClient interface {
|
||||
Create(ctx context.Context, in *Timer, opts ...grpc.CallOption) (*Timer, error)
|
||||
Get(ctx context.Context, in *TimerEmpty, opts ...grpc.CallOption) (*Timer, error)
|
||||
}
|
||||
|
||||
type timerServiceClient struct {
|
||||
cc grpc.ClientConnInterface
|
||||
}
|
||||
|
||||
func NewTimerServiceClient(cc grpc.ClientConnInterface) TimerServiceClient {
|
||||
return &timerServiceClient{cc}
|
||||
}
|
||||
|
||||
func (c *timerServiceClient) Create(ctx context.Context, in *Timer, opts ...grpc.CallOption) (*Timer, error) {
|
||||
out := new(Timer)
|
||||
err := c.cc.Invoke(ctx, "/schnutibox.v1.TimerService/Create", in, out, opts...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return out, nil
|
||||
}
|
||||
|
||||
func (c *timerServiceClient) Get(ctx context.Context, in *TimerEmpty, opts ...grpc.CallOption) (*Timer, error) {
|
||||
out := new(Timer)
|
||||
err := c.cc.Invoke(ctx, "/schnutibox.v1.TimerService/Get", in, out, opts...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return out, nil
|
||||
}
|
||||
|
||||
// TimerServiceServer is the server API for TimerService service.
|
||||
// All implementations should embed UnimplementedTimerServiceServer
|
||||
// for forward compatibility
|
||||
type TimerServiceServer interface {
|
||||
Create(context.Context, *Timer) (*Timer, error)
|
||||
Get(context.Context, *TimerEmpty) (*Timer, error)
|
||||
}
|
||||
|
||||
// UnimplementedTimerServiceServer should be embedded to have forward compatible implementations.
|
||||
type UnimplementedTimerServiceServer struct {
|
||||
}
|
||||
|
||||
func (UnimplementedTimerServiceServer) Create(context.Context, *Timer) (*Timer, error) {
|
||||
return nil, status.Errorf(codes.Unimplemented, "method Create not implemented")
|
||||
}
|
||||
func (UnimplementedTimerServiceServer) Get(context.Context, *TimerEmpty) (*Timer, error) {
|
||||
return nil, status.Errorf(codes.Unimplemented, "method Get not implemented")
|
||||
}
|
||||
|
||||
// UnsafeTimerServiceServer may be embedded to opt out of forward compatibility for this service.
|
||||
// Use of this interface is not recommended, as added methods to TimerServiceServer will
|
||||
// result in compilation errors.
|
||||
type UnsafeTimerServiceServer interface {
|
||||
mustEmbedUnimplementedTimerServiceServer()
|
||||
}
|
||||
|
||||
func RegisterTimerServiceServer(s grpc.ServiceRegistrar, srv TimerServiceServer) {
|
||||
s.RegisterService(&TimerService_ServiceDesc, srv)
|
||||
}
|
||||
|
||||
func _TimerService_Create_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
|
||||
in := new(Timer)
|
||||
if err := dec(in); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if interceptor == nil {
|
||||
return srv.(TimerServiceServer).Create(ctx, in)
|
||||
}
|
||||
info := &grpc.UnaryServerInfo{
|
||||
Server: srv,
|
||||
FullMethod: "/schnutibox.v1.TimerService/Create",
|
||||
}
|
||||
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
|
||||
return srv.(TimerServiceServer).Create(ctx, req.(*Timer))
|
||||
}
|
||||
return interceptor(ctx, in, info, handler)
|
||||
}
|
||||
|
||||
func _TimerService_Get_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
|
||||
in := new(TimerEmpty)
|
||||
if err := dec(in); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if interceptor == nil {
|
||||
return srv.(TimerServiceServer).Get(ctx, in)
|
||||
}
|
||||
info := &grpc.UnaryServerInfo{
|
||||
Server: srv,
|
||||
FullMethod: "/schnutibox.v1.TimerService/Get",
|
||||
}
|
||||
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
|
||||
return srv.(TimerServiceServer).Get(ctx, req.(*TimerEmpty))
|
||||
}
|
||||
return interceptor(ctx, in, info, handler)
|
||||
}
|
||||
|
||||
// TimerService_ServiceDesc is the grpc.ServiceDesc for TimerService service.
|
||||
// It's only intended for direct use with grpc.RegisterService,
|
||||
// and not to be introspected or modified (even as a copy)
|
||||
var TimerService_ServiceDesc = grpc.ServiceDesc{
|
||||
ServiceName: "schnutibox.v1.TimerService",
|
||||
HandlerType: (*TimerServiceServer)(nil),
|
||||
Methods: []grpc.MethodDesc{
|
||||
{
|
||||
MethodName: "Create",
|
||||
Handler: _TimerService_Create_Handler,
|
||||
},
|
||||
{
|
||||
MethodName: "Get",
|
||||
Handler: _TimerService_Get_Handler,
|
||||
},
|
||||
},
|
||||
Streams: []grpc.StreamDesc{},
|
||||
Metadata: "schnutibox.proto",
|
||||
}
|
||||
|
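For callers that prefer gRPC over the JSON gateway, the generated TimerServiceClient can be used directly. A small sketch, assuming the schnutibox gRPC server is reachable on a placeholder address (the CLI in this change dials via internal/grpcclient and config.Cfg.Web instead):

package main

import (
	"context"
	"log"
	"time"

	api "go.xsfx.dev/schnutibox/pkg/api/v1"
	"google.golang.org/grpc"
	"google.golang.org/protobuf/types/known/durationpb"
)

func main() {
	// Placeholder endpoint; in the real setup this comes from config.Cfg.Web.
	conn, err := grpc.Dial("localhost:8080", grpc.WithInsecure())
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()

	c := api.NewTimerServiceClient(conn)

	// Create a 30 minute sleep timer...
	if _, err := c.Create(context.Background(), &api.Timer{Duration: durationpb.New(30 * time.Minute)}); err != nil {
		log.Fatal(err)
	}

	// ...and read it back.
	t, err := c.Get(context.Background(), &api.TimerEmpty{})
	if err != nil {
		log.Fatal(err)
	}

	log.Printf("timer: %d of %d seconds left", t.GetCurrent().GetSeconds(), t.GetDuration().GetSeconds())
}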
56
pkg/currentsong/currentsong.go
Normal file
56
pkg/currentsong/currentsong.go
Normal file
@ -0,0 +1,56 @@
|
||||
package currentsong
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"net/http"
|
||||
|
||||
"go.xsfx.dev/logginghandler"
|
||||
)
|
||||
|
||||
var recvs = make(map[chan string]struct{}) // nolint:gochecknoglobals
|
||||
|
||||
// Write writes current track to the receivers.
|
||||
func Write(track string) {
|
||||
for k := range recvs {
|
||||
k <- track
|
||||
}
|
||||
}
|
||||
|
||||
func Handler(w http.ResponseWriter, r *http.Request) {
|
||||
logger := logginghandler.Logger(r)
|
||||
logger.Debug().Msg("got a new receiver")
|
||||
|
||||
flusher, ok := w.(http.Flusher)
|
||||
if !ok {
|
||||
logger.Error().Msg("streaming unsupported")
|
||||
http.Error(w, "streaming unsupported", http.StatusInternalServerError)
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
w.Header().Set("Content-Type", "text/event-stream")
|
||||
w.Header().Set("Cache-Control", "no-cache")
|
||||
w.Header().Set("Connection", "keep-alive")
|
||||
// TODO: has to be something else!
|
||||
w.Header().Set("Access-Control-Allow-Origin", "*")
|
||||
|
||||
cChan := make(chan string)
|
||||
|
||||
recvs[cChan] = struct{}{}
|
||||
|
||||
for {
|
||||
select {
|
||||
case e := <-cChan:
|
||||
// Send event to client.
|
||||
fmt.Fprintf(w, "data: %s\n\n", e)
|
||||
|
||||
// Send it right away instead of buffering it.
|
||||
flusher.Flush()
|
||||
case <-r.Context().Done():
|
||||
close(cChan)
|
||||
delete(recvs, cChan)
|
||||
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
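The handler above is mounted at /currentsong by pkg/web and pushes the currently playing track to each connected client as server-sent events. A throwaway consumer sketch in Go, with the address as a placeholder:

package main

import (
	"bufio"
	"fmt"
	"net/http"
	"strings"
)

func main() {
	// Placeholder address; the endpoint is served by the schnutibox web server.
	resp, err := http.Get("http://localhost:8080/currentsong")
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	// Each event arrives as a "data: <artist - track>" line followed by a blank line.
	scanner := bufio.NewScanner(resp.Body)
	for scanner.Scan() {
		line := scanner.Text()
		if strings.HasPrefix(line, "data: ") {
			fmt.Println(strings.TrimPrefix(line, "data: "))
		}
	}
}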
138
pkg/mpc/mpc.go
Normal file
138
pkg/mpc/mpc.go
Normal file
@ -0,0 +1,138 @@
|
||||
package mpc
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"time"
|
||||
|
||||
"github.com/fhs/gompd/v2/mpd"
|
||||
"github.com/rs/zerolog"
|
||||
"github.com/rs/zerolog/log"
|
||||
"go.xsfx.dev/schnutibox/internal/config"
|
||||
"go.xsfx.dev/schnutibox/internal/metrics"
|
||||
)
|
||||
|
||||
const (
|
||||
timeout = 5 * time.Second
|
||||
timeoutWait = time.Second / 2
|
||||
)
|
||||
|
||||
var errTimeout = errors.New("timeout")
|
||||
|
||||
func Conn() (*mpd.Client, error) {
|
||||
t := time.NewTimer(timeout)
|
||||
|
||||
for {
|
||||
select {
|
||||
case <-t.C:
|
||||
return nil, errTimeout
|
||||
default:
|
||||
c, err := mpd.Dial("tcp", fmt.Sprintf("%s:%d", config.Cfg.MPD.Hostname, config.Cfg.MPD.Port))
|
||||
if err != nil {
|
||||
log.Error().Err(err).Msg("could not connect")
|
||||
|
||||
time.Sleep(timeoutWait)
|
||||
|
||||
continue
|
||||
}
|
||||
|
||||
if !t.Stop() {
|
||||
go func() {
|
||||
<-t.C
|
||||
}()
|
||||
}
|
||||
|
||||
return c, nil
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// PlaylistURIS extracts the URIs from the MPD playlist.
|
||||
func PlaylistURIS(m *mpd.Client) ([]string, error) {
|
||||
attrs, err := m.PlaylistInfo(-1, -1)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("could not get playlist: %w", err)
|
||||
}
|
||||
|
||||
// Stores the tracklist it got from the MPD server.
|
||||
uris := []string{}
|
||||
|
||||
// Builds uri list.
|
||||
for _, a := range attrs {
|
||||
uris = append(uris, a["file"])
|
||||
}
|
||||
|
||||
return uris, nil
|
||||
}
|
||||
|
||||
func Stop(logger zerolog.Logger) error {
|
||||
logger.Info().Msg("trying to stop playback")
|
||||
|
||||
m, err := Conn()
|
||||
if err != nil {
|
||||
return fmt.Errorf("could not connect: %w", err)
|
||||
}
|
||||
|
||||
// nolint:wrapcheck
|
||||
return m.Stop()
|
||||
}
|
||||
|
||||
func Clear(logger zerolog.Logger) error {
|
||||
logger.Info().Msg("trying to clear playlist")
|
||||
|
||||
m, err := Conn()
|
||||
if err != nil {
|
||||
return fmt.Errorf("could not connect: %w", err)
|
||||
}
|
||||
|
||||
// nolint:wrapcheck
|
||||
return m.Clear()
|
||||
}
|
||||
|
||||
func Play(logger zerolog.Logger, rfid string, name string, uris []string) error {
|
||||
logger.Info().Msg("trying to add tracks")
|
||||
|
||||
m, err := Conn()
|
||||
if err != nil {
|
||||
return fmt.Errorf("could not connect: %w", err)
|
||||
}
|
||||
|
||||
// Stop playing track.
|
||||
if err := Stop(logger); err != nil {
|
||||
metrics.BoxErrors.Inc()
|
||||
|
||||
return err
|
||||
}
|
||||
|
||||
// Clear playlist.
|
||||
if err := Clear(logger); err != nil {
|
||||
metrics.BoxErrors.Inc()
|
||||
|
||||
return err
|
||||
}
|
||||
|
||||
// Adding every single URI to the playlist.
|
||||
for _, i := range uris {
|
||||
logger.Debug().Str("uri", i).Msg("add track")
|
||||
|
||||
if err := m.Add(i); err != nil {
|
||||
metrics.BoxErrors.Inc()
|
||||
|
||||
return fmt.Errorf("could not add track: %w", err)
|
||||
}
|
||||
}
|
||||
|
||||
// Getting playlist URIs from the MPD server.
|
||||
// This is needed to identify the right metric to use.
|
||||
mpdURIS, err := PlaylistURIS(m)
|
||||
if err != nil {
|
||||
metrics.BoxErrors.Inc()
|
||||
|
||||
return err
|
||||
}
|
||||
|
||||
metrics.NewPlay(rfid, name, mpdURIS)
|
||||
|
||||
// nolint:wrapcheck
|
||||
return m.Play(-1)
|
||||
}
|
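pkg/mpc bundles the MPD plumbing (connect with timeout, stop, clear, queue and play) that used to live inside pkg/run. A usage sketch; it assumes config.Cfg.MPD has already been filled by the usual cobra/viper setup, and the RFID value and stream URI are invented:

package main

import (
	"github.com/rs/zerolog/log"

	"go.xsfx.dev/schnutibox/pkg/mpc"
)

func main() {
	// Invented tag ID and URI, purely for illustration.
	uris := []string{"http://example.com/stream.mp3"}

	if err := mpc.Play(log.Logger, "04B224EA", "example tag", uris); err != nil {
		log.Error().Err(err).Msg("could not play")
	}

	// Later, e.g. when the stop tag is scanned:
	if err := mpc.Stop(log.Logger); err != nil {
		log.Error().Err(err).Msg("could not stop")
	}
}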
@ -33,6 +33,7 @@ const (
|
||||
snapclientGroup = "snapclient"
|
||||
)
|
||||
|
||||
// Cfg represents the structured data for the schnutibox config file.
|
||||
var Cfg = struct {
|
||||
RFIDReader string
|
||||
ReadOnly bool
|
||||
@ -63,7 +64,7 @@ func boxService(filename string, enable bool) error {
|
||||
return fmt.Errorf("could not get service file: %w", err)
|
||||
}
|
||||
|
||||
//nolint:gosec
|
||||
//nolint:gosec,gomnd
|
||||
if err := ioutil.WriteFile(filename, schnutiboxService, 0o644); err != nil {
|
||||
return fmt.Errorf("could not write service file: %w", err)
|
||||
}
|
||||
@ -105,7 +106,7 @@ func ntp() error {
|
||||
return fmt.Errorf("could not get ntp service file: %w", err)
|
||||
}
|
||||
|
||||
// nolint:gosec
|
||||
// nolint:gosec,gomnd
|
||||
if err := ioutil.WriteFile("/etc/systemd/system/ntp.service", ntpService, 0o644); err != nil {
|
||||
return fmt.Errorf("could not copy ntp service file: %w", err)
|
||||
}
|
||||
@ -396,7 +397,7 @@ func cmdlineTxt() error {
|
||||
newLine := strings.TrimSuffix(string(oldLine), "\n") + " " + "fastboot" + " " + "noswap"
|
||||
|
||||
// Write.
|
||||
// nolint:gosec
|
||||
// nolint:gosec,gomnd
|
||||
if err := ioutil.WriteFile("/boot/cmdline.txt", []byte(newLine), 0o644); err != nil {
|
||||
return fmt.Errorf("could not write cmdline.txt: %w", err)
|
||||
}
|
||||
@ -469,6 +470,7 @@ func mopidy() error {
|
||||
// Install.
|
||||
cmd = exec.Command(
|
||||
"apt-get", "install", "-y",
|
||||
"libgstreamer-plugins-bad1.0",
|
||||
"mopidy",
|
||||
"mopidy-alsamixer",
|
||||
"mopidy-mpd",
|
||||
@ -615,7 +617,7 @@ func upmpdcli() error {
|
||||
return fmt.Errorf("could not get upmpdcli.conf: %w", err)
|
||||
}
|
||||
|
||||
// nolint:gosec
|
||||
// nolint:gosec,gomnd
|
||||
if err := ioutil.WriteFile("/etc/upmpdcli.conf", upmpdcliConf, 0o644); err != nil {
|
||||
return fmt.Errorf("could not copy upmpdcli config: %w", err)
|
||||
}
|
||||
|
@ -65,7 +65,7 @@ func (r *RFID) Run() error {
|
||||
}
|
||||
|
||||
go func() {
|
||||
buf := make([]byte, 3)
|
||||
buf := make([]byte, 3) // nolint:gomnd
|
||||
|
||||
rfid := ""
|
||||
|
||||
|
141
pkg/run/run.go
141
pkg/run/run.go
@ -1,75 +1,16 @@
|
||||
//nolint:wrapcheck
|
||||
package run
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
"github.com/fhs/gompd/v2/mpd"
|
||||
"github.com/rs/zerolog"
|
||||
"github.com/rs/zerolog/log"
|
||||
"github.com/spf13/cobra"
|
||||
"github.com/spf13/viper"
|
||||
"go.xsfx.dev/schnutibox/internal/config"
|
||||
"go.xsfx.dev/schnutibox/internal/metrics"
|
||||
"go.xsfx.dev/schnutibox/pkg/mpc"
|
||||
"go.xsfx.dev/schnutibox/pkg/rfid"
|
||||
"go.xsfx.dev/schnutibox/pkg/watcher"
|
||||
"go.xsfx.dev/schnutibox/pkg/web"
|
||||
)
|
||||
|
||||
type mpc struct {
|
||||
conn *mpd.Client
|
||||
}
|
||||
|
||||
func newMpc(conn *mpd.Client) *mpc {
|
||||
return &mpc{conn}
|
||||
}
|
||||
|
||||
func (m *mpc) stop(logger zerolog.Logger) error {
|
||||
logger.Info().Msg("trying to stop playback")
|
||||
|
||||
return m.conn.Stop()
|
||||
}
|
||||
|
||||
func (m *mpc) clear(logger zerolog.Logger) error {
|
||||
logger.Info().Msg("trying to clear playlist")
|
||||
|
||||
return m.conn.Clear()
|
||||
}
|
||||
|
||||
func (m *mpc) play(logger zerolog.Logger, rfid string, name string, uris []string) error {
|
||||
logger.Info().Msg("trying to add tracks")
|
||||
|
||||
// Metric labels.
|
||||
mLabels := []string{rfid, name}
|
||||
|
||||
// Stop playing track.
|
||||
if err := m.stop(logger); err != nil {
|
||||
metrics.BoxErrors.Inc()
|
||||
|
||||
return err
|
||||
}
|
||||
|
||||
// Clear playlist.
|
||||
if err := m.clear(logger); err != nil {
|
||||
metrics.BoxErrors.Inc()
|
||||
|
||||
return err
|
||||
}
|
||||
|
||||
// Adding every single uri to playlist
|
||||
for _, i := range uris {
|
||||
logger.Debug().Str("uri", i).Msg("add track")
|
||||
|
||||
if err := m.conn.Add(i); err != nil {
|
||||
metrics.BoxErrors.Inc()
|
||||
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
metrics.TracksPlayed.WithLabelValues(mLabels...).Inc()
|
||||
|
||||
return m.conn.Play(-1)
|
||||
}
|
||||
|
||||
func Run(cmd *cobra.Command, args []string) {
|
||||
log.Info().Msg("starting the RFID reader")
|
||||
|
||||
@ -77,57 +18,57 @@ func Run(cmd *cobra.Command, args []string) {
|
||||
r := rfid.NewRFID(config.Cfg, idChan)
|
||||
|
||||
if err := r.Run(); err != nil {
|
||||
log.Fatal().Err(err).Msg("could not start RFID reader")
|
||||
if !viper.GetBool("reader.ignore") {
|
||||
log.Fatal().Err(err).Msg("could not start RFID reader")
|
||||
}
|
||||
|
||||
log.Warn().Err(err).Msg("could not start RFID reader. ignoring...")
|
||||
}
|
||||
|
||||
go func() {
|
||||
var id string
|
||||
// Starting watcher.
|
||||
watcher.Run()
|
||||
|
||||
for {
|
||||
// Waiting for a scanned tag.
|
||||
id = <-idChan
|
||||
logger := log.With().Str("id", id).Logger()
|
||||
logger.Info().Msg("received id")
|
||||
// nolint:nestif
|
||||
if !viper.GetBool("reader.ignore") {
|
||||
go func() {
|
||||
var id string
|
||||
|
||||
// Create MPD connection on every received event.
|
||||
c, err := mpd.Dial("tcp", fmt.Sprintf("%s:%d", config.Cfg.MPD.Hostname, config.Cfg.MPD.Port))
|
||||
if err != nil {
|
||||
logger.Error().Err(err).Msg("could not connect to MPD server")
|
||||
for {
|
||||
// Waiting for a scanned tag.
|
||||
id = <-idChan
|
||||
logger := log.With().Str("id", id).Logger()
|
||||
logger.Info().Msg("received id")
|
||||
|
||||
continue
|
||||
}
|
||||
// Check if the stop tag was detected.
|
||||
if id == config.Cfg.Meta.Stop {
|
||||
logger.Info().Msg("stopping")
|
||||
|
||||
m := newMpc(c)
|
||||
if err := mpc.Stop(logger); err != nil {
|
||||
logger.Error().Err(err).Msg("could not stop")
|
||||
}
|
||||
|
||||
// Check if the stop tag was detected.
|
||||
if id == config.Cfg.Meta.Stop {
|
||||
logger.Info().Msg("stopping")
|
||||
if err := mpc.Clear(logger); err != nil {
|
||||
logger.Error().Err(err).Msg("could not clear")
|
||||
}
|
||||
|
||||
if err := m.stop(logger); err != nil {
|
||||
logger.Error().Err(err).Msg("could not stop")
|
||||
continue
|
||||
}
|
||||
|
||||
if err := m.clear(logger); err != nil {
|
||||
logger.Error().Err(err).Msg("could not clear")
|
||||
// Check if there is a track for the ID.
|
||||
tracks, ok := config.Cfg.Tracks[id]
|
||||
if !ok {
|
||||
logger.Error().Msg("could not find track for ID")
|
||||
|
||||
continue
|
||||
}
|
||||
|
||||
continue
|
||||
// Try to play track.
|
||||
if err := mpc.Play(logger, id, tracks.Name, tracks.Uris); err != nil {
|
||||
logger.Error().Err(err).Msg("could not play track")
|
||||
}
|
||||
}
|
||||
|
||||
// Check if there is a track for the ID.
|
||||
tracks, ok := config.Cfg.Tracks[id]
|
||||
if !ok {
|
||||
logger.Error().Msg("could not find track for ID")
|
||||
|
||||
continue
|
||||
}
|
||||
|
||||
// Try to play track.
|
||||
if err := m.play(logger, id, tracks.Name, tracks.Uris); err != nil {
|
||||
logger.Error().Err(err).Msg("could not play track")
|
||||
}
|
||||
}
|
||||
}()
|
||||
}()
|
||||
}
|
||||
|
||||
// Running web interface. Blocking.
|
||||
web.Run(cmd, args)
|
||||
|
@ -10,6 +10,7 @@ import (
|
||||
"go.xsfx.dev/logginghandler"
|
||||
)
|
||||
|
||||
// Log is the global sse logger struct.
|
||||
var Log *SSELog
|
||||
|
||||
type SSELog struct {
|
||||
|
79
pkg/timer/timer.go
Normal file
79
pkg/timer/timer.go
Normal file
@ -0,0 +1,79 @@
|
||||
package timer
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
||||
"github.com/golang/protobuf/ptypes/duration"
|
||||
"github.com/rs/zerolog/log"
|
||||
"github.com/spf13/cobra"
|
||||
"github.com/spf13/viper"
|
||||
"go.xsfx.dev/schnutibox/internal/config"
|
||||
"go.xsfx.dev/schnutibox/internal/grpcclient"
|
||||
api "go.xsfx.dev/schnutibox/pkg/api/v1"
|
||||
"go.xsfx.dev/schnutibox/pkg/mpc"
|
||||
"google.golang.org/protobuf/types/known/durationpb"
|
||||
)
|
||||
|
||||
// T is the global Timer object.
|
||||
// nolint:gochecknoglobals
|
||||
var T = &Timer{}
|
||||
|
||||
type Timer struct {
|
||||
Req *api.Timer
|
||||
}
|
||||
|
||||
func (t *Timer) Handle() {
|
||||
if t.Req != nil {
|
||||
// Initialize the current object.
|
||||
if t.Req.Current == nil {
|
||||
t.Req.Current = &duration.Duration{}
|
||||
t.Req.Current.Seconds = t.Req.Duration.Seconds
|
||||
}
|
||||
|
||||
switch {
|
||||
// A timer is currently running.
|
||||
case t.Req.Duration.Seconds != 0 && t.Req.Current.Seconds != 0:
|
||||
log.Debug().
|
||||
Int64("current", t.Req.Current.Seconds).
|
||||
Int64("duration", t.Req.Duration.Seconds).
|
||||
Msg("timer is running")
|
||||
|
||||
if t.Req.Current.Seconds > 0 {
|
||||
t.Req.Current.Seconds--
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
// The timer ran out, so stop playback and reset the duration to 0.
|
||||
case t.Req.Current.Seconds == 0 && t.Req.Duration.Seconds != 0:
|
||||
log.Debug().Msg("stoping timer")
|
||||
|
||||
if err := mpc.Stop(log.Logger); err != nil {
|
||||
log.Error().Err(err).Msg("could not stop")
|
||||
}
|
||||
|
||||
t.Req.Duration.Seconds = 0
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Run is the command line interface for triggering the timer.
|
||||
func Run(cmd *cobra.Command, args []string) {
|
||||
conn, err := grpcclient.Conn(config.Cfg.Web.Hostname, config.Cfg.Web.Port)
|
||||
if err != nil {
|
||||
log.Fatal().Err(err).Msg("could not connect")
|
||||
}
|
||||
|
||||
c := api.NewTimerServiceClient(conn)
|
||||
|
||||
d := durationpb.New(viper.GetDuration("timer.duration"))
|
||||
|
||||
_, err = c.Create(context.Background(), &api.Timer{Duration: d})
|
||||
if err != nil {
|
||||
conn.Close()
|
||||
log.Fatal().Err(err).Msg("could not create timer")
|
||||
}
|
||||
|
||||
conn.Close()
|
||||
log.Info().Msg("added timer")
|
||||
}
|
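Handle is meant to be driven once per second by the watcher: on the first tick it copies Duration into Current, then counts Current down, and once Current reaches zero while Duration is still set it stops MPD playback and resets Duration. A standalone sketch of that state machine; note that the zero transition calls mpc.Stop, so without a reachable MPD server the final tick just logs a connection error:

package main

import (
	"fmt"
	"time"

	api "go.xsfx.dev/schnutibox/pkg/api/v1"
	"go.xsfx.dev/schnutibox/pkg/timer"
	"google.golang.org/protobuf/types/known/durationpb"
)

func main() {
	// Simulates what pkg/watcher does on every tick.
	timer.T.Req = &api.Timer{Duration: durationpb.New(3 * time.Second)}

	for i := 0; i < 5; i++ {
		timer.T.Handle()
		fmt.Println("seconds left:", timer.T.Req.GetCurrent().GetSeconds())
	}
}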
75
pkg/watcher/watcher.go
Normal file
75
pkg/watcher/watcher.go
Normal file
@ -0,0 +1,75 @@
|
||||
package watcher
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"time"
|
||||
|
||||
"github.com/rs/zerolog/log"
|
||||
"go.xsfx.dev/schnutibox/internal/metrics"
|
||||
"go.xsfx.dev/schnutibox/pkg/currentsong"
|
||||
"go.xsfx.dev/schnutibox/pkg/mpc"
|
||||
"go.xsfx.dev/schnutibox/pkg/timer"
|
||||
)
|
||||
|
||||
const tickerTime = time.Second
|
||||
|
||||
// Run runs actions every time tickerTime elapses, over and over again.
|
||||
// Right now it is mostly used for setting metrics.
|
||||
func Run() {
|
||||
log.Debug().Msg("starting watch")
|
||||
|
||||
ticker := time.NewTicker(tickerTime)
|
||||
|
||||
go func() {
|
||||
for {
|
||||
<-ticker.C
|
||||
|
||||
// Timer.
|
||||
go timer.T.Handle()
|
||||
|
||||
// Metrics.
|
||||
go func() {
|
||||
m, err := mpc.Conn()
|
||||
if err != nil {
|
||||
log.Error().Err(err).Msg("could not connect")
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
uris, err := mpc.PlaylistURIS(m)
|
||||
if err != nil {
|
||||
log.Error().Err(err).Msg("could not get playlist uris")
|
||||
metrics.BoxErrors.Inc()
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
// Getting the MPD state.
|
||||
s, err := m.Status()
|
||||
if err != nil {
|
||||
log.Error().Err(err).Msg("could not get status")
|
||||
metrics.BoxErrors.Inc()
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
currentSong, err := m.CurrentSong()
|
||||
if err != nil {
|
||||
log.Error().Err(err).Msg("could not get current song")
|
||||
metrics.BoxErrors.Inc()
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
if len(currentSong) != 0 {
|
||||
currentsong.Write(fmt.Sprintf("%s - %s", currentSong["Artist"], currentSong["Track"]))
|
||||
} else {
|
||||
currentsong.Write("")
|
||||
}
|
||||
|
||||
// Sets the metrics.
|
||||
metrics.Set(uris, s["state"])
|
||||
}()
|
||||
}
|
||||
}()
|
||||
}
|
45
pkg/web/timer_test.go
Normal file
45
pkg/web/timer_test.go
Normal file
@ -0,0 +1,45 @@
|
||||
package web_test
|
||||
|
||||
import (
|
||||
"context"
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/require"
|
||||
api "go.xsfx.dev/schnutibox/pkg/api/v1"
|
||||
"go.xsfx.dev/schnutibox/pkg/web"
|
||||
"google.golang.org/protobuf/types/known/durationpb"
|
||||
)
|
||||
|
||||
func TestTimerService(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
tables := []struct {
|
||||
name string
|
||||
req *api.Timer
|
||||
expected *api.Timer
|
||||
err error
|
||||
}{
|
||||
{
|
||||
"10 seconds",
|
||||
&api.Timer{Duration: &durationpb.Duration{Seconds: 10}},
|
||||
&api.Timer{Duration: &durationpb.Duration{Seconds: 10}},
|
||||
nil,
|
||||
},
|
||||
}
|
||||
|
||||
for _, table := range tables {
|
||||
table := table
|
||||
t.Run(table.name, func(t *testing.T) {
|
||||
t.Parallel()
|
||||
require := require.New(t)
|
||||
ctx := context.Background()
|
||||
timerSvc := web.TimerServer{}
|
||||
resp, err := timerSvc.Create(ctx, &api.Timer{Duration: &durationpb.Duration{Seconds: 10}})
|
||||
if table.err == nil {
|
||||
require.NoError(err)
|
||||
}
|
||||
|
||||
require.Equal(table.expected, resp)
|
||||
})
|
||||
}
|
||||
}
|
@ -5,9 +5,11 @@ import (
|
||||
"fmt"
|
||||
"html/template"
|
||||
"net/http"
|
||||
"net/http/pprof"
|
||||
"strings"
|
||||
|
||||
"github.com/grpc-ecosystem/grpc-gateway/v2/runtime"
|
||||
zerolog "github.com/philip-bui/grpc-zerolog"
|
||||
"github.com/prometheus/client_golang/prometheus/promhttp"
|
||||
"github.com/rs/zerolog/log"
|
||||
"github.com/spf13/cobra"
|
||||
@ -15,7 +17,9 @@ import (
|
||||
assets "go.xsfx.dev/schnutibox/assets/web"
|
||||
"go.xsfx.dev/schnutibox/internal/config"
|
||||
api "go.xsfx.dev/schnutibox/pkg/api/v1"
|
||||
"go.xsfx.dev/schnutibox/pkg/currentsong"
|
||||
"go.xsfx.dev/schnutibox/pkg/sselog"
|
||||
"go.xsfx.dev/schnutibox/pkg/timer"
|
||||
"golang.org/x/net/http2"
|
||||
"golang.org/x/net/http2/h2c"
|
||||
"google.golang.org/grpc"
|
||||
@ -53,11 +57,11 @@ func grpcHandlerFunc(grpcServer *grpc.Server, otherHandler http.Handler) http.Ha
|
||||
}), &http2.Server{})
|
||||
}
|
||||
|
||||
type server struct{}
|
||||
type identifyServer struct{}
|
||||
|
||||
// Identify searches in tracks config for entries and returns them.
|
||||
// nolint:goerr113
|
||||
func (s server) Identify(ctx context.Context, in *api.IdentifyRequest) (*api.IdentifyResponse, error) {
|
||||
func (i identifyServer) Identify(ctx context.Context, in *api.IdentifyRequest) (*api.IdentifyResponse, error) {
|
||||
r := &api.IdentifyResponse{}
|
||||
|
||||
if in.Id == "" {
|
||||
@ -75,11 +79,34 @@ func (s server) Identify(ctx context.Context, in *api.IdentifyRequest) (*api.Ide
|
||||
return r, nil
|
||||
}
|
||||
|
||||
type TimerServer struct{}
|
||||
|
||||
func (t TimerServer) Create(ctx context.Context, req *api.Timer) (*api.Timer, error) {
|
||||
timer.T.Req = req
|
||||
|
||||
return timer.T.Req, nil
|
||||
}
|
||||
|
||||
// Get just returns the status of the timer.
|
||||
func (t TimerServer) Get(ctx context.Context, req *api.TimerEmpty) (*api.Timer, error) {
|
||||
// Nothing there yet, so return a fresh struct.
|
||||
if timer.T.Req == nil {
|
||||
return &api.Timer{}, nil
|
||||
}
|
||||
|
||||
return timer.T.Req, nil
|
||||
}
|
||||
|
||||
func currentSong(w http.ResponseWriter, r *http.Request) {}
|
||||
|
||||
func gw(s *grpc.Server, conn string) *runtime.ServeMux {
|
||||
ctx := context.Background()
|
||||
gopts := []grpc.DialOption{grpc.WithInsecure()}
|
||||
|
||||
api.RegisterIdentifierServiceServer(s, server{})
|
||||
api.RegisterIdentifierServiceServer(s, identifyServer{})
|
||||
api.RegisterTimerServiceServer(s, TimerServer{})
|
||||
|
||||
// Adds reflections.
|
||||
reflection.Register(s)
|
||||
|
||||
gwmux := runtime.NewServeMux()
|
||||
@ -87,28 +114,48 @@ func gw(s *grpc.Server, conn string) *runtime.ServeMux {
|
||||
log.Fatal().Err(err).Msg("could not register grpc endpoint")
|
||||
}
|
||||
|
||||
if err := api.RegisterTimerServiceHandlerFromEndpoint(ctx, gwmux, conn, gopts); err != nil {
|
||||
log.Fatal().Err(err).Msg("could not register grpc endpoint")
|
||||
}
|
||||
|
||||
return gwmux
|
||||
}
|
||||
|
||||
func Run(command *cobra.Command, args []string) {
|
||||
// Create host string for serving web.
|
||||
lh := fmt.Sprintf("%s:%d", config.Cfg.Box.Hostname, config.Cfg.Box.Port)
|
||||
lh := fmt.Sprintf("%s:%d", config.Cfg.Web.Hostname, config.Cfg.Web.Port)
|
||||
|
||||
// Create grpc server.
|
||||
grpcServer := grpc.NewServer()
|
||||
grpcServer := grpc.NewServer(
|
||||
zerolog.UnaryInterceptor(),
|
||||
)
|
||||
|
||||
// Define http handlers.
|
||||
mux := http.NewServeMux()
|
||||
mux.Handle("/", logginghandler.Handler(http.HandlerFunc(root)))
|
||||
mux.Handle("/log", logginghandler.Handler(http.HandlerFunc(sselog.LogHandler)))
|
||||
|
||||
mux.Handle("/", http.HandlerFunc(root))
|
||||
|
||||
mux.Handle("/log", http.HandlerFunc(sselog.LogHandler))
|
||||
mux.Handle("/currentsong", http.HandlerFunc(currentsong.Handler))
|
||||
|
||||
mux.Handle(
|
||||
"/static/",
|
||||
logginghandler.Handler(
|
||||
http.StripPrefix("/static/", http.FileServer(http.FS(assets.Files))),
|
||||
),
|
||||
http.StripPrefix("/static/", http.FileServer(assets.Files)),
|
||||
)
|
||||
|
||||
mux.Handle(
|
||||
"/swagger-ui/",
|
||||
http.StripPrefix("/swagger-ui/", http.FileServer(assets.SwaggerUI)),
|
||||
)
|
||||
|
||||
mux.Handle("/metrics", promhttp.Handler())
|
||||
mux.Handle("/api/", http.StripPrefix("/api", gw(grpcServer, lh)))
|
||||
|
||||
mux.Handle("/api/", gw(grpcServer, lh))
|
||||
|
||||
// PPROF.
|
||||
if config.Cfg.Debug.PPROF {
|
||||
mux.HandleFunc("/debug/pprof/", pprof.Index)
|
||||
}
|
||||
|
||||
// Serving http.
|
||||
log.Info().Msgf("serving HTTP on %s...", lh)
|
||||
|
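grpcHandlerFunc (only its tail is visible in this hunk) is the usual h2c trick for serving gRPC and the plain HTTP mux on one listener: HTTP/2 requests with a gRPC content type go to the gRPC server, everything else to the mux. A generic sketch of that pattern, not the project's verbatim code, with a placeholder address:

package main

import (
	"net/http"
	"strings"

	"golang.org/x/net/http2"
	"golang.org/x/net/http2/h2c"
	"google.golang.org/grpc"
)

func splitGRPC(grpcServer *grpc.Server, other http.Handler) http.Handler {
	return h2c.NewHandler(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		// gRPC traffic is HTTP/2 with an application/grpc content type.
		if r.ProtoMajor == 2 && strings.Contains(r.Header.Get("Content-Type"), "application/grpc") {
			grpcServer.ServeHTTP(w, r)

			return
		}
		other.ServeHTTP(w, r)
	}), &http2.Server{})
}

func main() {
	grpcServer := grpc.NewServer()

	mux := http.NewServeMux()
	mux.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) { w.Write([]byte("ok")) })

	// Cleartext HTTP/2 (h2c) lets gRPC and the plain HTTP mux share one port.
	if err := http.ListenAndServe("localhost:8080", splitGRPC(grpcServer, mux)); err != nil {
		panic(err)
	}
}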
@ -1,36 +0,0 @@
|
||||
#!/bin/sh
|
||||
set -ex
|
||||
|
||||
if [ -z "$RASPBIAN_IMAGE" ]; then
|
||||
echo "missing \$RASPBIAN_IMAGE environment variable"
|
||||
exit 1
|
||||
fi
|
||||
imagePath=$RASPBIAN_IMAGE
|
||||
|
||||
# resize partition
|
||||
truncate -s +3G "$imagePath"
|
||||
parted "$imagePath" resizepart 2 3G
|
||||
|
||||
# create loop devices
|
||||
bootLoopDevice="$(kpartx -l "$imagePath" | sed -n 1p | awk '{print $1}')"
|
||||
systemLoopDevice="$(kpartx -l "$imagePath" | sed -n 2p | awk '{print $1}')"
|
||||
|
||||
if [ -z "$bootLoopDevice" ] || [ -z "$systemLoopDevice" ]; then
|
||||
echo "could not extract loop devices"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
kpartx -avs "$imagePath"
|
||||
|
||||
# resize filesystem
|
||||
resize2fs "/dev/mapper/${systemLoopDevice}"
|
||||
|
||||
# enable ssh
|
||||
mkdir -p boot
|
||||
mount -o loop "/dev/mapper/${bootLoopDevice}" boot
|
||||
touch boot/ssh
|
||||
|
||||
# cleaning up
|
||||
umount boot
|
||||
kpartx -d "$imagePath"
|
||||
rm -rf boot
|
3
scripts/putio/main.go
Normal file
3
scripts/putio/main.go
Normal file
@ -0,0 +1,3 @@
|
||||
package main
|
||||
|
||||
func main() {}
|
54
scripts/rpi-image-test/build.sh
Executable file
54
scripts/rpi-image-test/build.sh
Executable file
@ -0,0 +1,54 @@
|
||||
#!/bin/sh -eux
|
||||
|
||||
set -ex
|
||||
|
||||
RASPBIAN_IMAGE=2021-05-07-raspios-buster-armhf-lite.img
|
||||
|
||||
# prepare
|
||||
# =======
|
||||
mkdir /SWAP
|
||||
mount -t 9p -o trans=virtio,version=9p2000.L host0 /SWAP
|
||||
|
||||
echo "http://dl-cdn.alpinelinux.org/alpine/v3.13/community/" >>/etc/apk/repositories
|
||||
apk add aria2 coreutils e2fsprogs-extra parted util-linux multipath-tools
|
||||
|
||||
cd /SWAP
|
||||
|
||||
aria2c --seed-time=0 https://downloads.raspberrypi.org/raspios_lite_armhf/images/raspios_lite_armhf-2021-05-28/2021-05-07-raspios-buster-armhf-lite.zip.torrent
|
||||
unzip 2021-05-07-raspios-buster-armhf-lite.zip
|
||||
|
||||
rm 2021-05-07-raspios-buster-armhf-lite.zip*
|
||||
|
||||
# building
|
||||
# ========
|
||||
imagePath=$RASPBIAN_IMAGE
|
||||
|
||||
# resize partition
|
||||
truncate -s +5G "$imagePath"
|
||||
parted "$imagePath" resizepart 2 5G
|
||||
|
||||
# create loop devices
|
||||
kpartx -avs "$imagePath"
|
||||
|
||||
ls -la /dev/mapper
|
||||
|
||||
bootLoopDevice="$(find /dev/mapper -name 'loop*p1')"
|
||||
systemLoopDevice="$(find /dev/mapper -name 'loop*p2')"
|
||||
|
||||
if [ -z "$bootLoopDevice" ] || [ -z "$systemLoopDevice" ]; then
|
||||
echo "could not extract loop devices"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
# resize filesystem
|
||||
resize2fs "$systemLoopDevice"
|
||||
|
||||
# enable ssh
|
||||
mkdir -p boot
|
||||
mount "$bootLoopDevice" boot
|
||||
touch boot/ssh
|
||||
|
||||
# cleaning up
|
||||
umount boot
|
||||
kpartx -d "$imagePath"
|
||||
rm -rf boot
|
1
tools.go
1
tools.go
@ -6,6 +6,7 @@ import (
|
||||
_ "github.com/bufbuild/buf/cmd/buf"
|
||||
_ "github.com/bufbuild/buf/cmd/protoc-gen-buf-breaking"
|
||||
_ "github.com/bufbuild/buf/cmd/protoc-gen-buf-lint"
|
||||
_ "github.com/cosmtrek/air"
|
||||
_ "github.com/grpc-ecosystem/grpc-gateway/v2/protoc-gen-grpc-gateway"
|
||||
_ "github.com/grpc-ecosystem/grpc-gateway/v2/protoc-gen-openapiv2"
|
||||
_ "google.golang.org/grpc/cmd/protoc-gen-go-grpc"
|
||||
|
3
vendor/github.com/cosmtrek/air/.dockerignore
generated
vendored
Normal file
3
vendor/github.com/cosmtrek/air/.dockerignore
generated
vendored
Normal file
@ -0,0 +1,3 @@
|
||||
bin
|
||||
vendor
|
||||
tmp
|
8
vendor/github.com/cosmtrek/air/.editorconfig
generated
vendored
Normal file
8
vendor/github.com/cosmtrek/air/.editorconfig
generated
vendored
Normal file
@ -0,0 +1,8 @@
|
||||
root = true
|
||||
|
||||
[Makefile]
|
||||
indent_style = tab
|
||||
|
||||
[*.go]
|
||||
indent_style = tab
|
||||
indent_size = 4
|
13
vendor/github.com/cosmtrek/air/.gitignore
generated
vendored
Normal file
13
vendor/github.com/cosmtrek/air/.gitignore
generated
vendored
Normal file
@ -0,0 +1,13 @@
|
||||
*.o
|
||||
*.a
|
||||
*.so
|
||||
*.test
|
||||
*.prof
|
||||
*.out
|
||||
|
||||
vendor/
|
||||
tmp/
|
||||
|
||||
# IDE specific files
|
||||
.vscode
|
||||
.idea
|
16
vendor/github.com/cosmtrek/air/.goreleaser.yml
generated
vendored
Normal file
16
vendor/github.com/cosmtrek/air/.goreleaser.yml
generated
vendored
Normal file
@ -0,0 +1,16 @@
|
||||
builds:
|
||||
- goos:
|
||||
- linux
|
||||
- windows
|
||||
- darwin
|
||||
ignore:
|
||||
- goos: darwin
|
||||
goarch: 386
|
||||
ldflags:
|
||||
- -s -w -X "main.airVersion={{.Version}}"
|
||||
- -s -w -X "main.goVersion={{.Env.GOVERSION}}"
|
||||
archives:
|
||||
- id: tar.gz
|
||||
format: tar.gz
|
||||
- id: binary
|
||||
format: binary
|
33	vendor/github.com/cosmtrek/air/CHANGELOG.md  generated  vendored  Normal file
@ -0,0 +1,33 @@
# Air Changelog

All notable changes to this project will be documented in this file.

## [1.12.1] 2020-03-21

* add kill_delay [#49](https://github.com/cosmtrek/air/issues/29), credited to [wsnotify](https://github.com/wsnotify)
* build on Go1.14

## [1.12.0] 2020-01-01

* add stop_on_error [#38](https://github.com/cosmtrek/air/issues/38)
* add exclude_file [#39](https://github.com/cosmtrek/air/issues/39)
* add include_dir [#40](https://github.com/cosmtrek/air/issues/40)

## [1.11.1] 2019-11-10

* Update third-party libraries.
* Fix [#8](https://github.com/cosmtrek/air/issues/8) and [#17](https://github.com/cosmtrek/air/issues/17) that logs display incorrectly.
* support customizing binary in config [#28](https://github.com/cosmtrek/air/issues/28).
* Support deleting tmp dir on exit [20](https://github.com/cosmtrek/air/issues/20).

## [1.10] 2018-12-30

* Fix some panics when unexpected things happened.
* Fix the issue [#8](https://github.com/cosmtrek/air/issues/8) that server log color was overridden. This feature only works on Linux and macOS.
* Fix the issue [#15](https://github.com/cosmtrek/air/issues/15) that favoring defaults if not in the config file.
* Require Go 1.11+ and adopt `go mod` to manage dependencies.
* Rewrite the config file comment.
* Update the demo picture.

P.S.
Bump version to 1.10 to celebrate the date(2018.01.10) that I fall in love with my girlfriend. Besides, today is also my girlfriend's birthday. Happy birthday to my girlfriend, Bay!
12	vendor/github.com/cosmtrek/air/Dockerfile  generated  vendored  Normal file
@ -0,0 +1,12 @@
FROM golang:1.16

MAINTAINER Rick Yu <cosmtrek@gmail.com>

ENV GOPATH /go
ENV GO111MODULE on

COPY . /go/src/github.com/cosmtrek/air
WORKDIR /go/src/github.com/cosmtrek/air
RUN make ci && make install

ENTRYPOINT ["/go/bin/air"]
675	vendor/github.com/cosmtrek/air/LICENSE  generated  vendored  Normal file
@ -0,0 +1,675 @@
(Full, unmodified text of the GNU General Public License, version 3, 29 June 2007, vendored verbatim with the upstream air project.)
49	vendor/github.com/cosmtrek/air/Makefile  generated  vendored  Normal file
@ -0,0 +1,49 @@
AIRVER := $(shell git describe --tags)
LDFLAGS += -X "main.BuildTimestamp=$(shell date -u "+%Y-%m-%d %H:%M:%S")"
LDFLAGS += -X "main.airVersion=$(AIRVER)"
LDFLAGS += -X "main.goVersion=$(shell go version | sed -r 's/go version go(.*)\ .*/\1/')"

GO := GO111MODULE=on go

.PHONY: init
init:
	go get -u golang.org/x/lint/golint
	go get -u golang.org/x/tools/cmd/goimports
	@echo "Install pre-commit hook"
	@ln -s $(shell pwd)/hooks/pre-commit $(shell pwd)/.git/hooks/pre-commit || true
	@chmod +x ./hack/check.sh

.PHONY: setup
setup: init
	git init

.PHONY: check
check:
	@./hack/check.sh ${scope}

.PHONY: ci
ci: init
	@$(GO) mod tidy && $(GO) mod vendor

.PHONY: build
build: check
	$(GO) build -ldflags '$(LDFLAGS)'

.PHONY: install
install: check
	@echo "Installing air..."
	@$(GO) install -ldflags '$(LDFLAGS)'

.PHONY: release
release: check
	GOOS=darwin GOARCH=amd64 $(GO) build -ldflags '$(LDFLAGS)' -o bin/darwin/air
	GOOS=linux GOARCH=amd64 $(GO) build -ldflags '$(LDFLAGS)' -o bin/linux/air
	GOOS=windows GOARCH=amd64 $(GO) build -ldflags '$(LDFLAGS)' -o bin/windows/air.exe

.PHONY: docker-image
docker-image:
	docker build -t cosmtrek/air:$(AIRVER) -f ./Dockerfile .

.PHONY: push-docker-image
push-docker-image:
	docker push cosmtrek/air:$(AIRVER)
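The Makefile above injects the release tag, Go version, and build timestamp into the binary with `-ldflags "-X ..."`. A minimal sketch of the receiving side is shown below; the variable names match the `main.airVersion`, `main.goVersion`, and `main.BuildTimestamp` targets in the LDFLAGS, but the banner format is illustrative and not necessarily what air's real `main.go` prints.

```go
package main

import "fmt"

// Set at build time via the Makefile's LDFLAGS, for example:
//   go build -ldflags '-X "main.airVersion=v1.27.0" -X "main.goVersion=1.17" -X "main.BuildTimestamp=2021-09-01 12:00:00"'
// Without -X the variables stay empty strings.
var (
	airVersion     string
	goVersion      string
	BuildTimestamp string
)

func main() {
	// Hypothetical version banner; the real tool's output may differ.
	fmt.Printf("air %s (go %s, built %s)\n", airVersion, goVersion, BuildTimestamp)
}
```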
136	vendor/github.com/cosmtrek/air/README.md  generated  vendored  Normal file
@ -0,0 +1,136 @@
# Air [](https://github.com/cosmtrek/air/actions?query=workflow%3AGo+branch%3Amaster) [](https://www.codacy.com/app/cosmtrek/air?utm_source=github.com&utm_medium=referral&utm_content=cosmtrek/air&utm_campaign=Badge_Grade) [](https://goreportcard.com/report/github.com/cosmtrek/air) [](https://codecov.io/gh/cosmtrek/air)

:cloud: Live reload for Go apps

## Motivation

When I get started with developing websites in Go and [gin](https://github.com/gin-gonic/gin) framework, it's a pity
that gin lacks live-reloading function. In fact, I tried [fresh](https://github.com/pilu/fresh) and it seems not much
flexible, so I intended to rewrite it in a better way. Finally, Air's born.
In addition, great thanks to [pilu](https://github.com/pilu), no fresh, no air :)

Air is yet another live-reloading command line utility for Go applications in development. Just `air` in your project root directory, leave it alone,
and focus on your code.

NOTE: This tool has nothing to do with hot-deploy for production.

## Features

* Colorful log output
* Customize build or binary command
* Support excluding subdirectories
* Allow watching new directories after Air started
* Better building process

## Installation

### Prefer install.sh

```bash
# binary will be $(go env GOPATH)/bin/air
curl -sSfL https://raw.githubusercontent.com/cosmtrek/air/master/install.sh | sh -s -- -b $(go env GOPATH)/bin

# or install it into ./bin/
curl -sSfL https://raw.githubusercontent.com/cosmtrek/air/master/install.sh | sh -s

air -v
```

P.S. Great thanks mattn's [PR](https://github.com/cosmtrek/air/pull/1) for supporting Windows platform.

### Docker

Please pull this docker image [cosmtrek/air](https://hub.docker.com/r/cosmtrek/air).

```bash
docker run -it --rm \
    -w "<PROJECT>" \
    -e "air_wd=<PROJECT>" \
    -v $(pwd):<PROJECT> \
    -p <PORT>:<APP SERVER PORT> \
    cosmtrek/air
    -c <CONF>
```

For example, one of my project runs in docker:

```bash
docker run -it --rm \
    -w "/go/src/github.com/cosmtrek/hub" \
    -v $(pwd):/go/src/github.com/cosmtrek/hub \
    -p 9090:9090 \
    cosmtrek/air
```

## Usage

For less typing, you could add `alias air='~/.air'` to your `.bashrc` or `.zshrc`.

First enter into your project

```bash
cd /path/to/your_project
```

The simplest usage is run

```bash
# firstly find `.air.toml` in current directory, if not found, use defaults
air -c .air.toml
```

You can initialize the `.air.toml` configuration file to the current directory with the default settings running the following command.

```bash
air init
```

After this you can just run the `air` command without additional arguments and it will use the `.air.toml` file for configuration.

```bash
air
```

For modifying the configuration refer to the [air_example.toml](air_example.toml) file.

### Debug

`air -d` prints all logs.

## Development

Please note that it requires Go 1.13+ since I use `go mod` to manage dependencies.

```bash
# 1. fork this project

# 2. clone it
mkdir -p $GOPATH/src/github.com/cosmtrek
cd $GOPATH/src/github.com/cosmtrek
git clone git@github.com:<YOUR USERNAME>/air.git

# 3. install dependencies
cd air
make ci

# 4. explore it and happy hacking!
make install
```

BTW: Pull requests are welcome~

## Sponsor

<a href="https://www.buymeacoffee.com/36lcNbW" target="_blank"><img src="https://cdn.buymeacoffee.com/buttons/default-orange.png" alt="Buy Me A Coffee" style="height: 51px !important;width: 217px !important;" ></a>

Huge thanks to the following supporters. I've always been remembering your kindness.

* Peter Aba
* Apostolis Anastasiou
* keita koga

## License

[GNU General Public License v3.0](LICENSE)
53	vendor/github.com/cosmtrek/air/air_example.toml  generated  vendored  Normal file
@ -0,0 +1,53 @@
# Config file for [Air](https://github.com/cosmtrek/air) in TOML format

# Working directory
# . or absolute path, please note that the directories following must be under root.
root = "."
tmp_dir = "tmp"

[build]
# Just plain old shell command. You could use `make` as well.
cmd = "go build -o ./tmp/main ."
# Binary file yields from `cmd`.
bin = "tmp/main"
# Customize binary.
full_bin = "APP_ENV=dev APP_USER=air ./tmp/main"
# Watch these filename extensions.
include_ext = ["go", "tpl", "tmpl", "html"]
# Ignore these filename extensions or directories.
exclude_dir = ["assets", "tmp", "vendor", "frontend/node_modules"]
# Watch these directories if you specified.
include_dir = []
# Exclude files.
exclude_file = []
# Exclude specific regular expressions.
exclude_regex = []
# Exclude unchanged files.
exclude_unchanged = true
# Follow symlink for directories
follow_symlink = true
# This log file places in your tmp_dir.
log = "air.log"
# It's not necessary to trigger build each time file changes if it's too frequent.
delay = 1000 # ms
# Stop running old binary when build errors occur.
stop_on_error = true
# Send Interrupt signal before killing process (windows does not support this feature)
send_interrupt = false
# Delay after sending Interrupt signal
kill_delay = 500 # ms

[log]
# Show log time
time = false

[color]
# Customize each part's color. If no color found, use the raw app log.
main = "magenta"
watcher = "cyan"
build = "yellow"
runner = "green"

[misc]
# Delete tmp directory on exit
clean_on_exit = true
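Since air's `go.mod` (next file) pulls in `github.com/pelletier/go-toml`, a config file like the one above is presumably decoded into Go structs. The sketch below shows how a small subset of these keys could be read with go-toml; the struct and field names are illustrative assumptions, not air's actual configuration types.

```go
package main

import (
	"fmt"
	"log"
	"os"

	toml "github.com/pelletier/go-toml"
)

// buildConfig mirrors a few keys of the [build] table above.
// It is a hypothetical example, not air's real config type.
type buildConfig struct {
	Cmd        string   `toml:"cmd"`
	Bin        string   `toml:"bin"`
	IncludeExt []string `toml:"include_ext"`
	ExcludeDir []string `toml:"exclude_dir"`
	Delay      int      `toml:"delay"`
}

type config struct {
	Root   string      `toml:"root"`
	TmpDir string      `toml:"tmp_dir"`
	Build  buildConfig `toml:"build"`
}

func main() {
	data, err := os.ReadFile(".air.toml")
	if err != nil {
		log.Fatal(err)
	}

	var cfg config
	if err := toml.Unmarshal(data, &cfg); err != nil {
		log.Fatal(err)
	}

	fmt.Printf("build cmd %q, watching %v under %s\n", cfg.Build.Cmd, cfg.Build.IncludeExt, cfg.Root)
}
```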
11	vendor/github.com/cosmtrek/air/go.mod  generated  vendored  Normal file
@ -0,0 +1,11 @@
module github.com/cosmtrek/air

go 1.16

require (
	github.com/creack/pty v1.1.11
	github.com/fatih/color v1.10.0
	github.com/fsnotify/fsnotify v1.4.9
	github.com/imdario/mergo v0.3.12
	github.com/pelletier/go-toml v1.8.1
)
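The `fsnotify` requirement is what gives air its live-reload trigger: it watches the project tree and rebuilds on write events. A minimal, self-contained watcher sketch using the same library is shown below; the watched path and log messages are placeholders, and air's own watcher is considerably more involved (recursive directories, exclude rules, debouncing via `delay`).

```go
package main

import (
	"log"

	"github.com/fsnotify/fsnotify"
)

func main() {
	watcher, err := fsnotify.NewWatcher()
	if err != nil {
		log.Fatal(err)
	}
	defer watcher.Close()

	// Watch the current directory; a real live-reloader would add every
	// subdirectory and honor exclude_dir / exclude_regex rules.
	if err := watcher.Add("."); err != nil {
		log.Fatal(err)
	}

	for {
		select {
		case event := <-watcher.Events:
			if event.Op&fsnotify.Write == fsnotify.Write {
				log.Printf("changed: %s (a live-reloader would rebuild here)", event.Name)
			}
		case err := <-watcher.Errors:
			log.Println("watch error:", err)
		}
	}
}
```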
23	vendor/github.com/cosmtrek/air/go.sum  generated  vendored  Normal file
@ -0,0 +1,23 @@
github.com/creack/pty v1.1.11 h1:07n33Z8lZxZ2qwegKbObQohDhXDQxiMMz1NOUGYlesw=
github.com/creack/pty v1.1.11/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/fatih/color v1.10.0 h1:s36xzo75JdqLaaWoiEHk767eHiwo0598uUxyfiPkDsg=
github.com/fatih/color v1.10.0/go.mod h1:ELkj/draVOlAH/xkhN6mQ50Qd0MPOk5AAr3maGEBuJM=
github.com/fsnotify/fsnotify v1.4.9 h1:hsms1Qyu0jgnwNXIxa+/V/PDsU6CfLf6CNO8H7IWoS4=
github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ=
github.com/imdario/mergo v0.3.12 h1:b6R2BslTbIEToALKP7LxUvijTsNI9TAe80pLWN2g/HU=
github.com/imdario/mergo v0.3.12/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA=
github.com/mattn/go-colorable v0.1.8 h1:c1ghPdyEDarC70ftn0y+A/Ee++9zz8ljHG1b13eJ0s8=
github.com/mattn/go-colorable v0.1.8/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc=
github.com/mattn/go-isatty v0.0.12 h1:wuysRhFDzyxgEmMf5xjvJ2M9dZoWAXNNr5LSBS7uHXY=
github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU=
github.com/pelletier/go-toml v1.8.1 h1:1Nf83orprkJyknT6h7zbuEGUEjcyVlCxSUGTENmNCRM=
github.com/pelletier/go-toml v1.8.1/go.mod h1:T2/BmBdy8dvIRq1a/8aqjN41wvWlN4lrapLU/GW4pbc=
golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae h1:/WDfKMnPU+m5M4xB+6x4kaepxRw6jWvR5iDRdvjHgy8=
golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/yaml.v2 v2.3.0 h1:clyUAQHOM3G0M3f5vQj7LuJrETvjVot3Z5el9nffUtU=
gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
375	vendor/github.com/cosmtrek/air/install.sh  generated  vendored  Normal file
@ -0,0 +1,375 @@
|
||||
#!/bin/sh
|
||||
set -e
|
||||
# Code generated by godownloader on 2020-08-12T16:16:22Z. DO NOT EDIT.
|
||||
#
|
||||
|
||||
usage() {
|
||||
this=$1
|
||||
cat <<EOF
|
||||
$this: download go binaries for cosmtrek/air
|
||||
|
||||
Usage: $this [-b] bindir [-d] [tag]
|
||||
-b sets bindir or installation directory, Defaults to ./bin
|
||||
-d turns on debug logging
|
||||
[tag] is a tag from
|
||||
https://github.com/cosmtrek/air/releases
|
||||
If tag is missing, then the latest will be used.
|
||||
|
||||
Generated by godownloader
|
||||
https://github.com/goreleaser/godownloader
|
||||
|
||||
EOF
|
||||
exit 2
|
||||
}
|
||||
|
||||
parse_args() {
|
||||
#BINDIR is ./bin unless set be ENV
|
||||
# over-ridden by flag below
|
||||
|
||||
BINDIR=${BINDIR:-./bin}
|
||||
while getopts "b:dh?x" arg; do
|
||||
case "$arg" in
|
||||
b) BINDIR="$OPTARG" ;;
|
||||
d) log_set_priority 10 ;;
|
||||
h | \?) usage "$0" ;;
|
||||
x) set -x ;;
|
||||
esac
|
||||
done
|
||||
shift $((OPTIND - 1))
|
||||
TAG=$1
|
||||
}
|
||||
# this function wraps all the destructive operations
|
||||
# if a curl|bash cuts off the end of the script due to
|
||||
# network, either nothing will happen or will syntax error
|
||||
# out preventing half-done work
|
||||
execute() {
|
||||
tmpdir=$(mktemp -d)
|
||||
log_debug "downloading files into ${tmpdir}"
|
||||
http_download "${tmpdir}/${TARBALL}" "${TARBALL_URL}"
|
||||
http_download "${tmpdir}/${CHECKSUM}" "${CHECKSUM_URL}"
|
||||
hash_sha256_verify "${tmpdir}/${TARBALL}" "${tmpdir}/${CHECKSUM}"
|
||||
srcdir="${tmpdir}"
|
||||
(cd "${tmpdir}" && untar "${TARBALL}")
|
||||
test ! -d "${BINDIR}" && install -d "${BINDIR}"
|
||||
for binexe in $BINARIES; do
|
||||
if [ "$OS" = "windows" ]; then
|
||||
binexe="${binexe}.exe"
|
||||
fi
|
||||
install "${srcdir}/${binexe}" "${BINDIR}/"
|
||||
log_info "installed ${BINDIR}/${binexe}"
|
||||
done
|
||||
rm -rf "${tmpdir}"
|
||||
}
|
||||
get_binaries() {
|
||||
case "$PLATFORM" in
|
||||
darwin/amd64) BINARIES="air" ;;
|
||||
darwin/arm64) BINARIES="air" ;;
|
||||
linux/386) BINARIES="air" ;;
|
||||
linux/amd64) BINARIES="air" ;;
|
||||
windows/386) BINARIES="air" ;;
|
||||
windows/amd64) BINARIES="air" ;;
|
||||
*)
|
||||
log_crit "platform $PLATFORM is not supported. Make sure this script is up-to-date and file request at https://github.com/${PREFIX}/issues/new"
|
||||
exit 1
|
||||
;;
|
||||
esac
|
||||
}
|
||||
tag_to_version() {
|
||||
if [ -z "${TAG}" ]; then
|
||||
log_info "checking GitHub for latest tag"
|
||||
else
|
||||
log_info "checking GitHub for tag '${TAG}'"
|
||||
fi
|
||||
REALTAG=$(github_release "$OWNER/$REPO" "${TAG}") && true
|
||||
if test -z "$REALTAG"; then
|
||||
log_crit "unable to find '${TAG}' - use 'latest' or see https://github.com/${PREFIX}/releases for details"
|
||||
exit 1
|
||||
fi
|
||||
# if version starts with 'v', remove it
|
||||
TAG="$REALTAG"
|
||||
VERSION=${TAG#v}
|
||||
}
|
||||
adjust_format() {
|
||||
# change format (tar.gz or zip) based on OS
|
||||
true
|
||||
}
|
||||
adjust_os() {
|
||||
# adjust archive name based on OS
|
||||
true
|
||||
}
|
||||
adjust_arch() {
|
||||
# adjust archive name based on ARCH
|
||||
true
|
||||
}
|
||||
|
||||
cat /dev/null <<EOF
|
||||
------------------------------------------------------------------------
|
||||
https://github.com/client9/shlib - portable posix shell functions
|
||||
Public domain - http://unlicense.org
|
||||
https://github.com/client9/shlib/blob/master/LICENSE.md
|
||||
but credit (and pull requests) appreciated.
|
||||
------------------------------------------------------------------------
|
||||
EOF
|
||||
is_command() {
|
||||
command -v "$1" >/dev/null
|
||||
}
|
||||
echoerr() {
|
||||
echo "$@" 1>&2
|
||||
}
|
||||
log_prefix() {
|
||||
echo "$0"
|
||||
}
|
||||
_logp=6
|
||||
log_set_priority() {
|
||||
_logp="$1"
|
||||
}
|
||||
log_priority() {
|
||||
if test -z "$1"; then
|
||||
echo "$_logp"
|
||||
return
|
||||
fi
|
||||
[ "$1" -le "$_logp" ]
|
||||
}
|
||||
log_tag() {
|
||||
case $1 in
|
||||
0) echo "emerg" ;;
|
||||
1) echo "alert" ;;
|
||||
2) echo "crit" ;;
|
||||
3) echo "err" ;;
|
||||
4) echo "warning" ;;
|
||||
5) echo "notice" ;;
|
||||
6) echo "info" ;;
|
||||
7) echo "debug" ;;
|
||||
*) echo "$1" ;;
|
||||
esac
|
||||
}
|
||||
log_debug() {
|
||||
log_priority 7 || return 0
|
||||
echoerr "$(log_prefix)" "$(log_tag 7)" "$@"
|
||||
}
|
||||
log_info() {
|
||||
log_priority 6 || return 0
|
||||
echoerr "$(log_prefix)" "$(log_tag 6)" "$@"
|
||||
}
|
||||
log_err() {
|
||||
log_priority 3 || return 0
|
||||
echoerr "$(log_prefix)" "$(log_tag 3)" "$@"
|
||||
}
|
||||
log_crit() {
|
||||
log_priority 2 || return 0
|
||||
echoerr "$(log_prefix)" "$(log_tag 2)" "$@"
|
||||
}
|
||||
uname_os() {
|
||||
os=$(uname -s | tr '[:upper:]' '[:lower:]')
|
||||
case "$os" in
|
||||
cygwin_nt*) os="windows" ;;
|
||||
mingw*) os="windows" ;;
|
||||
msys_nt*) os="windows" ;;
|
||||
esac
|
||||
echo "$os"
|
||||
}
|
||||
uname_arch() {
|
||||
arch=$(uname -m)
|
||||
case $arch in
|
||||
x86_64) arch="amd64" ;;
|
||||
x86) arch="386" ;;
|
||||
i686) arch="386" ;;
|
||||
i386) arch="386" ;;
|
||||
aarch64) arch="arm64" ;;
|
||||
armv5*) arch="armv5" ;;
|
||||
armv6*) arch="armv6" ;;
|
||||
armv7*) arch="armv7" ;;
|
||||
esac
|
||||
echo "${arch}"
|
||||
}
|
||||
uname_os_check() {
|
||||
os=$(uname_os)
|
||||
case "$os" in
|
||||
darwin) return 0 ;;
|
||||
dragonfly) return 0 ;;
|
||||
freebsd) return 0 ;;
|
||||
linux) return 0 ;;
|
||||
android) return 0 ;;
|
||||
nacl) return 0 ;;
|
||||
netbsd) return 0 ;;
|
||||
openbsd) return 0 ;;
|
||||
plan9) return 0 ;;
|
||||
solaris) return 0 ;;
|
||||
windows) return 0 ;;
|
||||
esac
|
||||
log_crit "uname_os_check '$(uname -s)' got converted to '$os' which is not a GOOS value. Please file bug at https://github.com/client9/shlib"
|
||||
return 1
|
||||
}
|
||||
uname_arch_check() {
|
||||
arch=$(uname_arch)
|
||||
case "$arch" in
|
||||
386) return 0 ;;
|
||||
amd64) return 0 ;;
|
||||
arm64) return 0 ;;
|
||||
armv5) return 0 ;;
|
||||
armv6) return 0 ;;
|
||||
armv7) return 0 ;;
|
||||
ppc64) return 0 ;;
|
||||
ppc64le) return 0 ;;
|
||||
mips) return 0 ;;
|
||||
mipsle) return 0 ;;
|
||||
mips64) return 0 ;;
|
||||
mips64le) return 0 ;;
|
||||
s390x) return 0 ;;
|
||||
amd64p32) return 0 ;;
|
||||
esac
|
||||
log_crit "uname_arch_check '$(uname -m)' got converted to '$arch' which is not a GOARCH value. Please file bug report at https://github.com/client9/shlib"
|
||||
return 1
|
||||
}
|
||||
untar() {
|
||||
tarball=$1
|
||||
case "${tarball}" in
|
||||
*.tar.gz | *.tgz) tar --no-same-owner -xzf "${tarball}" ;;
|
||||
*.tar) tar --no-same-owner -xf "${tarball}" ;;
|
||||
*.zip) unzip "${tarball}" ;;
|
||||
*)
|
||||
log_err "untar unknown archive format for ${tarball}"
|
||||
return 1
|
||||
;;
|
||||
esac
|
||||
}
|
||||
http_download_curl() {
|
||||
local_file=$1
|
||||
source_url=$2
|
||||
header=$3
|
||||
if [ -z "$header" ]; then
|
||||
code=$(curl -w '%{http_code}' -sL -o "$local_file" "$source_url")
|
||||
else
|
||||
code=$(curl -w '%{http_code}' -sL -H "$header" -o "$local_file" "$source_url")
|
||||
fi
|
||||
if [ "$code" != "200" ]; then
|
||||
log_debug "http_download_curl received HTTP status $code"
|
||||
return 1
|
||||
fi
|
||||
return 0
|
||||
}
|
||||
http_download_wget() {
|
||||
local_file=$1
|
||||
source_url=$2
|
||||
header=$3
|
||||
if [ -z "$header" ]; then
|
||||
wget -q -O "$local_file" "$source_url"
|
||||
else
|
||||
wget -q --header "$header" -O "$local_file" "$source_url"
|
||||
fi
|
||||
}
|
||||
http_download() {
|
||||
log_debug "http_download $2"
|
||||
if is_command curl; then
|
||||
http_download_curl "$@"
|
||||
return
|
||||
elif is_command wget; then
|
||||
http_download_wget "$@"
|
||||
return
|
||||
fi
|
||||
log_crit "http_download unable to find wget or curl"
|
||||
return 1
|
||||
}
|
||||
http_copy() {
|
||||
tmp=$(mktemp)
|
||||
http_download "${tmp}" "$1" "$2" || return 1
|
||||
body=$(cat "$tmp")
|
||||
rm -f "${tmp}"
|
||||
echo "$body"
|
||||
}
|
||||
github_release() {
|
||||
owner_repo=$1
|
||||
version=$2
|
||||
test -z "$version" && version="latest"
|
||||
giturl="https://github.com/${owner_repo}/releases/${version}"
|
||||
json=$(http_copy "$giturl" "Accept:application/json")
|
||||
test -z "$json" && return 1
|
||||
version=$(echo "$json" | tr -s '\n' ' ' | sed 's/.*"tag_name":"//' | sed 's/".*//')
|
||||
test -z "$version" && return 1
|
||||
echo "$version"
|
||||
}
|
||||
hash_sha256() {
|
||||
TARGET=${1:-/dev/stdin}
|
||||
if is_command gsha256sum; then
|
||||
hash=$(gsha256sum "$TARGET") || return 1
|
||||
echo "$hash" | cut -d ' ' -f 1
|
||||
elif is_command sha256sum; then
|
||||
hash=$(sha256sum "$TARGET") || return 1
|
||||
echo "$hash" | cut -d ' ' -f 1
|
||||
elif is_command shasum; then
|
||||
hash=$(shasum -a 256 "$TARGET" 2>/dev/null) || return 1
|
||||
echo "$hash" | cut -d ' ' -f 1
|
||||
elif is_command openssl; then
|
||||
hash=$(openssl dgst -sha256 "$TARGET") || return 1
|
||||
echo "$hash" | cut -d ' ' -f a
|
||||
else
|
||||
log_crit "hash_sha256 unable to find command to compute sha-256 hash"
|
||||
return 1
|
||||
fi
|
||||
}
|
||||
hash_sha256_verify() {
|
||||
TARGET=$1
|
||||
checksums=$2
|
||||
if [ -z "$checksums" ]; then
|
||||
log_err "hash_sha256_verify checksum file not specified in arg2"
|
||||
return 1
|
||||
fi
|
||||
BASENAME=${TARGET##*/}
|
||||
want=$(grep "${BASENAME}" "${checksums}" 2>/dev/null | tr '\t' ' ' | cut -d ' ' -f 1)
|
||||
if [ -z "$want" ]; then
|
||||
log_err "hash_sha256_verify unable to find checksum for '${TARGET}' in '${checksums}'"
|
||||
return 1
|
||||
fi
|
||||
got=$(hash_sha256 "$TARGET")
|
||||
if [ "$want" != "$got" ]; then
|
||||
log_err "hash_sha256_verify checksum for '$TARGET' did not verify ${want} vs $got"
|
||||
return 1
|
||||
fi
|
||||
}
|
||||
cat /dev/null <<EOF
|
||||
------------------------------------------------------------------------
|
||||
End of functions from https://github.com/client9/shlib
|
||||
------------------------------------------------------------------------
|
||||
EOF
|
||||
|
||||
PROJECT_NAME="air"
|
||||
OWNER=cosmtrek
|
||||
REPO="air"
|
||||
BINARY=air
|
||||
FORMAT=tar.gz
|
||||
OS=$(uname_os)
|
||||
ARCH=$(uname_arch)
|
||||
PREFIX="$OWNER/$REPO"
|
||||
|
||||
# use in logging routines
|
||||
log_prefix() {
|
||||
echo "$PREFIX"
|
||||
}
|
||||
PLATFORM="${OS}/${ARCH}"
|
||||
GITHUB_DOWNLOAD=https://github.com/${OWNER}/${REPO}/releases/download
|
||||
|
||||
uname_os_check "$OS"
|
||||
uname_arch_check "$ARCH"
|
||||
|
||||
parse_args "$@"
|
||||
|
||||
get_binaries
|
||||
|
||||
tag_to_version
|
||||
|
||||
adjust_format
|
||||
|
||||
adjust_os
|
||||
|
||||
adjust_arch
|
||||
|
||||
log_info "found version: ${VERSION} for ${TAG}/${OS}/${ARCH}"
|
||||
|
||||
NAME=${PROJECT_NAME}_${VERSION}_${OS}_${ARCH}
|
||||
TARBALL=${NAME}.${FORMAT}
|
||||
TARBALL_URL=${GITHUB_DOWNLOAD}/${TAG}/${TARBALL}
|
||||
CHECKSUM=${PROJECT_NAME}_${VERSION}_checksums.txt
|
||||
CHECKSUM_URL=${GITHUB_DOWNLOAD}/${TAG}/${CHECKSUM}
|
||||
|
||||
|
||||
execute
|
75
vendor/github.com/cosmtrek/air/main.go
generated
vendored
Normal file
@ -0,0 +1,75 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"flag"
|
||||
"fmt"
|
||||
"log"
|
||||
"os"
|
||||
"os/signal"
|
||||
"syscall"
|
||||
|
||||
"github.com/cosmtrek/air/runner"
|
||||
)
|
||||
|
||||
var (
|
||||
cfgPath string
|
||||
debugMode bool
|
||||
showVersion bool
|
||||
)
|
||||
|
||||
func helpMessage() {
|
||||
fmt.Fprintf(flag.CommandLine.Output(), "Usage of %s:\n\n", os.Args[0])
|
||||
fmt.Printf("If no command is provided %s will start the runner with the provided flags\n\n", os.Args[0])
|
||||
fmt.Println("Commands:")
|
||||
fmt.Print(" init creates a .air.toml file with default settings to the current directory\n\n")
|
||||
|
||||
fmt.Println("Flags:")
|
||||
flag.PrintDefaults()
|
||||
}
|
||||
|
||||
func init() {
|
||||
flag.Usage = helpMessage
|
||||
flag.StringVar(&cfgPath, "c", "", "config path")
|
||||
flag.BoolVar(&debugMode, "d", false, "debug mode")
|
||||
flag.BoolVar(&showVersion, "v", false, "show version")
|
||||
flag.Parse()
|
||||
}
|
||||
|
||||
func main() {
|
||||
fmt.Printf(`
|
||||
__ _ ___
|
||||
/ /\ | | | |_)
|
||||
/_/--\ |_| |_| \_ %s, built with Go %s
|
||||
|
||||
`, airVersion, goVersion)
|
||||
|
||||
if showVersion {
|
||||
return
|
||||
}
|
||||
|
||||
if debugMode {
|
||||
fmt.Println("[debug] mode")
|
||||
}
|
||||
|
||||
sigs := make(chan os.Signal, 1)
|
||||
signal.Notify(sigs, syscall.SIGINT, syscall.SIGTERM)
|
||||
|
||||
var err error
|
||||
r, err := runner.NewEngine(cfgPath, debugMode)
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
return
|
||||
}
|
||||
go func() {
|
||||
<-sigs
|
||||
r.Stop()
|
||||
}()
|
||||
|
||||
defer func() {
|
||||
if e := recover(); e != nil {
|
||||
log.Fatalf("PANIC: %+v", e)
|
||||
}
|
||||
}()
|
||||
|
||||
r.Run()
|
||||
}
|
6
vendor/github.com/cosmtrek/air/runner/common.go
generated
vendored
Normal file
@ -0,0 +1,6 @@
|
||||
package runner
|
||||
|
||||
const (
|
||||
//PlatformWindows const for windows
|
||||
PlatformWindows = "windows"
|
||||
)
|
292
vendor/github.com/cosmtrek/air/runner/config.go
generated
vendored
Normal file
@ -0,0 +1,292 @@
|
||||
package runner
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"log"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"regexp"
|
||||
"runtime"
|
||||
"time"
|
||||
|
||||
"github.com/imdario/mergo"
|
||||
"github.com/pelletier/go-toml"
|
||||
)
|
||||
|
||||
const (
|
||||
dftTOML = ".air.toml"
|
||||
dftConf = ".air.conf"
|
||||
airWd = "air_wd"
|
||||
)
|
||||
|
||||
type config struct {
|
||||
Root string `toml:"root"`
|
||||
TmpDir string `toml:"tmp_dir"`
|
||||
Build cfgBuild `toml:"build"`
|
||||
Color cfgColor `toml:"color"`
|
||||
Log cfgLog `toml:"log"`
|
||||
Misc cfgMisc `toml:"misc"`
|
||||
}
|
||||
|
||||
type cfgBuild struct {
|
||||
Cmd string `toml:"cmd"`
|
||||
Bin string `toml:"bin"`
|
||||
FullBin string `toml:"full_bin"`
|
||||
Log string `toml:"log"`
|
||||
IncludeExt []string `toml:"include_ext"`
|
||||
ExcludeDir []string `toml:"exclude_dir"`
|
||||
IncludeDir []string `toml:"include_dir"`
|
||||
ExcludeFile []string `toml:"exclude_file"`
|
||||
ExcludeRegex []string `toml:"exclude_regex"`
|
||||
ExcludeUnchanged bool `toml:"exclude_unchanged"`
|
||||
FollowSymlink bool `toml:"follow_symlink"`
|
||||
Delay int `toml:"delay"`
|
||||
StopOnError bool `toml:"stop_on_error"`
|
||||
SendInterrupt bool `toml:"send_interrupt"`
|
||||
KillDelay time.Duration `toml:"kill_delay"`
|
||||
regexCompiled []*regexp.Regexp
|
||||
}
|
||||
|
||||
func (c *cfgBuild) RegexCompiled() ([]*regexp.Regexp, error) {
|
||||
if len(c.ExcludeRegex) > 0 && len(c.regexCompiled) == 0 {
|
||||
c.regexCompiled = make([]*regexp.Regexp, 0, len(c.ExcludeRegex))
|
||||
for _, s := range c.ExcludeRegex {
|
||||
re, err := regexp.Compile(s)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
c.regexCompiled = append(c.regexCompiled, re)
|
||||
}
|
||||
}
|
||||
return c.regexCompiled, nil
|
||||
}
|
||||
|
||||
type cfgLog struct {
|
||||
AddTime bool `toml:"time"`
|
||||
}
|
||||
|
||||
type cfgColor struct {
|
||||
Main string `toml:"main"`
|
||||
Watcher string `toml:"watcher"`
|
||||
Build string `toml:"build"`
|
||||
Runner string `toml:"runner"`
|
||||
App string `toml:"app"`
|
||||
}
|
||||
|
||||
type cfgMisc struct {
|
||||
CleanOnExit bool `toml:"clean_on_exit"`
|
||||
}
|
||||
|
||||
func initConfig(path string) (cfg *config, err error) {
|
||||
if path == "" {
|
||||
cfg, err = defaultPathConfig()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
} else {
|
||||
cfg, err = readConfigOrDefault(path)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
err = mergo.Merge(cfg, defaultConfig())
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
err = cfg.preprocess()
|
||||
return cfg, err
|
||||
}
|
||||
|
||||
func writeDefaultConfig() {
|
||||
confFiles := []string{dftTOML, dftConf}
|
||||
|
||||
for _, fname := range confFiles {
|
||||
fstat, err := os.Stat(fname)
|
||||
if err != nil && !os.IsNotExist(err) {
|
||||
log.Fatal("failed to check for existing configuration")
|
||||
return
|
||||
}
|
||||
if err == nil && fstat != nil {
|
||||
log.Fatal("configuration already exists")
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
file, err := os.Create(dftTOML)
|
||||
if err != nil {
|
||||
log.Fatalf("failed to create a new confiuration: %+v", err)
|
||||
}
|
||||
defer file.Close()
|
||||
|
||||
config := defaultConfig()
|
||||
configFile, err := toml.Marshal(config)
|
||||
if err != nil {
|
||||
log.Fatalf("failed to marshal the default configuration: %+v", err)
|
||||
}
|
||||
|
||||
_, err = file.Write(configFile)
|
||||
if err != nil {
|
||||
log.Fatalf("failed to write to %s: %+v", dftTOML, err)
|
||||
}
|
||||
|
||||
fmt.Printf("%s file created to the current directory with the default settings\n", dftTOML)
|
||||
}
|
||||
|
||||
func defaultPathConfig() (*config, error) {
|
||||
// When path is blank, look for `.air.toml` and then `.air.conf` in `air_wd` or the current working directory; if neither exists, fall back to the defaults.
|
||||
for _, name := range []string{dftTOML, dftConf} {
|
||||
cfg, err := readConfByName(name)
|
||||
if err == nil {
|
||||
if name == dftConf {
|
||||
fmt.Println("`.air.conf` will be deprecated soon, recommend using `.air.toml`.")
|
||||
}
|
||||
return cfg, nil
|
||||
}
|
||||
}
|
||||
|
||||
dftCfg := defaultConfig()
|
||||
return &dftCfg, nil
|
||||
}
|
||||
|
||||
func readConfByName(name string) (*config, error) {
|
||||
var path string
|
||||
if wd := os.Getenv(airWd); wd != "" {
|
||||
path = filepath.Join(wd, name)
|
||||
} else {
|
||||
wd, err := os.Getwd()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
path = filepath.Join(wd, name)
|
||||
}
|
||||
cfg, err := readConfig(path)
|
||||
return cfg, err
|
||||
}
|
||||
|
||||
func defaultConfig() config {
|
||||
build := cfgBuild{
|
||||
Cmd: "go build -o ./tmp/main .",
|
||||
Bin: "./tmp/main",
|
||||
Log: "build-errors.log",
|
||||
IncludeExt: []string{"go", "tpl", "tmpl", "html"},
|
||||
ExcludeDir: []string{"assets", "tmp", "vendor"},
|
||||
Delay: 1000,
|
||||
StopOnError: true,
|
||||
}
|
||||
if runtime.GOOS == PlatformWindows {
|
||||
build.Bin = `tmp\main.exe`
|
||||
build.Cmd = "go build -o ./tmp/main.exe ."
|
||||
}
|
||||
log := cfgLog{
|
||||
AddTime: false,
|
||||
}
|
||||
color := cfgColor{
|
||||
Main: "magenta",
|
||||
Watcher: "cyan",
|
||||
Build: "yellow",
|
||||
Runner: "green",
|
||||
}
|
||||
misc := cfgMisc{
|
||||
CleanOnExit: false,
|
||||
}
|
||||
return config{
|
||||
Root: ".",
|
||||
TmpDir: "tmp",
|
||||
Build: build,
|
||||
Color: color,
|
||||
Log: log,
|
||||
Misc: misc,
|
||||
}
|
||||
}
|
||||
|
||||
func readConfig(path string) (*config, error) {
|
||||
data, err := ioutil.ReadFile(path)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
cfg := new(config)
|
||||
if err = toml.Unmarshal(data, cfg); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return cfg, nil
|
||||
}
|
||||
|
||||
func readConfigOrDefault(path string) (*config, error) {
|
||||
dftCfg := defaultConfig()
|
||||
cfg, err := readConfig(path)
|
||||
if err != nil {
|
||||
return &dftCfg, err
|
||||
}
|
||||
|
||||
return cfg, nil
|
||||
}
|
||||
|
||||
func (c *config) preprocess() error {
|
||||
var err error
|
||||
cwd := os.Getenv(airWd)
|
||||
if cwd != "" {
|
||||
if err = os.Chdir(cwd); err != nil {
|
||||
return err
|
||||
}
|
||||
c.Root = cwd
|
||||
}
|
||||
c.Root, err = expandPath(c.Root)
|
||||
if c.TmpDir == "" {
|
||||
c.TmpDir = "tmp"
|
||||
}
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
ed := c.Build.ExcludeDir
|
||||
for i := range ed {
|
||||
ed[i] = cleanPath(ed[i])
|
||||
}
|
||||
|
||||
adaptToVariousPlatforms(c)
|
||||
|
||||
c.Build.ExcludeDir = ed
|
||||
if len(c.Build.FullBin) > 0 {
|
||||
c.Build.Bin = c.Build.FullBin
|
||||
return err
|
||||
}
|
||||
// Fix windows CMD processor
|
||||
// CMD will not recognize relative path like ./tmp/server
|
||||
c.Build.Bin, err = filepath.Abs(c.Build.Bin)
|
||||
return err
|
||||
}
|
||||
|
||||
func (c *config) colorInfo() map[string]string {
|
||||
return map[string]string{
|
||||
"main": c.Color.Main,
|
||||
"build": c.Color.Build,
|
||||
"runner": c.Color.Runner,
|
||||
"watcher": c.Color.Watcher,
|
||||
}
|
||||
}
|
||||
|
||||
func (c *config) buildLogPath() string {
|
||||
return filepath.Join(c.tmpPath(), c.Build.Log)
|
||||
}
|
||||
|
||||
func (c *config) buildDelay() time.Duration {
|
||||
return time.Duration(c.Build.Delay) * time.Millisecond
|
||||
}
|
||||
|
||||
func (c *config) binPath() string {
|
||||
return filepath.Join(c.Root, c.Build.Bin)
|
||||
}
|
||||
|
||||
func (c *config) tmpPath() string {
|
||||
return filepath.Join(c.Root, c.TmpDir)
|
||||
}
|
||||
|
||||
func (c *config) rel(path string) string {
|
||||
s, err := filepath.Rel(c.Root, path)
|
||||
if err != nil {
|
||||
return ""
|
||||
}
|
||||
return s
|
||||
}
|
471
vendor/github.com/cosmtrek/air/runner/engine.go
generated
vendored
Normal file
@ -0,0 +1,471 @@
|
||||
package runner
|
||||
|
||||
import (
|
||||
"io"
|
||||
"os"
|
||||
"os/exec"
|
||||
"path/filepath"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/fsnotify/fsnotify"
|
||||
)
|
||||
|
||||
// Engine watches the project files and rebuilds and reruns the binary when they change.
|
||||
type Engine struct {
|
||||
config *config
|
||||
logger *logger
|
||||
watcher *fsnotify.Watcher
|
||||
debugMode bool
|
||||
|
||||
eventCh chan string
|
||||
watcherStopCh chan bool
|
||||
buildRunCh chan bool
|
||||
buildRunStopCh chan bool
|
||||
binStopCh chan bool
|
||||
exitCh chan bool
|
||||
|
||||
mu sync.RWMutex
|
||||
binRunning bool
|
||||
watchers uint
|
||||
fileChecksums *checksumMap
|
||||
|
||||
ll sync.Mutex // lock for logger
|
||||
}
|
||||
|
||||
// NewEngine creates an Engine from the configuration at cfgPath; debugMode enables verbose debug logging.
|
||||
func NewEngine(cfgPath string, debugMode bool) (*Engine, error) {
|
||||
var err error
|
||||
cfg, err := initConfig(cfgPath)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
logger := newLogger(cfg)
|
||||
watcher, err := fsnotify.NewWatcher()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
e := Engine{
|
||||
config: cfg,
|
||||
logger: logger,
|
||||
watcher: watcher,
|
||||
debugMode: debugMode,
|
||||
eventCh: make(chan string, 1000),
|
||||
watcherStopCh: make(chan bool, 10),
|
||||
buildRunCh: make(chan bool, 1),
|
||||
buildRunStopCh: make(chan bool, 1),
|
||||
binStopCh: make(chan bool),
|
||||
exitCh: make(chan bool),
|
||||
binRunning: false,
|
||||
watchers: 0,
|
||||
}
|
||||
|
||||
if cfg.Build.ExcludeUnchanged {
|
||||
e.fileChecksums = &checksumMap{m: make(map[string]string)}
|
||||
}
|
||||
|
||||
return &e, nil
|
||||
}
|
||||
|
||||
// Run starts the engine: it handles the "init" subcommand, sets up file watching, and enters the main loop.
|
||||
func (e *Engine) Run() {
|
||||
if len(os.Args) > 1 && os.Args[1] == "init" {
|
||||
writeDefaultConfig()
|
||||
return
|
||||
}
|
||||
|
||||
e.mainDebug("CWD: %s", e.config.Root)
|
||||
|
||||
var err error
|
||||
if err = e.checkRunEnv(); err != nil {
|
||||
os.Exit(1)
|
||||
}
|
||||
if err = e.watching(e.config.Root); err != nil {
|
||||
os.Exit(1)
|
||||
}
|
||||
|
||||
e.start()
|
||||
e.cleanup()
|
||||
}
|
||||
|
||||
func (e *Engine) checkRunEnv() error {
|
||||
p := e.config.tmpPath()
|
||||
if _, err := os.Stat(p); os.IsNotExist(err) {
|
||||
e.runnerLog("mkdir %s", p)
|
||||
if err := os.Mkdir(p, 0755); err != nil {
|
||||
e.runnerLog("failed to mkdir, error: %s", err.Error())
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (e *Engine) watching(root string) error {
|
||||
return filepath.Walk(root, func(path string, info os.FileInfo, err error) error {
|
||||
// NOTE: path is absolute
|
||||
if info != nil && !info.IsDir() {
|
||||
return nil
|
||||
}
|
||||
// exclude tmp dir
|
||||
if e.isTmpDir(path) {
|
||||
e.watcherLog("!exclude %s", e.config.rel(path))
|
||||
return filepath.SkipDir
|
||||
}
|
||||
// exclude hidden directories like .git, .idea, etc.
|
||||
if isHiddenDirectory(path) {
|
||||
return filepath.SkipDir
|
||||
}
|
||||
// exclude user specified directories
|
||||
if e.isExcludeDir(path) {
|
||||
e.watcherLog("!exclude %s", e.config.rel(path))
|
||||
return filepath.SkipDir
|
||||
}
|
||||
isIn, walkDir := e.checkIncludeDir(path)
|
||||
if !walkDir {
|
||||
e.watcherLog("!exclude %s", e.config.rel(path))
|
||||
return filepath.SkipDir
|
||||
}
|
||||
if isIn {
|
||||
return e.watchDir(path)
|
||||
}
|
||||
return nil
|
||||
})
|
||||
}
|
||||
|
||||
// cacheFileChecksums calculates and stores checksums for each non-excluded file it finds from root.
|
||||
func (e *Engine) cacheFileChecksums(root string) error {
|
||||
return filepath.Walk(root, func(path string, info os.FileInfo, err error) error {
|
||||
if err != nil {
|
||||
if info.IsDir() {
|
||||
return filepath.SkipDir
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
if !info.Mode().IsRegular() {
|
||||
if e.isTmpDir(path) || isHiddenDirectory(path) || e.isExcludeDir(path) {
|
||||
e.watcherDebug("!exclude checksum %s", e.config.rel(path))
|
||||
return filepath.SkipDir
|
||||
}
|
||||
|
||||
// Follow symbolic link
|
||||
if e.config.Build.FollowSymlink && (info.Mode()&os.ModeSymlink) > 0 {
|
||||
link, err := filepath.EvalSymlinks(path)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
linkInfo, err := os.Stat(link)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if linkInfo.IsDir() {
|
||||
err = e.watchDir(link)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
if e.isExcludeFile(path) || !e.isIncludeExt(path) {
|
||||
e.watcherDebug("!exclude checksum %s", e.config.rel(path))
|
||||
return nil
|
||||
}
|
||||
|
||||
excludeRegex, err := e.isExcludeRegex(path)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if excludeRegex {
|
||||
e.watcherDebug("!exclude checksum %s", e.config.rel(path))
|
||||
return nil
|
||||
}
|
||||
|
||||
// update the checksum cache for the current file
|
||||
_ = e.isModified(path)
|
||||
|
||||
return nil
|
||||
})
|
||||
}
|
||||
|
||||
func (e *Engine) watchDir(path string) error {
|
||||
if err := e.watcher.Add(path); err != nil {
|
||||
e.watcherLog("failed to watching %s, error: %s", path, err.Error())
|
||||
return err
|
||||
}
|
||||
e.watcherLog("watching %s", e.config.rel(path))
|
||||
|
||||
go func() {
|
||||
e.withLock(func() {
|
||||
e.watchers++
|
||||
})
|
||||
defer func() {
|
||||
e.withLock(func() {
|
||||
e.watchers--
|
||||
})
|
||||
}()
|
||||
|
||||
if e.config.Build.ExcludeUnchanged {
|
||||
err := e.cacheFileChecksums(path)
|
||||
if err != nil {
|
||||
e.watcherLog("error building checksum cache: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
for {
|
||||
select {
|
||||
case <-e.watcherStopCh:
|
||||
return
|
||||
case ev := <-e.watcher.Events:
|
||||
e.mainDebug("event: %+v", ev)
|
||||
if !validEvent(ev) {
|
||||
break
|
||||
}
|
||||
if isDir(ev.Name) {
|
||||
e.watchNewDir(ev.Name, removeEvent(ev))
|
||||
break
|
||||
}
|
||||
if e.isExcludeFile(ev.Name) {
|
||||
break
|
||||
}
|
||||
excludeRegex, _ := e.isExcludeRegex(ev.Name)
|
||||
if excludeRegex {
|
||||
break
|
||||
}
|
||||
if !e.isIncludeExt(ev.Name) {
|
||||
break
|
||||
}
|
||||
e.watcherDebug("%s has changed", e.config.rel(ev.Name))
|
||||
e.eventCh <- ev.Name
|
||||
case err := <-e.watcher.Errors:
|
||||
e.watcherLog("error: %s", err.Error())
|
||||
}
|
||||
}
|
||||
}()
|
||||
return nil
|
||||
}
|
||||
|
||||
func (e *Engine) watchNewDir(dir string, removeDir bool) {
|
||||
if e.isTmpDir(dir) {
|
||||
return
|
||||
}
|
||||
if isHiddenDirectory(dir) || e.isExcludeDir(dir) {
|
||||
e.watcherLog("!exclude %s", e.config.rel(dir))
|
||||
return
|
||||
}
|
||||
if removeDir {
|
||||
if err := e.watcher.Remove(dir); err != nil {
|
||||
e.watcherLog("failed to stop watching %s, error: %s", dir, err.Error())
|
||||
}
|
||||
return
|
||||
}
|
||||
go func(dir string) {
|
||||
if err := e.watching(dir); err != nil {
|
||||
e.watcherLog("failed to watching %s, error: %s", dir, err.Error())
|
||||
}
|
||||
}(dir)
|
||||
}
|
||||
|
||||
func (e *Engine) isModified(filename string) bool {
|
||||
newChecksum, err := fileChecksum(filename)
|
||||
if err != nil {
|
||||
e.watcherDebug("can't determine if file was changed: %v - assuming it did without updating cache", err)
|
||||
return true
|
||||
}
|
||||
|
||||
if e.fileChecksums.updateFileChecksum(filename, newChecksum) {
|
||||
e.watcherDebug("stored checksum for %s: %s", e.config.rel(filename), newChecksum)
|
||||
return true
|
||||
}
|
||||
|
||||
return false
|
||||
}
|
||||
|
||||
// start runs the main event loop; it only returns once the engine is stopped via exitCh.
|
||||
func (e *Engine) start() {
|
||||
firstRunCh := make(chan bool, 1)
|
||||
firstRunCh <- true
|
||||
|
||||
for {
|
||||
var filename string
|
||||
|
||||
select {
|
||||
case <-e.exitCh:
|
||||
return
|
||||
case filename = <-e.eventCh:
|
||||
time.Sleep(e.config.buildDelay())
|
||||
e.flushEvents()
|
||||
if !e.isIncludeExt(filename) {
|
||||
continue
|
||||
}
|
||||
if e.config.Build.ExcludeUnchanged {
|
||||
if !e.isModified(filename) {
|
||||
e.mainLog("skipping %s because contents unchanged", e.config.rel(filename))
|
||||
continue
|
||||
}
|
||||
}
|
||||
e.mainLog("%s has changed", e.config.rel(filename))
|
||||
case <-firstRunCh:
|
||||
// first run: break out of the select and fall through to the initial build/run below
|
||||
break
|
||||
}
|
||||
|
||||
select {
|
||||
case <-e.buildRunCh:
|
||||
e.buildRunStopCh <- true
|
||||
default:
|
||||
}
|
||||
e.withLock(func() {
|
||||
if e.binRunning {
|
||||
e.binStopCh <- true
|
||||
}
|
||||
})
|
||||
go e.buildRun()
|
||||
}
|
||||
}
|
||||
|
||||
func (e *Engine) buildRun() {
|
||||
e.buildRunCh <- true
|
||||
defer func() {
|
||||
<-e.buildRunCh
|
||||
}()
|
||||
|
||||
select {
|
||||
case <-e.buildRunStopCh:
|
||||
return
|
||||
default:
|
||||
}
|
||||
var err error
|
||||
if err = e.building(); err != nil {
|
||||
e.buildLog("failed to build, error: %s", err.Error())
|
||||
_ = e.writeBuildErrorLog(err.Error())
|
||||
if e.config.Build.StopOnError {
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
select {
|
||||
case <-e.buildRunStopCh:
|
||||
return
|
||||
default:
|
||||
}
|
||||
if err = e.runBin(); err != nil {
|
||||
e.runnerLog("failed to run, error: %s", err.Error())
|
||||
}
|
||||
}
|
||||
|
||||
func (e *Engine) flushEvents() {
|
||||
for {
|
||||
select {
|
||||
case <-e.eventCh:
|
||||
e.mainDebug("flushing events")
|
||||
default:
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (e *Engine) building() error {
|
||||
var err error
|
||||
e.buildLog("building...")
|
||||
cmd, stdout, stderr, err := e.startCmd(e.config.Build.Cmd)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer func() {
|
||||
stdout.Close()
|
||||
stderr.Close()
|
||||
}()
|
||||
_, _ = io.Copy(os.Stdout, stdout)
|
||||
_, _ = io.Copy(os.Stderr, stderr)
|
||||
// wait for building
|
||||
err = cmd.Wait()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (e *Engine) runBin() error {
|
||||
var err error
|
||||
e.runnerLog("running...")
|
||||
cmd, stdout, stderr, err := e.startCmd(e.config.Build.Bin)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
e.withLock(func() {
|
||||
e.binRunning = true
|
||||
})
|
||||
|
||||
go func() {
|
||||
_, _ = io.Copy(os.Stdout, stdout)
|
||||
_, _ = io.Copy(os.Stderr, stderr)
|
||||
}()
|
||||
|
||||
go func(cmd *exec.Cmd, stdout io.ReadCloser, stderr io.ReadCloser) {
|
||||
<-e.binStopCh
|
||||
e.mainDebug("trying to kill cmd %+v", cmd.Args)
|
||||
defer func() {
|
||||
stdout.Close()
|
||||
stderr.Close()
|
||||
}()
|
||||
|
||||
var err error
|
||||
pid, err := e.killCmd(cmd)
|
||||
if err != nil {
|
||||
e.mainDebug("failed to kill PID %d, error: %s", pid, err.Error())
|
||||
if cmd.ProcessState != nil && !cmd.ProcessState.Exited() {
|
||||
os.Exit(1)
|
||||
}
|
||||
} else {
|
||||
e.mainDebug("cmd killed, pid: %d", pid)
|
||||
}
|
||||
e.withLock(func() {
|
||||
e.binRunning = false
|
||||
})
|
||||
cmdBinPath := cmdPath(e.config.rel(e.config.binPath()))
|
||||
if _, err = os.Stat(cmdBinPath); os.IsNotExist(err) {
|
||||
return
|
||||
}
|
||||
if err = os.Remove(cmdBinPath); err != nil {
|
||||
e.mainLog("failed to remove %s, error: %s", e.config.rel(e.config.binPath()), err)
|
||||
}
|
||||
}(cmd, stdout, stderr)
|
||||
return nil
|
||||
}
|
||||
|
||||
func (e *Engine) cleanup() {
|
||||
e.mainLog("cleaning...")
|
||||
defer e.mainLog("see you again~")
|
||||
|
||||
e.withLock(func() {
|
||||
if e.binRunning {
|
||||
e.binStopCh <- true
|
||||
}
|
||||
})
|
||||
|
||||
e.withLock(func() {
|
||||
for i := 0; i < int(e.watchers); i++ {
|
||||
e.watcherStopCh <- true
|
||||
}
|
||||
})
|
||||
|
||||
var err error
|
||||
if err = e.watcher.Close(); err != nil {
|
||||
e.mainLog("failed to close watcher, error: %s", err.Error())
|
||||
}
|
||||
|
||||
if e.config.Misc.CleanOnExit {
|
||||
e.mainLog("deleting %s", e.config.tmpPath())
|
||||
if err = os.RemoveAll(e.config.tmpPath()); err != nil {
|
||||
e.mainLog("failed to delete tmp dir, err: %+v", err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Stop signals the engine to exit.
|
||||
func (e *Engine) Stop() {
|
||||
e.exitCh <- true
|
||||
}
|
112
vendor/github.com/cosmtrek/air/runner/logger.go
generated
vendored
Normal file
@ -0,0 +1,112 @@
|
||||
package runner
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/fatih/color"
|
||||
)
|
||||
|
||||
var (
|
||||
rawColor = "raw"
|
||||
// TODO: support more colors
|
||||
colorMap = map[string]color.Attribute{
|
||||
"red": color.FgRed,
|
||||
"green": color.FgGreen,
|
||||
"yellow": color.FgYellow,
|
||||
"blue": color.FgBlue,
|
||||
"magenta": color.FgMagenta,
|
||||
"cyan": color.FgCyan,
|
||||
"white": color.FgWhite,
|
||||
}
|
||||
)
|
||||
|
||||
type logFunc func(string, ...interface{})
|
||||
|
||||
type logger struct {
|
||||
config *config
|
||||
colors map[string]string
|
||||
loggers map[string]logFunc
|
||||
}
|
||||
|
||||
func newLogger(cfg *config) *logger {
|
||||
if cfg == nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
colors := cfg.colorInfo()
|
||||
loggers := make(map[string]logFunc, len(colors))
|
||||
for name, nameColor := range colors {
|
||||
loggers[name] = newLogFunc(nameColor, cfg.Log)
|
||||
}
|
||||
loggers["default"] = defaultLogger()
|
||||
return &logger{
|
||||
config: cfg,
|
||||
colors: colors,
|
||||
loggers: loggers,
|
||||
}
|
||||
}
|
||||
|
||||
func newLogFunc(colorname string, cfg cfgLog) logFunc {
|
||||
return func(msg string, v ...interface{}) {
|
||||
// There are escape sequences that format color in the terminal, so we cannot
|
||||
// simply trim the newline from the right.
|
||||
msg = strings.Replace(msg, "\n", "", -1)
|
||||
msg = strings.TrimSpace(msg)
|
||||
if len(msg) == 0 {
|
||||
return
|
||||
}
|
||||
// TODO: filter msg by regex
|
||||
msg = msg + "\n"
|
||||
if cfg.AddTime {
|
||||
t := time.Now().Format("15:04:05")
|
||||
msg = fmt.Sprintf("[%s] %s", t, msg)
|
||||
}
|
||||
if colorname == rawColor {
|
||||
fmt.Fprintf(os.Stdout, msg, v...)
|
||||
} else {
|
||||
color.New(getColor(colorname)).Fprintf(color.Output, msg, v...)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func getColor(name string) color.Attribute {
|
||||
if v, ok := colorMap[name]; ok {
|
||||
return v
|
||||
}
|
||||
return color.FgWhite
|
||||
}
|
||||
|
||||
func (l *logger) main() logFunc {
|
||||
return l.getLogger("main")
|
||||
}
|
||||
|
||||
func (l *logger) build() logFunc {
|
||||
return l.getLogger("build")
|
||||
}
|
||||
|
||||
func (l *logger) runner() logFunc {
|
||||
return l.getLogger("runner")
|
||||
}
|
||||
|
||||
func (l *logger) watcher() logFunc {
|
||||
return l.getLogger("watcher")
|
||||
}
|
||||
|
||||
func rawLogger() logFunc {
|
||||
return newLogFunc("raw", defaultConfig().Log)
|
||||
}
|
||||
|
||||
func defaultLogger() logFunc {
|
||||
return newLogFunc("white", defaultConfig().Log)
|
||||
}
|
||||
|
||||
func (l *logger) getLogger(name string) logFunc {
|
||||
v, ok := l.loggers[name]
|
||||
if !ok {
|
||||
return rawLogger()
|
||||
}
|
||||
return v
|
||||
}
|
268
vendor/github.com/cosmtrek/air/runner/util.go
generated
vendored
Normal file
@ -0,0 +1,268 @@
|
||||
package runner
|
||||
|
||||
import (
|
||||
"crypto/sha256"
|
||||
"encoding/hex"
|
||||
"errors"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"runtime"
|
||||
"strings"
|
||||
"sync"
|
||||
|
||||
"github.com/fsnotify/fsnotify"
|
||||
)
|
||||
|
||||
func (e *Engine) mainLog(format string, v ...interface{}) {
|
||||
e.logWithLock(func() {
|
||||
e.logger.main()(format, v...)
|
||||
})
|
||||
}
|
||||
|
||||
func (e *Engine) mainDebug(format string, v ...interface{}) {
|
||||
if e.debugMode {
|
||||
e.mainLog(format, v...)
|
||||
}
|
||||
}
|
||||
|
||||
func (e *Engine) buildLog(format string, v ...interface{}) {
|
||||
e.logWithLock(func() {
|
||||
e.logger.build()(format, v...)
|
||||
})
|
||||
}
|
||||
|
||||
func (e *Engine) runnerLog(format string, v ...interface{}) {
|
||||
e.logWithLock(func() {
|
||||
e.logger.runner()(format, v...)
|
||||
})
|
||||
}
|
||||
|
||||
func (e *Engine) watcherLog(format string, v ...interface{}) {
|
||||
e.logWithLock(func() {
|
||||
e.logger.watcher()(format, v...)
|
||||
})
|
||||
}
|
||||
|
||||
func (e *Engine) watcherDebug(format string, v ...interface{}) {
|
||||
if e.debugMode {
|
||||
e.watcherLog(format, v...)
|
||||
}
|
||||
}
|
||||
|
||||
func (e *Engine) isTmpDir(path string) bool {
|
||||
return path == e.config.tmpPath()
|
||||
}
|
||||
|
||||
func isHiddenDirectory(path string) bool {
|
||||
return len(path) > 1 && strings.HasPrefix(filepath.Base(path), ".")
|
||||
}
|
||||
|
||||
func cleanPath(path string) string {
|
||||
return strings.TrimSuffix(strings.TrimSpace(path), "/")
|
||||
}
|
||||
|
||||
func (e *Engine) isExcludeDir(path string) bool {
|
||||
cleanName := cleanPath(e.config.rel(path))
|
||||
for _, d := range e.config.Build.ExcludeDir {
|
||||
if cleanName == d {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// return isIncludeDir, walkDir
|
||||
func (e *Engine) checkIncludeDir(path string) (bool, bool) {
|
||||
cleanName := cleanPath(e.config.rel(path))
|
||||
iDir := e.config.Build.IncludeDir
|
||||
if len(iDir) == 0 { // ignore empty
|
||||
return true, true
|
||||
}
|
||||
if cleanName == "." {
|
||||
return false, true
|
||||
}
|
||||
walkDir := false
|
||||
for _, d := range iDir {
|
||||
if d == cleanName {
|
||||
return true, true
|
||||
}
|
||||
if strings.HasPrefix(cleanName, d) { // current dir is sub-directory of `d`
|
||||
return true, true
|
||||
}
|
||||
if strings.HasPrefix(d, cleanName) { // `d` is sub-directory of current dir
|
||||
walkDir = true
|
||||
}
|
||||
}
|
||||
return false, walkDir
|
||||
}
|
||||
|
||||
func (e *Engine) isIncludeExt(path string) bool {
|
||||
ext := filepath.Ext(path)
|
||||
for _, v := range e.config.Build.IncludeExt {
|
||||
if ext == "."+strings.TrimSpace(v) {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func (e *Engine) isExcludeRegex(path string) (bool, error) {
|
||||
regexes, err := e.config.Build.RegexCompiled()
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
for _, re := range regexes {
|
||||
if re.Match([]byte(path)) {
|
||||
return true, nil
|
||||
}
|
||||
}
|
||||
return false, nil
|
||||
}
|
||||
|
||||
func (e *Engine) isExcludeFile(path string) bool {
|
||||
cleanName := cleanPath(e.config.rel(path))
|
||||
for _, d := range e.config.Build.ExcludeFile {
|
||||
matched, err := filepath.Match(d, cleanName)
|
||||
if err == nil && matched {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func (e *Engine) writeBuildErrorLog(msg string) error {
|
||||
var err error
|
||||
f, err := os.OpenFile(e.config.buildLogPath(), os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0644)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if _, err = f.Write([]byte(msg)); err != nil {
|
||||
return err
|
||||
}
|
||||
return f.Close()
|
||||
}
|
||||
|
||||
func (e *Engine) withLock(f func()) {
|
||||
e.mu.Lock()
|
||||
f()
|
||||
e.mu.Unlock()
|
||||
}
|
||||
|
||||
func (e *Engine) logWithLock(f func()) {
|
||||
e.ll.Lock()
|
||||
f()
|
||||
e.ll.Unlock()
|
||||
}
|
||||
|
||||
func expandPath(path string) (string, error) {
|
||||
if strings.HasPrefix(path, "~/") {
|
||||
home := os.Getenv("HOME")
|
||||
return home + path[1:], nil
|
||||
}
|
||||
var err error
|
||||
wd, err := os.Getwd()
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
if path == "." {
|
||||
return wd, nil
|
||||
}
|
||||
if strings.HasPrefix(path, "./") {
|
||||
return wd + path[1:], nil
|
||||
}
|
||||
return path, nil
|
||||
}
|
||||
|
||||
func isDir(path string) bool {
|
||||
i, err := os.Stat(path)
|
||||
if err != nil {
|
||||
return false
|
||||
}
|
||||
return i.IsDir()
|
||||
}
|
||||
|
||||
func validEvent(ev fsnotify.Event) bool {
|
||||
return ev.Op&fsnotify.Create == fsnotify.Create ||
|
||||
ev.Op&fsnotify.Write == fsnotify.Write ||
|
||||
ev.Op&fsnotify.Remove == fsnotify.Remove
|
||||
}
|
||||
|
||||
func removeEvent(ev fsnotify.Event) bool {
|
||||
return ev.Op&fsnotify.Remove == fsnotify.Remove
|
||||
}
|
||||
|
||||
func cmdPath(path string) string {
|
||||
return strings.Split(path, " ")[0]
|
||||
}
|
||||
|
||||
func adaptToVariousPlatforms(c *config) {
|
||||
// The default unix-style configuration does not work as-is on Windows,
|
||||
// so adapt it to Windows conventions here.
|
||||
if runtime.GOOS == PlatformWindows {
|
||||
|
||||
runName := "start"
|
||||
extName := ".exe"
|
||||
originBin := c.Build.Bin
|
||||
if !strings.HasSuffix(c.Build.Bin, extName) {
|
||||
|
||||
c.Build.Bin += extName
|
||||
}
|
||||
|
||||
if 0 < len(c.Build.FullBin) {
|
||||
|
||||
if !strings.HasSuffix(c.Build.FullBin, extName) {
|
||||
|
||||
c.Build.FullBin += extName
|
||||
}
|
||||
if !strings.HasPrefix(c.Build.FullBin, runName) {
|
||||
c.Build.FullBin = runName + " /b " + c.Build.FullBin
|
||||
}
|
||||
}
|
||||
|
||||
// bin=/tmp/main cmd=go build -o ./tmp/main.exe main.go
|
||||
if !strings.Contains(c.Build.Cmd, c.Build.Bin) && strings.Contains(c.Build.Cmd, originBin) {
|
||||
c.Build.Cmd = strings.Replace(c.Build.Cmd, originBin, c.Build.Bin, 1)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// fileChecksum returns a checksum for the given file's contents.
|
||||
func fileChecksum(filename string) (checksum string, err error) {
|
||||
contents, err := ioutil.ReadFile(filename)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
// If the file is empty, an editor might've been in the process of rewriting the file when we read it.
|
||||
// This can happen often if editors are configured to run format after save.
|
||||
// Instead of calculating a new checksum, we'll assume the file was unchanged, but return an error to force a rebuild anyway.
|
||||
if len(contents) == 0 {
|
||||
return "", errors.New("empty file, forcing rebuild without updating checksum")
|
||||
}
|
||||
|
||||
h := sha256.New()
|
||||
if _, err := h.Write(contents); err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
return hex.EncodeToString(h.Sum(nil)), nil
|
||||
}
|
||||
|
||||
// checksumMap is a thread-safe map to store file checksums.
|
||||
type checksumMap struct {
|
||||
l sync.Mutex
|
||||
m map[string]string
|
||||
}
|
||||
|
||||
// updateFileChecksum stores newChecksum for filename if it differs from the cached value and reports whether it was updated.
|
||||
func (a *checksumMap) updateFileChecksum(filename, newChecksum string) (ok bool) {
|
||||
a.l.Lock()
|
||||
defer a.l.Unlock()
|
||||
oldChecksum, ok := a.m[filename]
|
||||
if !ok || oldChecksum != newChecksum {
|
||||
a.m[filename] = newChecksum
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
33
vendor/github.com/cosmtrek/air/runner/util_darwin.go
generated
vendored
Normal file
@ -0,0 +1,33 @@
|
||||
package runner
|
||||
|
||||
import (
|
||||
"io"
|
||||
"os/exec"
|
||||
"syscall"
|
||||
"time"
|
||||
|
||||
"github.com/creack/pty"
|
||||
)
|
||||
|
||||
func (e *Engine) killCmd(cmd *exec.Cmd) (pid int, err error) {
|
||||
pid = cmd.Process.Pid
|
||||
|
||||
if e.config.Build.SendInterrupt {
|
||||
// Send an interrupt first so the process gets a chance to shut down cleanly
|
||||
if err = syscall.Kill(-pid, syscall.SIGINT); err != nil {
|
||||
return
|
||||
}
|
||||
time.Sleep(e.config.Build.KillDelay * time.Millisecond)
|
||||
}
|
||||
// https://stackoverflow.com/questions/22470193/why-wont-go-kill-a-child-process-correctly
|
||||
err = syscall.Kill(-pid, syscall.SIGKILL)
|
||||
// Wait releases any resources associated with the Process.
|
||||
_, _ = cmd.Process.Wait()
|
||||
return pid, err
|
||||
}
|
||||
|
||||
func (e *Engine) startCmd(cmd string) (*exec.Cmd, io.ReadCloser, io.ReadCloser, error) {
|
||||
c := exec.Command("/bin/sh", "-c", cmd)
|
||||
f, err := pty.Start(c)
|
||||
return c, f, f, err
|
||||
}
|
35
vendor/github.com/cosmtrek/air/runner/util_linux.go
generated
vendored
Normal file
@ -0,0 +1,35 @@
|
||||
package runner
|
||||
|
||||
import (
|
||||
"io"
|
||||
"os/exec"
|
||||
"syscall"
|
||||
"time"
|
||||
|
||||
"github.com/creack/pty"
|
||||
)
|
||||
|
||||
func (e *Engine) killCmd(cmd *exec.Cmd) (pid int, err error) {
|
||||
pid = cmd.Process.Pid
|
||||
|
||||
if e.config.Build.SendInterrupt {
|
||||
// Send an interrupt first so the process gets a chance to shut down cleanly
|
||||
if err = syscall.Kill(-pid, syscall.SIGINT); err != nil {
|
||||
return
|
||||
}
|
||||
time.Sleep(e.config.Build.KillDelay * time.Millisecond)
|
||||
}
|
||||
|
||||
// https://stackoverflow.com/questions/22470193/why-wont-go-kill-a-child-process-correctly
|
||||
err = syscall.Kill(-pid, syscall.SIGKILL)
|
||||
|
||||
// Wait releases any resources associated with the Process.
|
||||
_, _ = cmd.Process.Wait()
|
||||
return
|
||||
}
|
||||
|
||||
func (e *Engine) startCmd(cmd string) (*exec.Cmd, io.ReadCloser, io.ReadCloser, error) {
|
||||
c := exec.Command("/bin/sh", "-c", cmd)
|
||||
f, err := pty.Start(c)
|
||||
return c, f, f, err
|
||||
}
|
38
vendor/github.com/cosmtrek/air/runner/util_windows.go
generated
vendored
Normal file
@ -0,0 +1,38 @@
|
||||
package runner
|
||||
|
||||
import (
|
||||
"io"
|
||||
"os/exec"
|
||||
"strconv"
|
||||
"strings"
|
||||
)
|
||||
|
||||
func (e *Engine) killCmd(cmd *exec.Cmd) (pid int, err error) {
|
||||
pid = cmd.Process.Pid
|
||||
// https://stackoverflow.com/a/44551450
|
||||
kill := exec.Command("TASKKILL", "/T", "/F", "/PID", strconv.Itoa(pid))
|
||||
return pid, kill.Run()
|
||||
}
|
||||
|
||||
func (e *Engine) startCmd(cmd string) (*exec.Cmd, io.ReadCloser, io.ReadCloser, error) {
|
||||
var err error
|
||||
|
||||
if !strings.Contains(cmd, ".exe") {
|
||||
e.runnerLog("CMD will not recognize non .exe file for execution, path: %s", cmd)
|
||||
}
|
||||
|
||||
c := exec.Command("cmd", "/c", cmd)
|
||||
stderr, err := c.StderrPipe()
|
||||
if err != nil {
|
||||
return nil, nil, nil, err
|
||||
}
|
||||
stdout, err := c.StdoutPipe()
|
||||
if err != nil {
|
||||
return nil, nil, nil, err
|
||||
}
|
||||
err = c.Start()
|
||||
if err != nil {
|
||||
return nil, nil, nil, err
|
||||
}
|
||||
return c, stdout, stderr, err
|
||||
}
|
4
vendor/github.com/cosmtrek/air/version.go
generated
vendored
Normal file
@ -0,0 +1,4 @@
|
||||
package main
|
||||
|
||||
var airVersion string
|
||||
var goVersion string
|
4
vendor/github.com/creack/pty/.gitignore
generated
vendored
Normal file
@ -0,0 +1,4 @@
|
||||
[568].out
|
||||
_go*
|
||||
_test*
|
||||
_obj
|
14
vendor/github.com/creack/pty/Dockerfile.riscv
generated
vendored
Normal file
@ -0,0 +1,14 @@
|
||||
FROM golang:1.13
|
||||
|
||||
# Clone and compile a riscv-compatible version of the go compiler.
|
||||
RUN git clone https://review.gerrithub.io/riscv/riscv-go /riscv-go
|
||||
# riscvdev branch HEAD as of 2019-06-29.
|
||||
RUN cd /riscv-go && git checkout 04885fddd096d09d4450726064d06dd107e374bf
|
||||
ENV PATH=/riscv-go/misc/riscv:/riscv-go/bin:$PATH
|
||||
RUN cd /riscv-go/src && GOROOT_BOOTSTRAP=$(go env GOROOT) ./make.bash
|
||||
ENV GOROOT=/riscv-go
|
||||
|
||||
# Make sure we compile.
|
||||
WORKDIR pty
|
||||
ADD . .
|
||||
RUN GOOS=linux GOARCH=riscv go build
|
23
vendor/github.com/creack/pty/LICENSE
generated
vendored
Normal file
@ -0,0 +1,23 @@
|
||||
Copyright (c) 2011 Keith Rarick
|
||||
|
||||
Permission is hereby granted, free of charge, to any person
|
||||
obtaining a copy of this software and associated
|
||||
documentation files (the "Software"), to deal in the
|
||||
Software without restriction, including without limitation
|
||||
the rights to use, copy, modify, merge, publish, distribute,
|
||||
sublicense, and/or sell copies of the Software, and to
|
||||
permit persons to whom the Software is furnished to do so,
|
||||
subject to the following conditions:
|
||||
|
||||
The above copyright notice and this permission notice shall
|
||||
be included in all copies or substantial portions of the
|
||||
Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
|
||||
KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
|
||||
WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
|
||||
PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS
|
||||
OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
|
||||
OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
|
||||
OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
|
||||
SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
|
100
vendor/github.com/creack/pty/README.md
generated
vendored
Normal file
@ -0,0 +1,100 @@
|
||||
# pty
|
||||
|
||||
Pty is a Go package for using unix pseudo-terminals.
|
||||
|
||||
## Install
|
||||
|
||||
go get github.com/creack/pty
|
||||
|
||||
## Example
|
||||
|
||||
### Command
|
||||
|
||||
```go
|
||||
package main
|
||||
|
||||
import (
|
||||
"github.com/creack/pty"
|
||||
"io"
|
||||
"os"
|
||||
"os/exec"
|
||||
)
|
||||
|
||||
func main() {
|
||||
c := exec.Command("grep", "--color=auto", "bar")
|
||||
f, err := pty.Start(c)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
|
||||
go func() {
|
||||
f.Write([]byte("foo\n"))
|
||||
f.Write([]byte("bar\n"))
|
||||
f.Write([]byte("baz\n"))
|
||||
f.Write([]byte{4}) // EOT
|
||||
}()
|
||||
io.Copy(os.Stdout, f)
|
||||
}
|
||||
```
|
||||
|
||||
### Shell
|
||||
|
||||
```go
|
||||
package main
|
||||
|
||||
import (
|
||||
"io"
|
||||
"log"
|
||||
"os"
|
||||
"os/exec"
|
||||
"os/signal"
|
||||
"syscall"
|
||||
|
||||
"github.com/creack/pty"
|
||||
"golang.org/x/crypto/ssh/terminal"
|
||||
)
|
||||
|
||||
func test() error {
|
||||
// Create arbitrary command.
|
||||
c := exec.Command("bash")
|
||||
|
||||
// Start the command with a pty.
|
||||
ptmx, err := pty.Start(c)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
// Make sure to close the pty at the end.
|
||||
defer func() { _ = ptmx.Close() }() // Best effort.
|
||||
|
||||
// Handle pty size.
|
||||
ch := make(chan os.Signal, 1)
|
||||
signal.Notify(ch, syscall.SIGWINCH)
|
||||
go func() {
|
||||
for range ch {
|
||||
if err := pty.InheritSize(os.Stdin, ptmx); err != nil {
|
||||
log.Printf("error resizing pty: %s", err)
|
||||
}
|
||||
}
|
||||
}()
|
||||
ch <- syscall.SIGWINCH // Initial resize.
|
||||
|
||||
// Set stdin in raw mode.
|
||||
oldState, err := terminal.MakeRaw(int(os.Stdin.Fd()))
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
defer func() { _ = terminal.Restore(int(os.Stdin.Fd()), oldState) }() // Best effort.
|
||||
|
||||
// Copy stdin to the pty and the pty to stdout.
|
||||
go func() { _, _ = io.Copy(ptmx, os.Stdin) }()
|
||||
_, _ = io.Copy(os.Stdout, ptmx)
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func main() {
|
||||
if err := test(); err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
}
|
||||
```
|
16
vendor/github.com/creack/pty/doc.go
generated
vendored
Normal file
@ -0,0 +1,16 @@
|
||||
// Package pty provides functions for working with Unix terminals.
|
||||
package pty
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"os"
|
||||
)
|
||||
|
||||
// ErrUnsupported is returned if a function is not
|
||||
// available on the current platform.
|
||||
var ErrUnsupported = errors.New("unsupported")
|
||||
|
||||
// Open opens a pty and its corresponding tty.
|
||||
func Open() (pty, tty *os.File, err error) {
|
||||
return open()
|
||||
}
|
4
vendor/github.com/creack/pty/go.mod
generated
vendored
Normal file
@ -0,0 +1,4 @@
|
||||
module github.com/creack/pty
|
||||
|
||||
go 1.13
|
||||
|
13
vendor/github.com/creack/pty/ioctl.go
generated
vendored
Normal file
@ -0,0 +1,13 @@
|
||||
// +build !windows,!solaris
|
||||
|
||||
package pty
|
||||
|
||||
import "syscall"
|
||||
|
||||
func ioctl(fd, cmd, ptr uintptr) error {
|
||||
_, _, e := syscall.Syscall(syscall.SYS_IOCTL, fd, cmd, ptr)
|
||||
if e != 0 {
|
||||
return e
|
||||
}
|
||||
return nil
|
||||
}
|
39
vendor/github.com/creack/pty/ioctl_bsd.go
generated
vendored
Normal file
@ -0,0 +1,39 @@
|
||||
// +build darwin dragonfly freebsd netbsd openbsd
|
||||
|
||||
package pty
|
||||
|
||||
// from <sys/ioccom.h>
|
||||
const (
|
||||
_IOC_VOID uintptr = 0x20000000
|
||||
_IOC_OUT uintptr = 0x40000000
|
||||
_IOC_IN uintptr = 0x80000000
|
||||
_IOC_IN_OUT uintptr = _IOC_OUT | _IOC_IN
|
||||
_IOC_DIRMASK = _IOC_VOID | _IOC_OUT | _IOC_IN
|
||||
|
||||
_IOC_PARAM_SHIFT = 13
|
||||
_IOC_PARAM_MASK = (1 << _IOC_PARAM_SHIFT) - 1
|
||||
)
|
||||
|
||||
func _IOC_PARM_LEN(ioctl uintptr) uintptr {
|
||||
return (ioctl >> 16) & _IOC_PARAM_MASK
|
||||
}
|
||||
|
||||
func _IOC(inout uintptr, group byte, ioctl_num uintptr, param_len uintptr) uintptr {
|
||||
return inout | (param_len&_IOC_PARAM_MASK)<<16 | uintptr(group)<<8 | ioctl_num
|
||||
}
|
||||
|
||||
func _IO(group byte, ioctl_num uintptr) uintptr {
|
||||
return _IOC(_IOC_VOID, group, ioctl_num, 0)
|
||||
}
|
||||
|
||||
func _IOR(group byte, ioctl_num uintptr, param_len uintptr) uintptr {
|
||||
return _IOC(_IOC_OUT, group, ioctl_num, param_len)
|
||||
}
|
||||
|
||||
func _IOW(group byte, ioctl_num uintptr, param_len uintptr) uintptr {
|
||||
return _IOC(_IOC_IN, group, ioctl_num, param_len)
|
||||
}
|
||||
|
||||
func _IOWR(group byte, ioctl_num uintptr, param_len uintptr) uintptr {
|
||||
return _IOC(_IOC_IN_OUT, group, ioctl_num, param_len)
|
||||
}
|
30
vendor/github.com/creack/pty/ioctl_solaris.go
generated
vendored
Normal file
@ -0,0 +1,30 @@
|
||||
package pty
|
||||
|
||||
import (
|
||||
"golang.org/x/sys/unix"
|
||||
"unsafe"
|
||||
)
|
||||
|
||||
const (
|
||||
// see /usr/include/sys/stropts.h
|
||||
I_PUSH = uintptr((int32('S')<<8 | 002))
|
||||
I_STR = uintptr((int32('S')<<8 | 010))
|
||||
I_FIND = uintptr((int32('S')<<8 | 013))
|
||||
// see /usr/include/sys/ptms.h
|
||||
ISPTM = (int32('P') << 8) | 1
|
||||
UNLKPT = (int32('P') << 8) | 2
|
||||
PTSSTTY = (int32('P') << 8) | 3
|
||||
ZONEPT = (int32('P') << 8) | 4
|
||||
OWNERPT = (int32('P') << 8) | 5
|
||||
)
|
||||
|
||||
type strioctl struct {
|
||||
ic_cmd int32
|
||||
ic_timout int32
|
||||
ic_len int32
|
||||
ic_dp unsafe.Pointer
|
||||
}
|
||||
|
||||
func ioctl(fd, cmd, ptr uintptr) error {
|
||||
return unix.IoctlSetInt(int(fd), uint(cmd), int(ptr))
|
||||
}
|
19
vendor/github.com/creack/pty/mktypes.bash
generated
vendored
Normal file
@ -0,0 +1,19 @@
|
||||
#!/usr/bin/env bash
|
||||
|
||||
GOOSARCH="${GOOS}_${GOARCH}"
|
||||
case "$GOOSARCH" in
|
||||
_* | *_ | _)
|
||||
echo 'undefined $GOOS_$GOARCH:' "$GOOSARCH" 1>&2
|
||||
exit 1
|
||||
;;
|
||||
esac
|
||||
|
||||
GODEFS="go tool cgo -godefs"
|
||||
|
||||
$GODEFS types.go |gofmt > ztypes_$GOARCH.go
|
||||
|
||||
case $GOOS in
|
||||
freebsd|dragonfly|openbsd)
|
||||
$GODEFS types_$GOOS.go |gofmt > ztypes_$GOOSARCH.go
|
||||
;;
|
||||
esac
|
Some files were not shown because too many files have changed in this diff