Compare commits
362 commits
feature/no ... master
46 changed files with 4787 additions and 596 deletions
.envrc (new file, 1 line)
@@ -0,0 +1 @@
use nix
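The new `.envrc` relies on direnv's `use nix` helper to load the project's Nix shell on entering the directory. A minimal sketch of enabling it locally, assuming direnv and Nix are already installed:

```bash
# One-time, from the repository root: trust the new .envrc so direnv may load it
direnv allow .
```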
.gitea/workflows/4testing-build.yml.arch (new file, 174 lines)
@@ -0,0 +1,174 @@
|
|||
### This workflow setup instance then build and push images ###
|
||||
name: 4testing multiarch-build
|
||||
run-name: >-
|
||||
Build #${{ inputs.build }} [
|
||||
${{ inputs.amd64 && 'AMD64' || '-' }}
|
||||
${{ inputs.arm64 && 'ARM64' || '-' }}
|
||||
] [
|
||||
${{ inputs.community && 'CE' || '-' }}
|
||||
${{ inputs.developer && 'DE' || '-' }}
|
||||
${{ inputs.enterprise && 'EE' || '-' }}
|
||||
]
|
||||
|
||||
on:
|
||||
workflow_dispatch:
|
||||
inputs:
|
||||
build:
|
||||
description: 'Build number (ex. 45)'
|
||||
type: string
|
||||
required: true
|
||||
amd64:
|
||||
type: boolean
|
||||
description: 'Build AMD64'
|
||||
default: true
|
||||
arm64:
|
||||
type: boolean
|
||||
description: 'Build ARM64'
|
||||
default: true
|
||||
community:
|
||||
type: boolean
|
||||
description: 'Build Community Edition'
|
||||
default: true
|
||||
enterprise:
|
||||
type: boolean
|
||||
description: 'Build Enterprise Edition'
|
||||
default: true
|
||||
developer:
|
||||
type: boolean
|
||||
description: 'Build Developer Edition'
|
||||
default: true
|
||||
|
||||
env:
|
||||
COMPANY_NAME: "onlyoffice"
|
||||
PRODUCT_NAME: "documentserver"
|
||||
|
||||
jobs:
|
||||
prepare:
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- id: matrix
|
||||
run: |
|
||||
set -ex
|
||||
|
||||
BRANCH_NAME=${GITHUB_REF#refs/heads/}
|
||||
if ! [[ $BRANCH_NAME == develop || $BRANCH_NAME =~ hotfix || $BRANCH_NAME =~ release ]]; then
|
||||
echo "Wrong branch."
|
||||
exit 1
|
||||
fi
|
||||
|
||||
[ ${{ github.event.inputs.amd64 }} = true ] && PLATFORMS+=("amd64")
|
||||
[ ${{ github.event.inputs.arm64 }} = true ] && PLATFORMS+=("arm64")
|
||||
if [ -z ${PLATFORMS} ]; then
|
||||
echo "None of the platforms are selected."
|
||||
exit 1
|
||||
fi
|
||||
|
||||
[ ${{ github.event.inputs.community }} = true ] && EDITIONS+=("community")
|
||||
[ ${{ github.event.inputs.enterprise }} = true ] && EDITIONS+=("enterprise")
|
||||
[ ${{ github.event.inputs.developer }} = true ] && EDITIONS+=("developer")
|
||||
if [ -z ${EDITIONS} ]; then
|
||||
echo "None of the editions are selected."
|
||||
exit 1
|
||||
fi
|
||||
echo "editions=$(jq -n -c --arg s "${EDITIONS[*]}" '($s|split(" "))')" >> $GITHUB_OUTPUT
|
||||
outputs:
|
||||
editions: ${{ steps.matrix.outputs.editions }}
|
||||
|
||||
build:
|
||||
name: "Build ${{ matrix.image }}-${{ matrix.edition }}"
|
||||
runs-on: ubuntu-latest
|
||||
needs: prepare
|
||||
strategy:
|
||||
fail-fast: false
|
||||
matrix:
|
||||
image: ["documentserver"]
|
||||
edition: ${{ fromJSON(needs.prepare.outputs.editions) }}
|
||||
steps:
|
||||
- name: Checkout code
|
||||
uses: actions/checkout@v3
|
||||
|
||||
- name: Set up QEMU
|
||||
uses: docker/setup-qemu-action@v2
|
||||
|
||||
- name: Set up Docker Buildx
|
||||
id: buildx
|
||||
uses: docker/setup-buildx-action@v2
|
||||
|
||||
- name: Login to Docker Hub
|
||||
uses: docker/login-action@v2
|
||||
with:
|
||||
username: ${{ secrets.DOCKER_HUB_USERNAME }}
|
||||
password: ${{ secrets.DOCKER_HUB_ACCESS_TOKEN }}
|
||||
|
||||
- name: Build 4testing
|
||||
id: build-ds
|
||||
run: |
|
||||
set -eux
|
||||
|
||||
### ==>> At this step build variable declaration ###
|
||||
|
||||
case ${{ matrix.edition }} in
|
||||
community)
|
||||
PRODUCT_EDITION=""
|
||||
;;
|
||||
enterprise)
|
||||
PRODUCT_EDITION="-ee"
|
||||
;;
|
||||
developer)
|
||||
PRODUCT_EDITION="-de"
|
||||
;;
|
||||
esac
|
||||
|
||||
[ ${{ github.event.inputs.amd64 }} = true ] && PLATFORMS+=("amd64")
|
||||
[ ${{ github.event.inputs.arm64 }} = true ] && PLATFORMS+=("arm64")
|
||||
PLATFORM=$(echo ${PLATFORMS[*]/#/linux/} | tr ' ' ',')
|
||||
|
||||
BRANCH_NAME=${GITHUB_REF#refs/heads/}
|
||||
if [ $BRANCH_NAME = develop ]; then
|
||||
BUILD_CHANNEL=nightly
|
||||
PRODUCT_VERSION=99.99.99
|
||||
elif [[ $BRANCH_NAME =~ hotfix || $BRANCH_NAME =~ release ]]; then
|
||||
BUILD_CHANNEL=test
|
||||
PRODUCT_VERSION=${BRANCH_NAME#*/v}
|
||||
fi
|
||||
BUILD_NUMBER=${{ github.event.inputs.build }}
|
||||
|
||||
export PRODUCT_EDITION
|
||||
export PACKAGE_VERSION=${PRODUCT_VERSION}-${BUILD_NUMBER}
|
||||
export PACKAGE_BASEURL=${{ secrets.REPO_BASEURL }}
|
||||
export BUILD_CHANNEL
|
||||
export PLATFORM
|
||||
export DOCKERFILE=Dockerfile
|
||||
export PREFIX_NAME=4testing-
|
||||
export TAG=${PRODUCT_VERSION}.${BUILD_NUMBER}
|
||||
|
||||
### ==>> Build and push images at this step ###
|
||||
|
||||
docker buildx bake -f docker-bake.hcl ${{ matrix.image }} --push
|
||||
echo "DONE: Build success"
|
||||
|
||||
### Set output for Zap scanner
|
||||
### NOTE: Output will be used only in release/hotfix branches
|
||||
|
||||
echo "version=${TAG}" >> "$GITHUB_OUTPUT"
|
||||
echo "branch=${BRANCH_NAME}" >> "$GITHUB_OUTPUT"
|
||||
shell: bash
|
||||
|
||||
# Run scanner only when edition is community
|
||||
# and branch hit release/ or hotfix/
|
||||
- name: Trigger zap manually
|
||||
if: >-
|
||||
matrix.edition == 'community' &&
|
||||
(startsWith(steps.build-ds.outputs.branch, 'release/') ||
|
||||
startsWith(steps.build-ds.outputs.branch, 'hotfix/'))
|
||||
env:
|
||||
VERSION: ${{ steps.build-ds.outputs.version }}
|
||||
BRANCH: ${{ steps.build-ds.outputs.branch }}
|
||||
GITHUB_TOKEN: ${{ secrets.TOKEN }}
|
||||
run: |
|
||||
gh workflow run zap-ds.yaml \
|
||||
--repo ${{ github.repository }} \
|
||||
-f branch=${BRANCH} \
|
||||
-f version=${VERSION}
|
||||
shell: bash
|
||||
|
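For orientation, a small worked example of the strings the `prepare` and `Build 4testing` steps above assemble from the dispatch inputs (the selections are illustrative):

```bash
# Assume AMD64, ARM64 and all three editions were ticked in the dispatch form
PLATFORMS=(amd64 arm64)
echo "${PLATFORMS[*]/#/linux/}" | tr ' ' ','
# -> linux/amd64,linux/arm64  (the PLATFORM handed to docker buildx bake)

EDITIONS=(community enterprise developer)
jq -n -c --arg s "${EDITIONS[*]}" '($s|split(" "))'
# -> ["community","enterprise","developer"]  (the matrix JSON written to $GITHUB_OUTPUT)
```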
.gitea/workflows/build.yml (new file, 66 lines)
@@ -0,0 +1,66 @@
|
|||
name: Build
|
||||
|
||||
on:
|
||||
schedule:
|
||||
- cron: "0 0 * * 1"
|
||||
push:
|
||||
tags:
|
||||
- 'manual_build'
|
||||
|
||||
jobs:
|
||||
build:
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- name: Checkout
|
||||
uses: actions/checkout@v4
|
||||
|
||||
- name: Get upstream repository
|
||||
run: |
|
||||
git clone https://github.com/ONLYOFFICE/DocumentServer/ upstream
|
||||
|
||||
- name: Extract tags and labels for Docker
|
||||
id: meta
|
||||
run: |
|
||||
export VERSION="$(cd upstream && git describe --tags --abbrev=0 | cut -d 'v' -f2-)"
|
||||
echo "Version: ${VERSION}"
|
||||
echo "tag=${VERSION}" >> $GITHUB_OUTPUT
|
||||
shell: bash
|
||||
|
||||
- name: Check if the image was already built
|
||||
id: check-build-status
|
||||
run: |
|
||||
echo ${{ steps.meta.outputs.tag }}
|
||||
if curl --silent -f -lSL "https://hub.docker.com/v2/repositories/jiriks74/onlyoffice-documentserver/tags/$(echo ${{ steps.meta.outputs.tag }})" > /dev/null; then echo "Already exists" && exit 1; else echo "Doesn't exist" && exit 0; fi
|
||||
|
||||
- name: Set tag in Dockerfile
|
||||
run: |
|
||||
sed -i "s/ARG PACKAGE_VERSION=/ARG PACKAGE_VERSION=$(echo ${{ steps.meta.outputs.tag }} | cut -d ':' -f2)/g" Dockerfile
|
||||
|
||||
- name: Update repositories
|
||||
run: sudo apt update
|
||||
- name: Install Docker
|
||||
uses: awalsh128/cache-apt-pkgs-action@latest
|
||||
with:
|
||||
packages: docker.io
|
||||
|
||||
- name: Set up QEMU
|
||||
uses: docker/setup-qemu-action@v2
|
||||
- name: Set up Docker Buildx
|
||||
uses: docker/setup-buildx-action@v1
|
||||
with:
|
||||
platforms: linux/amd64,linux/arm64
|
||||
|
||||
- name: Login to DockerHub
|
||||
uses: docker/login-action@v1
|
||||
with:
|
||||
username: ${{ secrets.DOCKERHUB_USERNAME }}
|
||||
password: ${{ secrets.DOCKERHUB_TOKEN }}
|
||||
|
||||
- name: Build and push image
|
||||
uses: docker/build-push-action@v2
|
||||
with:
|
||||
push: true
|
||||
tags: jiriks74/onlyoffice-documentserver:${{ steps.meta.outputs.tag }},jiriks74/onlyoffice-documentserver:latest
|
||||
platforms: linux/amd64,linux/arm64
|
||||
context: .
|
||||
file: ./Dockerfile
|
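Besides the weekly cron, the workflow above also runs on a push of the `manual_build` tag; a sketch of triggering it by hand (force-moving the tag is an assumption, used here so the trigger can be repeated):

```bash
git tag -f manual_build
git push -f origin manual_build
```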
.gitea/workflows/cron-rebuild-trigger.yml.arch (new file, 22 lines)
@@ -0,0 +1,22 @@
|
|||
---
|
||||
name: Trigger 4testing rebuild
|
||||
|
||||
run-name: "Weekly 4testing rebuild trigger"
|
||||
|
||||
on:
|
||||
schedule:
|
||||
# Run every Saturday at 10 p.m.
|
||||
- cron: '00 22 * * 6'
|
||||
|
||||
jobs:
|
||||
trigger-rebuild:
|
||||
name: "trigger-rebuild"
|
||||
runs-on: "ubuntu-latest"
|
||||
steps:
|
||||
- name: Rebuild 4testing manually
|
||||
env:
|
||||
GITHUB_TOKEN: ${{ secrets.TOKEN }}
|
||||
run: |
|
||||
gh workflow run rebuild.yml \
|
||||
--repo ONLYOFFICE/Docker-DocumentServer \
|
||||
-f repo=4test
|
.gitea/workflows/rebuild.yml.arch (new file, 224 lines)
@@ -0,0 +1,224 @@
|
|||
---
|
||||
name: Rebuild Docker-Documentserver
|
||||
|
||||
run-name: >
|
||||
Rebuild DocumentServer with secure updates for repo: ${{ github.event.inputs.repo }}
|
||||
|
||||
on:
|
||||
workflow_dispatch:
|
||||
inputs:
|
||||
repo:
|
||||
type: choice
|
||||
description: Please, choose upload repo..
|
||||
options:
|
||||
- '4test'
|
||||
- 'stable'
|
||||
|
||||
permissions:
|
||||
# All other permissions are set to none
|
||||
contents: read
|
||||
# Technically read access while waiting for images should be more than enough. However,
|
||||
# there is a bug in GitHub Actions/Packages and in case private repositories are used, you get a permission
|
||||
# denied error when attempting to just pull private image, changing the token permission to write solves the
|
||||
# issue. This is not dangerous, because if it is for "ONLYOFFICE/Docker-DocumentServer", only maintainers can use ds-rebuild.yaml
|
||||
# If it is for a fork, then the token is read-only anyway.
|
||||
packages: read
|
||||
|
||||
env:
|
||||
COMPANY_NAME: "onlyoffice"
|
||||
PRODUCT_NAME: "documentserver"
|
||||
REGISTRY_URL: "https://hub.docker.com/v2/repositories"
|
||||
|
||||
jobs:
|
||||
rebuild-info:
|
||||
name: "Rebuild-info"
|
||||
runs-on: "ubuntu-22.04"
|
||||
env:
|
||||
REPO_INPUTS: ${{ github.event.inputs.repo }}
|
||||
EVENT: ${{ github.event_name }}
|
||||
outputs:
|
||||
stable-versions: ${{ steps.selective-checks.outputs.stable-versions }}
|
||||
ucs-versions: ${{ steps.selective-checks.outputs.ucs-versions }}
|
||||
minor-tags: ${{ steps.selective-checks.outputs.minor-tags }}
|
||||
ucs-rebuild-condition: ${{ steps.selective-checks.outputs.ucs-rebuild-condition }}
|
||||
prefix-name: ${{ steps.selective-checks.outputs.prefix-name }}
|
||||
repo: ${{ steps.selective-checks.outputs.repo }}
|
||||
steps:
|
||||
- name: Selective checks
|
||||
id: selective-checks
|
||||
run: |
|
||||
set -e
|
||||
|
||||
REPO=${REPO_INPUTS:-"4test"}
|
||||
|
||||
if [ "${REPO}" == "stable" ]; then
|
||||
UCS_REBUILD=true
|
||||
UCS_VERSIONS=($(curl -s -H -X ${REGISTRY_URL}/${COMPANY_NAME}/${PRODUCT_NAME}-ucs/tags/?page_size=100 | \
|
||||
jq -r '.results|.[]|.name' | grep -oxE '[0-9]{1,}.[0-9]{1,}.[0-9]{1,}.1' || true))
|
||||
echo "ucs-versions=$(jq -c -n '$ARGS.positional' --args "${UCS_VERSIONS[@]}")" >> "$GITHUB_OUTPUT"
|
||||
elif
|
||||
[ "${REPO}" == "4test" ]; then
|
||||
UCS_REBUILD=false
|
||||
PREFIX_NAME=4testing-
|
||||
fi
|
||||
|
||||
STABLE_VERSIONS=($(curl -s -H -X ${REGISTRY_URL}/${COMPANY_NAME}/${PRODUCT_NAME}/tags/?page_size=100 | \
|
||||
jq -r '.results|.[]|.name' | grep -oxE '[0-9]{1,}.[0-9]{1,}.[0-9]{1,}.1' || true))
|
||||
|
||||
# When rebuilding stable versions of the document server,
|
||||
# it is necessary to determine the version from which the
|
||||
# minor x.x tag will need to be pushed.
|
||||
|
||||
VERSIONS=(${STABLE_VERSIONS[@]})
|
||||
for i in {1..10}; do
|
||||
if [ -z "${VERSIONS}" ]; then
|
||||
break
|
||||
else
|
||||
TEMPLATE=${VERSIONS[0]%.*.*}
|
||||
TEMPLATE_MINOR=$(printf -- '%s\n' "${VERSIONS[@]}" | grep -o -m 1 "${VERSIONS[0]%.*.*}.[0-9].[0-9]")
|
||||
MINOR_TAGS+=(${TEMPLATE_MINOR%.*})
|
||||
|
||||
for v in ${MINOR_TAGS[@]}; do
|
||||
VERSIONS=(${VERSIONS[@]//${v%.*}.*.*})
|
||||
done
|
||||
fi
|
||||
done
|
||||
|
||||
echo "Stable releases that will be rebuilt"
|
||||
echo "--------------------------------------"
|
||||
echo "${STABLE_VERSIONS[@]}"
|
||||
echo
|
||||
echo
|
||||
echo "UCS releases that will be rebuilt"
|
||||
echo "-----------------------------------"
|
||||
echo "${UCS_VERSIONS[@]}"
|
||||
|
||||
echo "stable-versions=$(jq -c -n '$ARGS.positional' --args "${STABLE_VERSIONS[@]}")" >> "$GITHUB_OUTPUT"
|
||||
echo "minor-tags=${MINOR_TAGS[@]}" >> "$GITHUB_OUTPUT"
|
||||
echo "ucs-rebuild-condition=${UCS_REBUILD}" >> "$GITHUB_OUTPUT"
|
||||
echo "prefix-name=${PREFIX_NAME}" >> "$GITHUB_OUTPUT"
|
||||
echo "repo=${REPO}" >> "$GITHUB_OUTPUT"
|
||||
shell: bash
|
||||
|
||||
re-build-stable:
|
||||
name: "Rebuild stable:${{ matrix.version }} ${{ matrix.edition }}"
|
||||
needs: [rebuild-info]
|
||||
runs-on: ubuntu-latest
|
||||
strategy:
|
||||
fail-fast: false
|
||||
matrix:
|
||||
type: ["stable"]
|
||||
edition: ["", "-ee", "-de"]
|
||||
version: ${{fromJSON(needs.rebuild-info.outputs.stable-versions)}}
|
||||
steps:
|
||||
- name: Checkout code
|
||||
uses: actions/checkout@v3
|
||||
- name: Set up QEMU
|
||||
uses: docker/setup-qemu-action@v2
|
||||
- name: Set up Docker Buildx
|
||||
uses: docker/setup-buildx-action@v2
|
||||
- name: Login to Docker Hub
|
||||
uses: docker/login-action@v2
|
||||
with:
|
||||
username: ${{ secrets.DOCKER_HUB_USERNAME }}
|
||||
password: ${{ secrets.DOCKER_HUB_ACCESS_TOKEN }}
|
||||
# Determines the new build number based
|
||||
# on data from the hub.docker registry
|
||||
- name: Declare release number
|
||||
id: release-number
|
||||
env:
|
||||
REBUILD_VERSION: ${{ matrix.version }}
|
||||
run: |
|
||||
MINOR_VERSION=${REBUILD_VERSION%.*}
|
||||
LAST_RELEASE=$(curl -s -H -X ${REGISTRY_URL}/${COMPANY_NAME}/${PRODUCT_NAME}/tags/?page_size=100 \
|
||||
| jq -r '.results|.[]|.name' | grep -Eo -m1 "${MINOR_VERSION}.[0-9]{1,}")
|
||||
LAST_RELEASE=${LAST_RELEASE#*.*.*.}
|
||||
echo "release-number=$((LAST_RELEASE+1))" >> "$GITHUB_OUTPUT"
|
||||
shell: bash
|
||||
# Note: Rebuilding images with an
|
||||
# extra layer to update security and
|
||||
# all dependencies. Updated tags get +1 relative to the previous release.
|
||||
- name: Re-build documentserver-stable
|
||||
env:
|
||||
MINOR_TAGS_ST: ${{ needs.rebuild-info.outputs.minor-tags }}
|
||||
VERSION: ${{ matrix.version }}
|
||||
RELEASE_NUMBER: ${{ steps.release-number.outputs.release-number }}
|
||||
PREFIX_NAME: ${{ needs.rebuild-info.outputs.prefix-name }}
|
||||
REPO: ${{ needs.rebuild-info.outputs.repo }}
|
||||
PRODUCT_EDITION: ${{ matrix.edition }}
|
||||
run: |
|
||||
set -eux
|
||||
export PULL_TAG=${VERSION}
|
||||
export TAG=${VERSION%.*}.${RELEASE_NUMBER}
|
||||
export SHORTER_TAG=${VERSION%.*}
|
||||
export SHORTEST_TAG=${VERSION%.*.*}
|
||||
|
||||
if [ "${REPO}" == "stable" ]; then
|
||||
MINOR_TAGS=(${MINOR_TAGS_ST})
|
||||
for v in ${MINOR_TAGS[@]}; do
|
||||
if [ "${SHORTER_TAG}" == "${v}" ]; then
|
||||
export PUSH_MAJOR="true"
|
||||
fi
|
||||
done
|
||||
if [ "${SHORTER_TAG}" == "${MINOR_TAGS[0]}" ]; then
|
||||
export LATEST="true"
|
||||
fi
|
||||
fi
|
||||
docker buildx bake -f docker-bake.hcl documentserver-stable-rebuild --push
|
||||
shell: bash
|
||||
re-build-ucs:
|
||||
name: "Rebuild ucs: ${{ matrix.version }} ${{ matrix.edition }}"
|
||||
if: needs.rebuild-info.outputs.ucs-rebuild-condition == 'true'
|
||||
needs: [rebuild-info]
|
||||
runs-on: ubuntu-latest
|
||||
strategy:
|
||||
fail-fast: false
|
||||
matrix:
|
||||
type: ["ucs"]
|
||||
edition: ["", "-ee"]
|
||||
version: ${{fromJSON(needs.rebuild-info.outputs.ucs-versions)}}
|
||||
steps:
|
||||
- name: Checkout code
|
||||
uses: actions/checkout@v3
|
||||
- name: Set up QEMU
|
||||
uses: docker/setup-qemu-action@v2
|
||||
- name: Set up Docker Buildx
|
||||
uses: docker/setup-buildx-action@v2
|
||||
- name: Login to Docker Hub
|
||||
uses: docker/login-action@v2
|
||||
with:
|
||||
username: ${{ secrets.DOCKER_HUB_USERNAME }}
|
||||
password: ${{ secrets.DOCKER_HUB_ACCESS_TOKEN }}
|
||||
# Determines the new build number based
|
||||
# on data from the hub.docker registry
|
||||
- name: Declare release number
|
||||
id: release-number
|
||||
env:
|
||||
REBUILD_VERSION: ${{ matrix.version }}
|
||||
run: |
|
||||
MINOR_VERSION=${REBUILD_VERSION%.*}
|
||||
LAST_RELEASE=$(curl -s -H -X ${REGISTRY_URL}/${COMPANY_NAME}/${PRODUCT_NAME}/tags/?page_size=100 \
|
||||
| jq -r '.results|.[]|.name' | grep -Eo -m1 "${MINOR_VERSION}.[0-9]{1,}")
|
||||
LAST_RELEASE=${LAST_RELEASE#*.*.*.}
|
||||
echo "release-number=$((LAST_RELEASE+1))" >> "$GITHUB_OUTPUT"
|
||||
shell: bash
|
||||
# Note: Rebuilding images with an
|
||||
# extra layer to update security and
|
||||
# all dependencies. Updated tags get +1 relative to the previous release.
|
||||
- name: Re-build documentserver-ucs
|
||||
env:
|
||||
VERSION: ${{ matrix.version }}
|
||||
RELEASE_NUMBER: ${{ steps.release-number.outputs.release-number }}
|
||||
PRODUCT_EDITION: ${{ matrix.edition }}
|
||||
run: |
|
||||
set -eux
|
||||
export PULL_TAG=${VERSION}
|
||||
export TAG=${VERSION%.*}.${RELEASE_NUMBER}
|
||||
export SHORTER_TAG=${VERSION%.*}
|
||||
export SHORTEST_TAG=${VERSION%.*.*}
|
||||
|
||||
export UCS_REBUILD=true
|
||||
export UCS_PREFIX=-ucs
|
||||
|
||||
docker buildx bake -f docker-bake.hcl documentserver-stable-rebuild --push
|
||||
shell: bash
|
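A worked example of the `Declare release number` step above, with illustrative values in place of the real Docker Hub response:

```bash
REBUILD_VERSION=8.1.0.1                    # the stable tag being rebuilt (example value)
MINOR_VERSION=${REBUILD_VERSION%.*}        # -> 8.1.0
LAST_RELEASE=8.1.0.2                       # newest matching tag found in the registry (assumed)
LAST_RELEASE=${LAST_RELEASE#*.*.*.}        # -> 2
echo "release-number=$((LAST_RELEASE+1))"  # -> release-number=3, so the rebuild is pushed as 8.1.0.3
```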
.gitea/workflows/stable-build.yml.arch (new file, 137 lines)
@@ -0,0 +1,137 @@
|
|||
### This workflow setup instance then build and push images ###
|
||||
name: Multi-arch build stable
|
||||
run-name: ${{ inputs.tag }} (${{ inputs.release_number }})
|
||||
|
||||
on:
|
||||
workflow_dispatch:
|
||||
inputs:
|
||||
tag:
|
||||
description: 'Tag for release (ex. 1.2.3.45)'
|
||||
type: string
|
||||
required: true
|
||||
release_number:
|
||||
description: 'Sequence number of the release (ex. x.x.x.<number>)'
|
||||
type: string
|
||||
required: true
|
||||
default: '1'
|
||||
|
||||
env:
|
||||
COMPANY_NAME: "onlyoffice"
|
||||
PRODUCT_NAME: "documentserver"
|
||||
|
||||
jobs:
|
||||
build:
|
||||
name: "Release image: DocumentServer${{ matrix.edition }}"
|
||||
runs-on: ubuntu-latest
|
||||
strategy:
|
||||
fail-fast: false
|
||||
matrix:
|
||||
images: ["documentserver-stable"]
|
||||
edition: ["", "-ee", "-de"]
|
||||
steps:
|
||||
- name: Checkout code
|
||||
uses: actions/checkout@v3
|
||||
|
||||
- name: Set up QEMU
|
||||
uses: docker/setup-qemu-action@v2
|
||||
|
||||
- name: Set up Docker Buildx
|
||||
uses: docker/setup-buildx-action@v2
|
||||
|
||||
- name: Login to Docker Hub
|
||||
uses: docker/login-action@v2
|
||||
with:
|
||||
username: ${{ secrets.DOCKER_HUB_USERNAME }}
|
||||
password: ${{ secrets.DOCKER_HUB_ACCESS_TOKEN }}
|
||||
|
||||
- name: Build documentserver-release
|
||||
run: |
|
||||
set -eux
|
||||
VERSION=${{ github.event.inputs.tag }}
|
||||
RELEASE_NUMBER=${{ github.event.inputs.release_number }}
|
||||
PRODUCT_EDITION=${{ matrix.edition }}
|
||||
TESTING_IMAGE=${COMPANY_NAME}/4testing-${PRODUCT_NAME}${PRODUCT_EDITION}
|
||||
export PRODUCT_EDITION
|
||||
export PULL_TAG=${VERSION}
|
||||
export TAG=${VERSION%.*}.${RELEASE_NUMBER}
|
||||
export SHORTER_TAG=${VERSION%.*}
|
||||
export SHORTEST_TAG=${VERSION%.*.*}
|
||||
docker buildx bake -f docker-bake.hcl ${{ matrix.images }} --push
|
||||
echo "DONE: Build success >> exit with 0"
|
||||
exit 0
|
||||
shell: bash
|
||||
|
||||
build-nonexample:
|
||||
name: "Release image: DocumentServer${{ matrix.edition }}-nonExample"
|
||||
runs-on: ubuntu-latest
|
||||
needs: [build]
|
||||
if: ${{ false }}
|
||||
strategy:
|
||||
fail-fast: false
|
||||
matrix:
|
||||
images: ["documentserver-nonexample"]
|
||||
edition: ["", "-ee", "-de"]
|
||||
steps:
|
||||
- name: Checkout code
|
||||
uses: actions/checkout@v3
|
||||
|
||||
- name: Set up QEMU
|
||||
uses: docker/setup-qemu-action@v2
|
||||
|
||||
- name: Set up Docker Buildx
|
||||
uses: docker/setup-buildx-action@v2
|
||||
|
||||
- name: Login to Docker Hub
|
||||
uses: docker/login-action@v2
|
||||
with:
|
||||
username: ${{ secrets.DOCKER_HUB_USERNAME }}
|
||||
password: ${{ secrets.DOCKER_HUB_ACCESS_TOKEN }}
|
||||
|
||||
- name: build image
|
||||
run: |
|
||||
set -eux
|
||||
VERSION=${{ github.event.inputs.tag }}
|
||||
RELEASE_NUMBER=${{ github.event.inputs.release_number }}
|
||||
export PULL_TAG=${VERSION%.*}.${RELEASE_NUMBER}
|
||||
export PRODUCT_EDITION=${{ matrix.edition }}
|
||||
export TAG=${VERSION%.*}.${RELEASE_NUMBER}
|
||||
docker buildx bake -f docker-bake.hcl ${{ matrix.images }} --push
|
||||
shell: bash
|
||||
|
||||
build-ucs-ubuntu20:
|
||||
name: "Release image: DocumentServer${{ matrix.edition }}-ucs"
|
||||
runs-on: ubuntu-latest
|
||||
strategy:
|
||||
fail-fast: false
|
||||
matrix:
|
||||
edition: ["", "-ee"]
|
||||
steps:
|
||||
- name: Checkout code
|
||||
uses: actions/checkout@v3
|
||||
|
||||
- name: Set up QEMU
|
||||
uses: docker/setup-qemu-action@v2
|
||||
|
||||
- name: Set up Docker Buildx
|
||||
uses: docker/setup-buildx-action@v2
|
||||
|
||||
- name: Login to Docker Hub
|
||||
uses: docker/login-action@v2
|
||||
with:
|
||||
username: ${{ secrets.DOCKER_HUB_USERNAME }}
|
||||
password: ${{ secrets.DOCKER_HUB_ACCESS_TOKEN }}
|
||||
|
||||
- name: build UCS
|
||||
run: |
|
||||
set -eux
|
||||
VERSION=${{ github.event.inputs.tag }}
|
||||
RELEASE_NUMBER=${{ github.event.inputs.release_number }}
|
||||
export PRODUCT_EDITION=${{ matrix.edition }}
|
||||
export PACKAGE_BASEURL=${{ secrets.REPO_BASEURL }}
|
||||
export DOCKERFILE=Dockerfile
|
||||
export BASE_IMAGE=ubuntu:20.04
|
||||
export PG_VERSION=12
|
||||
export TAG=${VERSION%.*}.${RELEASE_NUMBER}
|
||||
export PACKAGE_VERSION=$( echo ${VERSION} | sed -E 's/(.*)\./\1-/')
|
||||
docker buildx bake -f docker-bake.hcl documentserver-ucs --push
|
||||
shell: bash
|
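For clarity, a worked example of the tag arithmetic used in the release steps above, based on the input format given in the workflow description (`1.2.3.45`) with `release_number: 1`:

```bash
VERSION=1.2.3.45
RELEASE_NUMBER=1
echo "${VERSION%.*}.${RELEASE_NUMBER}"      # TAG             -> 1.2.3.1
echo "${VERSION%.*}"                        # SHORTER_TAG     -> 1.2.3
echo "${VERSION%.*.*}"                      # SHORTEST_TAG    -> 1.2
echo "${VERSION}" | sed -E 's/(.*)\./\1-/'  # PACKAGE_VERSION -> 1.2.3-45
```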
.gitea/workflows/zap-ds.yaml.arch (new file, 70 lines)
@@ -0,0 +1,70 @@
|
|||
---
|
||||
name: Scanning DocumentServer with ZAP
|
||||
|
||||
run-name: >
|
||||
ZAP DocumentServer ver: ${{ github.event.inputs.version }} from branch: ${{ github.event.inputs.branch }}
|
||||
|
||||
on:
|
||||
workflow_dispatch:
|
||||
inputs:
|
||||
version:
|
||||
description: 'Set DocumentServer version that will be deployed'
|
||||
type: string
|
||||
required: true
|
||||
branch:
|
||||
description: 'The branch from which the scan will be performed'
|
||||
type: string
|
||||
required: true
|
||||
jobs:
|
||||
zap:
|
||||
name: "Zap scanning DocumentServer"
|
||||
runs-on: ubuntu-latest
|
||||
permissions:
|
||||
issues: write
|
||||
steps:
|
||||
- name: Checkout code
|
||||
uses: actions/checkout@v3
|
||||
|
||||
- name: Run DS
|
||||
id: run-ds
|
||||
env:
|
||||
TAG: ${{ github.event.inputs.version }}
|
||||
run: |
|
||||
# Create ssl certs
|
||||
openssl genrsa -out tls.key 2048
|
||||
openssl req -new -key tls.key -out tls.csr -subj "/C=RU/ST=NizhObl/L=NizhNov/O=RK-Tech/OU=TestUnit/CN=TestName"
|
||||
openssl x509 -req -days 365 -in tls.csr -signkey tls.key -out tls.crt
|
||||
openssl dhparam -out dhparam.pem 2048
|
||||
sudo mkdir -p /app/onlyoffice/DocumentServer/data/certs
|
||||
sudo cp ./tls.key /app/onlyoffice/DocumentServer/data/certs/
|
||||
sudo cp ./tls.crt /app/onlyoffice/DocumentServer/data/certs/
|
||||
sudo cp ./dhparam.pem /app/onlyoffice/DocumentServer/data/certs/
|
||||
sudo chmod 400 /app/onlyoffice/DocumentServer/data/certs/tls.key
|
||||
rm ./tls.key ./tls.crt ./dhparam.pem
|
||||
|
||||
# Run Ds with enabled ssl
|
||||
export CONTAINER_NAME="documentserver"
|
||||
sudo docker run -itd \
|
||||
--name ${CONTAINER_NAME} \
|
||||
-p 80:80 \
|
||||
-p 443:443 \
|
||||
-v /app/onlyoffice/DocumentServer/data:/var/www/onlyoffice/Data \
|
||||
onlyoffice/4testing-documentserver:${TAG}
|
||||
sleep 60
|
||||
sudo docker exec ${CONTAINER_NAME} sudo supervisorctl start ds:example
|
||||
LOCAL_IP=$(hostname -I | awk '{print $1}')
|
||||
echo "local-ip=${LOCAL_IP}" >> "$GITHUB_OUTPUT"
|
||||
|
||||
# Scan DocumentServer with ZAP.
|
||||
# NOTE: A full scan takes a lot of time.
# If you want to make the scan faster (but less accurate), remove the `cmd_options` field.
# -j means the scan uses the AJAX Spider; with this spider the scan takes approximately an hour.
# Without any cmd options the default spider is used and the scan takes roughly 10-15 minutes.
|
||||
- name: ZAP Scan
|
||||
uses: zaproxy/action-full-scan@v0.8.0
|
||||
with:
|
||||
token: ${{ secrets.GITHUB_TOKEN }}
|
||||
docker_name: 'ghcr.io/zaproxy/zaproxy:stable'
|
||||
target: 'https://${{ steps.run-ds.outputs.local-ip }}/'
|
||||
allow_issue_writing: false
|
||||
cmd_options: '-j'
|
.gitignore (new file, vendored, 2 lines)
@@ -0,0 +1,2 @@
|
|||
# Nix, direnv
|
||||
.direnv
|
.travis.yml (new file, 131 lines)
@@ -0,0 +1,131 @@
|
|||
language: generic
|
||||
|
||||
dist: trusty
|
||||
|
||||
env:
|
||||
# community edition
|
||||
- config: standalone.yml
|
||||
|
||||
# integration edition
|
||||
- config: standalone.yml
|
||||
PRODUCT_NAME: documentserver-ie
|
||||
|
||||
|
||||
# certificates (default tls if onlyoffice not exists)
|
||||
- config: certs.yml
|
||||
ssl: true
|
||||
|
||||
# certificates (default onlyoffice if exists)
|
||||
- config: certs.yml
|
||||
ssl: true
|
||||
private_key: onlyoffice.key
|
||||
certificate_request: onlyoffice.csr
|
||||
certificate: onlyoffice.crt
|
||||
|
||||
# custom certificates
|
||||
- config: certs-customized.yml
|
||||
ssl: true
|
||||
private_key: mycert.key
|
||||
certificate_request: mycert.csr
|
||||
certificate: mycert.crt
|
||||
SSL_CERTIFICATE_PATH: /var/www/onlyoffice/Data/certs/mycert.crt
|
||||
SSL_KEY_PATH: /var/www/onlyoffice/Data/certs/mycert.key
|
||||
|
||||
|
||||
# postgresql 16
|
||||
- config: postgres.yml
|
||||
POSTGRES_VERSION: 16
|
||||
|
||||
# postgresql 15
|
||||
- config: postgres.yml
|
||||
POSTGRES_VERSION: 15
|
||||
|
||||
# postgresql 14
|
||||
- config: postgres.yml
|
||||
POSTGRES_VERSION: 14
|
||||
|
||||
# postgresql 13
|
||||
- config: postgres.yml
|
||||
POSTGRES_VERSION: 13
|
||||
|
||||
# postgresql 12
|
||||
- config: postgres.yml
|
||||
|
||||
# postgresql custom values
|
||||
- config: postgres.yml
|
||||
DB_NAME: mydb
|
||||
DB_USER: myuser
|
||||
DB_PWD: password
|
||||
POSTGRES_DB: mydb
|
||||
POSTGRES_USER: myuser
|
||||
|
||||
# postgresql deprecated variables
|
||||
- config: postgres-old.yml
|
||||
|
||||
|
||||
# mysql 8
|
||||
- config: mysql.yml
|
||||
MYSQL_VERSION: 8
|
||||
|
||||
# mysql 5
|
||||
- config: mysql.yml
|
||||
MYSQL_VERSION: 5
|
||||
|
||||
# mysql 5.7
|
||||
- config: mysql.yml
|
||||
|
||||
|
||||
# mariadb 10
|
||||
- config: mariadb.yml
|
||||
MARIADB_VERSION: 10
|
||||
|
||||
# mariadb 10.5
|
||||
- config: mariadb.yml
|
||||
|
||||
|
||||
- config: activemq.yml
|
||||
ACTIVEMQ_VERSION: latest
|
||||
|
||||
# activemq 5.14.3
|
||||
- config: activemq.yml
|
||||
|
||||
|
||||
# rabbitmq latest
|
||||
- config: rabbitmq.yml
|
||||
|
||||
# rabbitmq 3
|
||||
- config: rabbitmq.yml
|
||||
RABBITMQ_VERSION: 3
|
||||
|
||||
# rabbitmq old variables
|
||||
- config: rabbitmq-old.yml
|
||||
|
||||
|
||||
# redis latest with community edition
|
||||
- config: redis.yml
|
||||
|
||||
# redis latest with integration edition
|
||||
- config: redis.yml
|
||||
PRODUCT_NAME: documentserver-ie
|
||||
|
||||
# redis 6
|
||||
- config: redis.yml
|
||||
REDIS_VERSION: 6
|
||||
|
||||
# redis 5
|
||||
- config: redis.yml
|
||||
REDIS_VERSION: 5
|
||||
|
||||
|
||||
# graphite
|
||||
- config: graphite.yml
|
||||
|
||||
services:
|
||||
- docker
|
||||
|
||||
script:
|
||||
# Go to tests dir
|
||||
- cd ${PWD}/tests
|
||||
|
||||
# Run test.
|
||||
- ./test.sh
|
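Each matrix entry above amounts to exporting the listed variables and running the test script; a rough local equivalent, assuming `test.sh` picks these variables up from the environment (not spelled out here):

```bash
cd tests
# Example: the "postgresql 16" matrix entry
config=postgres.yml POSTGRES_VERSION=16 ./test.sh
```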
Dockerfile (67 changed lines)
@@ -1,39 +1,44 @@
|
|||
FROM ubuntu:16.04
|
||||
LABEL maintainer Ascensio System SIA <support@onlyoffice.com>
|
||||
ARG BASE_IMAGE=ubuntu:22.04
|
||||
|
||||
ENV LANG=en_US.UTF-8 LANGUAGE=en_US:en LC_ALL=en_US.UTF-8 DEBIAN_FRONTEND=noninteractive
|
||||
FROM ${BASE_IMAGE} as documentserver
|
||||
LABEL maintainer Jiří Štefka <jiri.stefka.js@gmail.com>
|
||||
|
||||
ARG PG_VERSION=14
|
||||
|
||||
ENV LANG=en_US.UTF-8 LANGUAGE=en_US:en LC_ALL=en_US.UTF-8 DEBIAN_FRONTEND=noninteractive PG_VERSION=${PG_VERSION}
|
||||
|
||||
ARG ONLYOFFICE_VALUE=onlyoffice
|
||||
|
||||
RUN echo "#!/bin/sh\nexit 0" > /usr/sbin/policy-rc.d && \
|
||||
apt-get -y update && \
|
||||
apt-get -yq install wget apt-transport-https curl locales && \
|
||||
apt-key adv --keyserver keyserver.ubuntu.com --recv-keys 0x8320ca65cb2de8e5 && \
|
||||
apt-get -yq install wget apt-transport-https gnupg locales lsb-release && \
|
||||
locale-gen en_US.UTF-8 && \
|
||||
curl -sL https://deb.nodesource.com/setup_8.x | bash - && \
|
||||
apt-get -y update && \
|
||||
echo ttf-mscorefonts-installer msttcorefonts/accepted-mscorefonts-eula select true | debconf-set-selections && \
|
||||
apt-get -yq install \
|
||||
adduser \
|
||||
apt-utils \
|
||||
bomstrip \
|
||||
certbot \
|
||||
cron \
|
||||
curl \
|
||||
htop \
|
||||
libasound2 \
|
||||
libboost-regex-dev \
|
||||
libcairo2 \
|
||||
libcurl3 \
|
||||
libgconf2-4 \
|
||||
libgtkglext1 \
|
||||
libcurl3-gnutls \
|
||||
libcurl4 \
|
||||
libgtk-3-0 \
|
||||
libnspr4 \
|
||||
libnss3 \
|
||||
libnss3-nssdb \
|
||||
libstdc++6 \
|
||||
libxml2 \
|
||||
libxss1 \
|
||||
libxtst6 \
|
||||
mysql-client \
|
||||
nano \
|
||||
net-tools \
|
||||
netcat \
|
||||
netcat-openbsd \
|
||||
nginx-extras \
|
||||
nodejs \
|
||||
postgresql \
|
||||
postgresql-client \
|
||||
pwgen \
|
||||
|
@@ -42,11 +47,18 @@ RUN echo "#!/bin/sh\nexit 0" > /usr/sbin/policy-rc.d && \
|
|||
software-properties-common \
|
||||
sudo \
|
||||
supervisor \
|
||||
ttf-mscorefonts-installer \
|
||||
xvfb \
|
||||
zlib1g && \
|
||||
sudo -u postgres psql -c "CREATE DATABASE $ONLYOFFICE_VALUE;" && \
|
||||
if [ $(ls -l /usr/share/fonts/truetype/msttcorefonts | wc -l) -ne 61 ]; \
|
||||
then echo 'msttcorefonts failed to download'; exit 1; fi && \
|
||||
echo "SERVER_ADDITIONAL_ERL_ARGS=\"+S 1:1\"" | tee -a /etc/rabbitmq/rabbitmq-env.conf && \
|
||||
sed -i "s/bind .*/bind 127.0.0.1/g" /etc/redis/redis.conf && \
|
||||
sed 's|\(application\/zip.*\)|\1\n application\/wasm wasm;|' -i /etc/nginx/mime.types && \
|
||||
pg_conftool $PG_VERSION main set listen_addresses 'localhost' && \
|
||||
service postgresql restart && \
|
||||
sudo -u postgres psql -c "CREATE USER $ONLYOFFICE_VALUE WITH password '$ONLYOFFICE_VALUE';" && \
|
||||
sudo -u postgres psql -c "GRANT ALL privileges ON DATABASE $ONLYOFFICE_VALUE TO $ONLYOFFICE_VALUE;" && \
|
||||
sudo -u postgres psql -c "CREATE DATABASE $ONLYOFFICE_VALUE OWNER $ONLYOFFICE_VALUE;" && \
|
||||
service postgresql stop && \
|
||||
service redis-server stop && \
|
||||
service rabbitmq-server stop && \
|
||||
|
@@ -54,27 +66,38 @@ RUN echo "#!/bin/sh\nexit 0" > /usr/sbin/policy-rc.d && \
|
|||
service nginx stop && \
|
||||
rm -rf /var/lib/apt/lists/*
|
||||
|
||||
COPY config /app/ds/setup/config/
|
||||
COPY config/supervisor/supervisor /etc/init.d/
|
||||
COPY config/supervisor/ds/*.conf /etc/supervisor/conf.d/
|
||||
COPY run-document-server.sh /app/ds/run-document-server.sh
|
||||
|
||||
EXPOSE 80 443
|
||||
|
||||
ARG REPO_URL="deb http://download.onlyoffice.com/repo/debian squeeze main"
|
||||
ARG COMPANY_NAME=onlyoffice
|
||||
ARG PRODUCT_NAME=documentserver
|
||||
ARG PRODUCT_EDITION=
|
||||
ARG PACKAGE_VERSION=
|
||||
ARG TARGETARCH
|
||||
ARG PACKAGE_BASEURL="http://download.onlyoffice.com/install/documentserver/linux"
|
||||
|
||||
ENV COMPANY_NAME=$COMPANY_NAME
|
||||
ENV COMPANY_NAME=$COMPANY_NAME \
|
||||
PRODUCT_NAME=$PRODUCT_NAME \
|
||||
PRODUCT_EDITION=$PRODUCT_EDITION \
|
||||
DS_DOCKER_INSTALLATION=true
|
||||
|
||||
RUN echo "$REPO_URL" | tee /etc/apt/sources.list.d/ds.list && \
|
||||
RUN PACKAGE_FILE="${COMPANY_NAME}-${PRODUCT_NAME}${PRODUCT_EDITION}${PACKAGE_VERSION:+_$PACKAGE_VERSION}_${TARGETARCH:-$(dpkg --print-architecture)}.deb" && \
|
||||
wget -q -P /tmp "$PACKAGE_BASEURL/$PACKAGE_FILE" && \
|
||||
apt-get -y update && \
|
||||
service postgresql start && \
|
||||
apt-get -yq install $COMPANY_NAME-$PRODUCT_NAME && \
|
||||
apt-get -yq install /tmp/$PACKAGE_FILE && \
|
||||
service postgresql stop && \
|
||||
chmod 755 /etc/init.d/supervisor && \
|
||||
sed "s/COMPANY_NAME/${COMPANY_NAME}/g" -i /etc/supervisor/conf.d/*.conf && \
|
||||
service supervisor stop && \
|
||||
chmod 755 /app/ds/*.sh && \
|
||||
rm -f /tmp/$PACKAGE_FILE && \
|
||||
rm -rf /var/log/$COMPANY_NAME && \
|
||||
rm -rf /var/lib/apt/lists/*
|
||||
|
||||
VOLUME /var/log/$COMPANY_NAME /var/lib/$COMPANY_NAME /var/www/$COMPANY_NAME/Data /var/lib/postgresql /usr/share/fonts/truetype/custom
|
||||
VOLUME /var/log/$COMPANY_NAME /var/lib/$COMPANY_NAME /var/www/$COMPANY_NAME/Data /var/lib/postgresql /var/lib/rabbitmq /var/lib/redis /usr/share/fonts/truetype/custom
|
||||
|
||||
ENTRYPOINT /app/ds/run-document-server.sh
|
||||
ENTRYPOINT ["/app/ds/run-document-server.sh"]
|
||||
|
|
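The rewritten install step above downloads a single `.deb` package whose file name is assembled from build arguments; a worked example of that expansion (the version and edition values are illustrative):

```bash
COMPANY_NAME=onlyoffice PRODUCT_NAME=documentserver PRODUCT_EDITION=-ee
PACKAGE_VERSION=7.0.1-37 TARGETARCH=amd64
echo "${COMPANY_NAME}-${PRODUCT_NAME}${PRODUCT_EDITION}${PACKAGE_VERSION:+_$PACKAGE_VERSION}_${TARGETARCH}.deb"
# -> onlyoffice-documentserver-ee_7.0.1-37_amd64.deb
```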
Makefile (82 changed lines)
@@ -1,66 +1,66 @@
|
|||
COMPANY_NAME ?= onlyoffice
|
||||
COMPANY_NAME ?= ONLYOFFICE
|
||||
GIT_BRANCH ?= develop
|
||||
PRODUCT_NAME ?= documentserver-ie
|
||||
PRODUCT_NAME ?= documentserver
|
||||
PRODUCT_EDITION ?=
|
||||
PRODUCT_VERSION ?= 0.0.0
|
||||
BUILD_NUMBER ?= 0
|
||||
BUILD_CHANNEL ?= nightly
|
||||
ONLYOFFICE_VALUE ?= onlyoffice
|
||||
|
||||
PACKAGE_VERSION := $(PRODUCT_VERSION)-$(BUILD_NUMBER)
|
||||
COMPANY_NAME_LOW = $(shell echo $(COMPANY_NAME) | tr A-Z a-z)
|
||||
|
||||
REPO_URL := "deb [trusted=yes] http://repo-doc-onlyoffice-com.s3.amazonaws.com/ubuntu/trusty/$(COMPANY_NAME)-$(PRODUCT_NAME)/$(GIT_BRANCH)/$(PACKAGE_VERSION)/ repo/"
|
||||
PACKAGE_NAME := $(COMPANY_NAME_LOW)-$(PRODUCT_NAME)$(PRODUCT_EDITION)
|
||||
PACKAGE_VERSION ?= $(PRODUCT_VERSION)-$(BUILD_NUMBER)~stretch
|
||||
PACKAGE_BASEURL ?= https://s3.eu-west-1.amazonaws.com/repo-doc-onlyoffice-com/server/linux/debian
|
||||
|
||||
UPDATE_LATEST := false
|
||||
|
||||
ifneq (,$(findstring develop,$(GIT_BRANCH)))
|
||||
DOCKER_TAG += $(subst -,.,$(PACKAGE_VERSION))
|
||||
DOCKER_TAGS += latest
|
||||
else ifneq (,$(findstring release,$(GIT_BRANCH)))
|
||||
DOCKER_TAG += $(subst -,.,$(PACKAGE_VERSION))
|
||||
else ifneq (,$(findstring hotfix,$(GIT_BRANCH)))
|
||||
DOCKER_TAG += $(subst -,.,$(PACKAGE_VERSION))
|
||||
ifeq ($(BUILD_CHANNEL),$(filter $(BUILD_CHANNEL),nightly test))
|
||||
DOCKER_TAG := $(PRODUCT_VERSION).$(BUILD_NUMBER)
|
||||
else
|
||||
DOCKER_TAG += $(subst -,.,$(PACKAGE_VERSION))-$(subst /,-,$(GIT_BRANCH))
|
||||
DOCKER_TAG := $(PRODUCT_VERSION).$(BUILD_NUMBER)-$(subst /,-,$(GIT_BRANCH))
|
||||
endif
|
||||
|
||||
DOCKER_TAGS += $(DOCKER_TAG)
|
||||
DOCKER_ORG ?= $(COMPANY_NAME_LOW)
|
||||
DOCKER_IMAGE := $(DOCKER_ORG)/4testing-$(PRODUCT_NAME)$(PRODUCT_EDITION)
|
||||
DOCKER_DUMMY := $(COMPANY_NAME_LOW)-$(PRODUCT_NAME)$(PRODUCT_EDITION)__$(DOCKER_TAG).dummy
|
||||
DOCKER_ARCH := $(COMPANY_NAME_LOW)-$(PRODUCT_NAME)_$(DOCKER_TAG).tar.gz
|
||||
|
||||
DOCKER_REPO = $(COMPANY_NAME)/4testing-$(PRODUCT_NAME)
|
||||
|
||||
COLON := __colon__
|
||||
DOCKER_TARGETS := $(foreach TAG,$(DOCKER_TAGS),$(DOCKER_REPO)$(COLON)$(TAG))
|
||||
|
||||
DOCKER_ARCH := $(COMPANY_NAME)-$(PRODUCT_NAME)_$(PACKAGE_VERSION).tar.gz
|
||||
|
||||
.PHONY: all clean clean-docker deploy docker publish
|
||||
|
||||
$(DOCKER_TARGETS): $(DEB_REPO_DATA)
|
||||
.PHONY: all clean clean-docker image deploy docker
|
||||
|
||||
$(DOCKER_DUMMY):
|
||||
docker pull ubuntu:22.04
|
||||
docker build \
|
||||
--build-arg REPO_URL=$(REPO_URL) \
|
||||
--build-arg COMPANY_NAME=$(COMPANY_NAME) \
|
||||
--build-arg COMPANY_NAME=$(COMPANY_NAME_LOW) \
|
||||
--build-arg PRODUCT_NAME=$(PRODUCT_NAME) \
|
||||
--build-arg PRODUCT_EDITION=$(PRODUCT_EDITION) \
|
||||
--build-arg PACKAGE_VERSION=$(PACKAGE_VERSION) \
|
||||
--build-arg PACKAGE_BASEURL=$(PACKAGE_BASEURL) \
|
||||
--build-arg TARGETARCH=amd64 \
|
||||
--build-arg ONLYOFFICE_VALUE=$(ONLYOFFICE_VALUE) \
|
||||
-t $(subst $(COLON),:,$@) . &&\
|
||||
mkdir -p $$(dirname $@) &&\
|
||||
-t $(DOCKER_IMAGE):$(DOCKER_TAG) . && \
|
||||
mkdir -p $$(dirname $@) && \
|
||||
echo "Done" > $@
|
||||
|
||||
$(DOCKER_ARCH): $(DOCKER_TARGETS)
|
||||
docker save $(DOCKER_REPO):$(DOCKER_TAG) | \
|
||||
$(DOCKER_ARCH): $(DOCKER_DUMMY)
|
||||
docker save $(DOCKER_IMAGE):$(DOCKER_TAG) | \
|
||||
gzip > $@
|
||||
|
||||
all: $(DOCKER_TARGETS)
|
||||
all: image
|
||||
|
||||
clean:
|
||||
rm -rfv $(DOCKER_TARGETS) $(DOCKER_ARCH)
|
||||
rm -rfv *.dummy *.tar.gz
|
||||
|
||||
clean-docker:
|
||||
docker rmi -f $$(docker images -q $(COMPANY_NAME)/*) || exit 0
|
||||
docker rmi -f $$(docker images -q $(COMPANY_NAME_LOW)/*) || exit 0
|
||||
|
||||
deploy: $(DOCKER_TARGETS)
|
||||
$(foreach TARGET,$(DOCKER_TARGETS),docker push $(subst $(COLON),:,$(TARGET));)
|
||||
image: $(DOCKER_DUMMY)
|
||||
|
||||
publish: $(DOCKER_ARCH)
|
||||
aws s3 cp \
|
||||
$(DOCKER_ARCH) \
|
||||
s3://repo-doc-onlyoffice-com.s3.amazonaws.com/docker/amd64/ \
|
||||
--acl public-read
|
||||
deploy: $(DOCKER_DUMMY)
|
||||
for i in {1..3}; do \
|
||||
docker push $(DOCKER_IMAGE):$(DOCKER_TAG) && break || sleep 1m; \
|
||||
done
|
||||
ifeq ($(BUILD_CHANNEL),nightly)
|
||||
docker tag $(DOCKER_IMAGE):$(DOCKER_TAG) $(DOCKER_IMAGE):latest
|
||||
for i in {1..3}; do \
|
||||
docker push $(DOCKER_IMAGE):latest && break || sleep 1m; \
|
||||
done
|
||||
endif
|
||||
|
|
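The reworked Makefile is driven by the same variables the workflows export; a sketch of invoking the new targets by hand (all values are placeholders):

```bash
# Build the 4testing image locally via the new `image` target
make image PRODUCT_VERSION=7.0.1 BUILD_NUMBER=37 PRODUCT_EDITION=-ee

# Push it; with BUILD_CHANNEL=nightly the Makefile also tags and pushes :latest
make deploy PRODUCT_VERSION=7.0.1 BUILD_NUMBER=37 BUILD_CHANNEL=nightly
```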
README.md (335 changed lines)
@@ -1,3 +1,190 @@
|
|||
[![Docker Pulls](https://img.shields.io/docker/pulls/jiriks74/onlyoffice-documentserver.svg?color=94398d&labelColor=555555&logoColor=ffffff&style=for-the-badge&label=pulls&logo=docker)](https://hub.docker.com/r/jiriks74/onlyoffice-documentserver)
|
||||
[![Docker Stars](https://img.shields.io/docker/stars/jiriks74/onlyoffice-documentserver.svg?color=94398d&labelColor=555555&logoColor=ffffff&style=for-the-badge&label=stars&logo=docker)](https://hub.docker.com/r/jiriks74/onlyoffice-documentserver)
|
||||
[![Docker Size](https://img.shields.io/docker/image-size/jiriks74/onlyoffice-documentserver/latest.svg?color=94398d&labelColor=555555&logoColor=ffffff&style=for-the-badge&label=Size&logo=docker)](https://hub.docker.com/r/jiriks74/onlyoffice-documentserver)
|
||||
|
||||
#### This repository is based on the official `Dockerfile` and `docker-compose.yml` files, along with all the other files they need
|
||||
|
||||
# Table of contents
|
||||
|
||||
- [Usage](#usage)
|
||||
- [Setting up secret key with nextcloud](#setting-up-secret-key-with-nextcloud)
|
||||
- [Larger file limits](#setting-up-larger-file-limits)
|
||||
- [Generating custom presentation themes](#generating-custom-presentation-themes)
|
||||
- [Tags on DockerHub](#tags-used-on-dockerhub)
|
||||
- [Building the image from source](#building-the-image-yourself-not-recommended---may-take-a-lot-of-time)
|
||||
- [Updating the image yourself](#updating-the-image-yourself)
|
||||
|
||||
## Usage
|
||||
|
||||
#### docker-compose with prebuilt image (recommended)
|
||||
|
||||
- Docker will pull the correct architecture automatically
|
||||
- Below in `Details` you'll find a `docker-compose.yml` template.
  To use it, make a directory where data from the container will be saved,
  create a `docker-compose.yml` file in it, and paste in the template below.
  Then modify the template to your needs.
|
||||
|
||||
<details>
|
||||
|
||||
```yml
|
||||
version: '2'
|
||||
services:
|
||||
onlyoffice-documentserver:
|
||||
image: jiriks74/onlyoffice-documentserver:latest
|
||||
container_name: onlyoffice-documentserver
|
||||
depends_on:
|
||||
- onlyoffice-postgresql
|
||||
- onlyoffice-rabbitmq
|
||||
environment:
|
||||
- DB_TYPE=postgres
|
||||
- DB_HOST=onlyoffice-postgresql
|
||||
- DB_PORT=5432
|
||||
- DB_NAME=onlyoffice
|
||||
- DB_USER=onlyoffice
|
||||
- AMQP_URI=amqp://guest:guest@onlyoffice-rabbitmq
|
||||
# Uncomment strings below to enable the JSON Web Token validation.
|
||||
#- JWT_ENABLED=true
|
||||
#- JWT_SECRET=secret
|
||||
#- JWT_HEADER=AuthorizationJwt
|
||||
#- JWT_IN_BODY=true
|
||||
# Uncomment the line below to set larger file limits (about 1GB)
|
||||
#- LARGER_FILE_LIMITS=true
|
||||
ports:
|
||||
- '80:80'
|
||||
- '443:443'
|
||||
stdin_open: true
|
||||
restart: always
|
||||
stop_grace_period: 120s
|
||||
volumes:
|
||||
# Uncomment the line below to get access to the slide themes directory.
|
||||
# To use the themes, copy them to the slideThemes directory and run `docker exec -it <container-name> /usr/bin/documentserver-generate-allfonts.sh`
|
||||
#- ./slideThemes:/var/www/onlyoffice/documentserver/sdkjs/slide/themes/src
|
||||
- /var/www/onlyoffice/Data
|
||||
- /var/log/onlyoffice
|
||||
- /var/lib/onlyoffice/documentserver/App_Data/cache/files
|
||||
- /var/www/onlyoffice/documentserver-example/public/files
|
||||
- /usr/share/fonts
|
||||
|
||||
onlyoffice-rabbitmq:
|
||||
container_name: onlyoffice-rabbitmq
|
||||
image: rabbitmq
|
||||
restart: always
|
||||
expose:
|
||||
- '5672'
|
||||
|
||||
onlyoffice-postgresql:
|
||||
container_name: onlyoffice-postgresql
|
||||
image: postgres:9.5
|
||||
environment:
|
||||
- POSTGRES_DB=onlyoffice
|
||||
- POSTGRES_USER=onlyoffice
|
||||
- POSTGRES_HOST_AUTH_METHOD=trust
|
||||
restart: always
|
||||
expose:
|
||||
- '5432'
|
||||
volumes:
|
||||
- postgresql_data:/var/lib/postgresql
|
||||
|
||||
volumes:
|
||||
postgresql_data:
|
||||
```
|
||||
|
||||
</details>
|
||||
|
||||
### Setting up `Secret key` with Nextcloud
|
||||
|
||||
1. Uncomment four lines starting with `JWT` in `docker-compose`
|
||||
2. Set your secret on line `JWT_SECRET=yourSecret`
|
||||
3. Open Nextcloud's `config.php` (by default `/var/www/nextcloud/config/config.php`)
|
||||
4. Add the following right before the closing `);` line and paste your secret into the `jwt_secret` value
|
||||
|
||||
```php
|
||||
'onlyoffice' =>
|
||||
array (
|
||||
"jwt_secret" => "yourSecret",
|
||||
"jwt_header" => "AuthorizationJwt"
|
||||
)
|
||||
```
|
||||
|
||||
5. Go to your Nextcloud settings, navigate to Onlyoffice settings
|
||||
6. Add your server Address and Secret key
|
||||
7. Save
|
||||
|
||||
### Setting up larger file limits
|
||||
|
||||
- Uncomment the `- LARGER_FILE_LIMITS=true` line in `docker-compose.yml`
|
||||
|
||||
### Generating custom presentation themes
|
||||
|
||||
1. Uncomment the
|
||||
`- ./slideThemes:/var/www/onlyoffice/documentserver/sdkjs/slide/themes/src`
|
||||
line in `docker-compose.yml`
|
||||
2. Put your themes into the `slideThemes` directory
|
||||
3. Run `docker exec -it <container-name> /usr/bin/documentserver-generate-allfonts.sh`
|
||||
- (This will take some time. I have 35 themes in total and it took about 30 minutes to generate them on a Raspberry Pi 4 4GB on an external HDD; an SSD may be faster)
|
||||
4. If you want to add more themes later, repeat step 2 and 3.
|
||||
|
||||
## Tags used on DockerHub
|
||||
|
||||
- `latest` - the latest version of the Documentserver
|
||||
- Version tags (eg. `7.0.1-37`) - these tags are equal to the Documentserver
|
||||
version of the `onlyoffice-documentserver` debian package used in the image
|
||||
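For example, either kind of tag can be pulled directly (the version tag below is just the example from the list above):

```bash
docker pull jiriks74/onlyoffice-documentserver:latest
docker pull jiriks74/onlyoffice-documentserver:7.0.1-37
```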
|
||||
## Building the image yourself (not recommended - may take a lot of time)
|
||||
|
||||
#### 1. Clone the repository (for example to your home directory `cd /home/$USER/`)
|
||||
|
||||
`git clone https://github.com/jiriks74/Docker-DocumentServer.git && cd Docker-DocumentServer`
|
||||
|
||||
#### 2. Build the docker image
|
||||
|
||||
##### Building only for the architecture you are building the image on (when building on a Raspberry Pi the result will be `arm64`; when on a PC, `amd64`)
|
||||
|
||||
`docker-compose build`
|
||||
|
||||
##### Building for all supported architectures (you need your environment set up for `arm64` emulation with `qemu` and have to build on `amd64`) - you have to push to DockerHub
|
||||
|
||||
`docker buildx build --push --platform linux/arm64,linux/amd64,linux/386 .`
|
||||
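Note that `docker buildx build --push` normally also needs a tag to push to; a hedged variant of the command above with a placeholder Docker Hub repository name:

```bash
docker buildx build --push \
  --platform linux/arm64,linux/amd64,linux/386 \
  -t <your-dockerhub-user>/onlyoffice-documentserver:latest .
```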
|
||||
#### 3. Create and start the container
|
||||
|
||||
`docker-compose up -d`
|
||||
|
||||
- This will start the server. It is set to restart automatically, so as long as you have Docker running on startup, the server will start automatically as well
|
||||
|
||||
### Updating the image yourself
|
||||
|
||||
#### 1. Stop and delete the old container
|
||||
|
||||
`docker-compose down`
|
||||
|
||||
#### 2. (optional) Clear the docker cache
|
||||
|
||||
#### - ! This will remove all unused (dangling) cache images ! (good for saving space; skip it if you develop locally and rely on the build cache)
|
||||
|
||||
`docker rmi $(docker images -f "dangling=true" -q)`
|
||||
|
||||
#### 3. Rebuild the image without cache
|
||||
|
||||
##### Building only for the architecture you are building the image on (when building on a Raspberry Pi the result will be `arm64`; when on a PC, `amd64`)
|
||||
|
||||
`docker-compose build`
|
||||
|
||||
##### Building for all supported architectures (you need your environment set up for `arm64` emulation with `qemu`) - you'll have to push to DockerHub, since multi-platform images cannot be saved locally (for whatever reason)
|
||||
|
||||
`docker buildx build --push --platform linux/arm64,linux/amd64,linux/386 .`
|
||||
|
||||
#### 4. Create and start the new container
|
||||
|
||||
`docker-compose up -d`
|
||||
|
||||
---
|
||||
|
||||
## The rest of this file is the official [`README.md` from the ONLYOFFICE Docker-DocumentServer repository](https://github.com/ONLYOFFICE/Docker-DocumentServer). I will not change anything in it; it may not all work, but considering the changes I made it should be fully compatible (beware that you must switch the `docker-compose.yml` from building the image locally to using this repository's image). If you want to change something, open an issue on my repository and we'll figure it out.
|
||||
|
||||
<details>
|
||||
|
||||
|
||||
* [Overview](#overview)
|
||||
* [Functionality](#functionality)
|
||||
* [Recommended System Requirements](#recommended-system-requirements)
|
||||
|
@@ -11,6 +198,7 @@
|
|||
+ [Installation of the SSL Certificates](#installation-of-the-ssl-certificates)
|
||||
+ [Available Configuration Parameters](#available-configuration-parameters)
|
||||
* [Installing ONLYOFFICE Document Server integrated with Community and Mail Servers](#installing-onlyoffice-document-server-integrated-with-community-and-mail-servers)
|
||||
* [ONLYOFFICE Document Server ipv6 setup](#onlyoffice-document-server-ipv6-setup)
|
||||
* [Issues](#issues)
|
||||
- [Docker Issues](#docker-issues)
|
||||
- [Document Server usage Issues](#document-server-usage-issues)
|
||||
|
@@ -21,6 +209,12 @@
|
|||
|
||||
ONLYOFFICE Document Server is an online office suite comprising viewers and editors for texts, spreadsheets and presentations, fully compatible with Office Open XML formats: .docx, .xlsx, .pptx and enabling collaborative editing in real time.
|
||||
|
||||
Starting from version 6.0, Document Server is distributed as ONLYOFFICE Docs. It has [three editions](https://github.com/ONLYOFFICE/DocumentServer#onlyoffice-document-server-editions). With this image, you will install the free Community version.
|
||||
|
||||
ONLYOFFICE Docs can be used as a part of ONLYOFFICE Workspace or with third-party sync&share solutions (e.g. Nextcloud, ownCloud, Seafile) to enable collaborative editing within their interface.
|
||||
|
||||
***Important*** Please update `docker-engine` to the latest version (`20.10.21` as of writing this doc) before using it. We use `ubuntu:22.04` as the base image, and older versions of Docker have compatibility problems with it.
|
||||
|
||||
## Functionality ##
|
||||
* ONLYOFFICE Document Editor
|
||||
* ONLYOFFICE Spreadsheet Editor
|
||||
|
@@ -67,6 +261,8 @@ To get access to your data from outside the container, you need to mount the vol
|
|||
-v /app/onlyoffice/DocumentServer/logs:/var/log/onlyoffice \
|
||||
-v /app/onlyoffice/DocumentServer/data:/var/www/onlyoffice/Data \
|
||||
-v /app/onlyoffice/DocumentServer/lib:/var/lib/onlyoffice \
|
||||
-v /app/onlyoffice/DocumentServer/rabbitmq:/var/lib/rabbitmq \
|
||||
-v /app/onlyoffice/DocumentServer/redis:/var/lib/redis \
|
||||
-v /app/onlyoffice/DocumentServer/db:/var/lib/postgresql onlyoffice/documentserver
|
||||
|
||||
Normally, you do not need to store container data because the container's operation does not depend on its state. Saving data will be useful:
|
||||
|
@@ -94,10 +290,17 @@ To secure the application via SSL basically two things are needed:
|
|||
|
||||
So you need to create and install the following files:
|
||||
|
||||
/app/onlyoffice/DocumentServer/data/certs/onlyoffice.key
|
||||
/app/onlyoffice/DocumentServer/data/certs/onlyoffice.crt
|
||||
/app/onlyoffice/DocumentServer/data/certs/tls.key
|
||||
/app/onlyoffice/DocumentServer/data/certs/tls.crt
|
||||
|
||||
When using CA certified certificates, these files are provided to you by the CA. When using self-signed certificates you need to generate these files yourself. Skip the following section if you are have CA certified SSL certificates.
|
||||
When using CA certified certificates (e.g [Let's encrypt](https://letsencrypt.org)), these files are provided to you by the CA. If you are using self-signed certificates you need to generate these files [yourself](#generation-of-self-signed-certificates).
|
||||
|
||||
#### Using the automatically generated Let's Encrypt SSL Certificates
|
||||
|
||||
sudo docker run -i -t -d -p 80:80 -p 443:443 \
|
||||
-e LETS_ENCRYPT_DOMAIN=your_domain -e LETS_ENCRYPT_MAIL=your_mail onlyoffice/documentserver
|
||||
|
||||
If you want to get and extend Let's Encrypt SSL Certificates automatically just set LETS_ENCRYPT_DOMAIN and LETS_ENCRYPT_MAIL variables.
|
||||
|
||||
#### Generation of Self Signed Certificates
|
||||
|
||||
|
@@ -106,19 +309,19 @@ Generation of self-signed SSL certificates involves a simple 3 step procedure.
|
|||
**STEP 1**: Create the server private key
|
||||
|
||||
```bash
|
||||
openssl genrsa -out onlyoffice.key 2048
|
||||
openssl genrsa -out tls.key 2048
|
||||
```
|
||||
|
||||
**STEP 2**: Create the certificate signing request (CSR)
|
||||
|
||||
```bash
|
||||
openssl req -new -key onlyoffice.key -out onlyoffice.csr
|
||||
openssl req -new -key tls.key -out tls.csr
|
||||
```
|
||||
|
||||
**STEP 3**: Sign the certificate using the private key and CSR
|
||||
|
||||
```bash
|
||||
openssl x509 -req -days 365 -in onlyoffice.csr -signkey onlyoffice.key -out onlyoffice.crt
|
||||
openssl x509 -req -days 365 -in tls.csr -signkey tls.key -out tls.crt
|
||||
```
|
||||
|
||||
You have now generated an SSL certificate that's valid for 365 days.
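Optionally, you can sanity-check the certificate you just generated before installing it (a quick inspection, not a required step):

```bash
# Show the subject and the validity period of the freshly generated certificate
openssl x509 -in tls.crt -noout -subject -dates
```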
|
||||
|
@ -134,18 +337,18 @@ openssl dhparam -out dhparam.pem 2048
|
|||
|
||||
#### Installation of the SSL Certificates
|
||||
|
||||
Out of the four files generated above, you need to install the `onlyoffice.key`, `onlyoffice.crt` and `dhparam.pem` files at the onlyoffice server. The CSR file is not needed, but do make sure you safely backup the file (in case you ever need it again).
|
||||
Out of the four files generated above, you need to install the `tls.key`, `tls.crt` and `dhparam.pem` files on the onlyoffice server. The CSR file is not needed, but do make sure you safely back it up (in case you ever need it again).
|
||||
|
||||
The default path that the onlyoffice application is configured to look for the SSL certificates is at `/var/www/onlyoffice/Data/certs`, this can however be changed using the `SSL_KEY_PATH`, `SSL_CERTIFICATE_PATH` and `SSL_DHPARAM_PATH` configuration options.
|
||||
|
||||
The `/var/www/onlyoffice/Data/` path is the path of the data store, which means that you have to create a folder named certs inside `/app/onlyoffice/DocumentServer/data/` and copy the files into it and as a measure of security you will update the permission on the `onlyoffice.key` file to only be readable by the owner.
|
||||
The `/var/www/onlyoffice/Data/` path is the path of the data store, which means that you have to create a folder named `certs` inside `/app/onlyoffice/DocumentServer/data/`, copy the files into it, and, as a security measure, update the permissions on the `tls.key` file so that it is readable only by the owner.
|
||||
|
||||
```bash
|
||||
mkdir -p /app/onlyoffice/DocumentServer/data/certs
|
||||
cp onlyoffice.key /app/onlyoffice/DocumentServer/data/certs/
|
||||
cp onlyoffice.crt /app/onlyoffice/DocumentServer/data/certs/
|
||||
cp tls.key /app/onlyoffice/DocumentServer/data/certs/
|
||||
cp tls.crt /app/onlyoffice/DocumentServer/data/certs/
|
||||
cp dhparam.pem /app/onlyoffice/DocumentServer/data/certs/
|
||||
chmod 400 /app/onlyoffice/DocumentServer/data/certs/onlyoffice.key
|
||||
chmod 400 /app/onlyoffice/DocumentServer/data/certs/tls.key
|
||||
```
|
||||
|
||||
You are now just one step away from having your application secured.
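That last step is simply restarting the container with port 443 published so that it picks up the certificates from the mounted data folder. A minimal sketch, assuming the volume layout used throughout this guide:

```bash
sudo docker run -i -t -d -p 443:443 \
    -v /app/onlyoffice/DocumentServer/data:/var/www/onlyoffice/Data  onlyoffice/documentserver
```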
|
||||
|
@ -158,24 +361,39 @@ Below is the complete list of parameters that can be set using environment varia
|
|||
|
||||
- **ONLYOFFICE_HTTPS_HSTS_ENABLED**: Advanced configuration option for turning off the HSTS configuration. Applicable only when SSL is in use. Defaults to `true`.
|
||||
- **ONLYOFFICE_HTTPS_HSTS_MAXAGE**: Advanced configuration option for setting the HSTS max-age in the onlyoffice nginx vHost configuration. Applicable only when SSL is in use. Defaults to `31536000`.
|
||||
- **SSL_CERTIFICATE_PATH**: The path to the SSL certificate to use. Defaults to `/var/www/onlyoffice/Data/certs/onlyoffice.crt`.
|
||||
- **SSL_KEY_PATH**: The path to the SSL certificate's private key. Defaults to `/var/www/onlyoffice/Data/certs/onlyoffice.key`.
|
||||
- **SSL_CERTIFICATE_PATH**: The path to the SSL certificate to use. Defaults to `/var/www/onlyoffice/Data/certs/tls.crt`.
|
||||
- **SSL_KEY_PATH**: The path to the SSL certificate's private key. Defaults to `/var/www/onlyoffice/Data/certs/tls.key`.
|
||||
- **SSL_DHPARAM_PATH**: The path to the Diffie-Hellman parameter. Defaults to `/var/www/onlyoffice/Data/certs/dhparam.pem`.
|
||||
- **SSL_VERIFY_CLIENT**: Enable verification of client certificates using the `CA_CERTIFICATES_PATH` file. Defaults to `false`
|
||||
- **POSTGRESQL_SERVER_HOST**: The IP address or the name of the host where the PostgreSQL server is running.
|
||||
- **POSTGRESQL_SERVER_PORT**: The PostgreSQL server port number.
|
||||
- **POSTGRESQL_SERVER_DB_NAME**: The name of a PostgreSQL database to be created on the image startup.
|
||||
- **POSTGRESQL_SERVER_USER**: The new user name with superuser permissions for the PostgreSQL account.
|
||||
- **POSTGRESQL_SERVER_PASS**: The password set for the PostgreSQL account.
|
||||
- **AMQP_SERVER_URL**: The [AMQP URL](http://www.rabbitmq.com/uri-spec.html "RabbitMQ URI Specification") to connect to message broker server.
|
||||
- **AMQP_SERVER_TYPE**: The message broker type. Supported values are `rabbitmq` or `activemq`. Defaults to `rabbitmq`.
|
||||
- **DB_TYPE**: The database type. Supported values are `postgres`, `mariadb` or `mysql`. Defaults to `postgres`.
|
||||
- **DB_HOST**: The IP address or the name of the host where the database server is running.
|
||||
- **DB_PORT**: The database server port number.
|
||||
- **DB_NAME**: The name of the database to use. It must already exist on container startup.
|
||||
- **DB_USER**: The new user name with superuser permissions for the database account.
|
||||
- **DB_PWD**: The password set for the database account.
|
||||
- **AMQP_URI**: The [AMQP URI](https://www.rabbitmq.com/uri-spec.html "RabbitMQ URI Specification") to connect to message broker server.
|
||||
- **AMQP_TYPE**: The message broker type. Supported values are `rabbitmq` or `activemq`. Defaults to `rabbitmq`.
|
||||
- **REDIS_SERVER_HOST**: The IP address or the name of the host where the Redis server is running.
|
||||
- **REDIS_SERVER_PORT**: The Redis server port number.
|
||||
- **REDIS_SERVER_PASS**: The Redis server password. The password is not set by default.
|
||||
- **NGINX_WORKER_PROCESSES**: Defines the number of nginx worker processes.
|
||||
- **NGINX_WORKER_CONNECTIONS**: Sets the maximum number of simultaneous connections that can be opened by a nginx worker process.
|
||||
- **JWT_ENABLED**: Specifies the enabling the JSON Web Token validation by the ONLYOFFICE Document Server. Defaults to `false`.
|
||||
- **JWT_SECRET**: Defines the secret key to validate the JSON Web Token in the request to the ONLYOFFICE Document Server. Defaults to `secret`.
|
||||
- **SECURE_LINK_SECRET**: Defines secret for the nginx config directive [secure_link_md5](https://nginx.org/en/docs/http/ngx_http_secure_link_module.html#secure_link_md5). Defaults to `random string`.
|
||||
- **JWT_ENABLED**: Enables JSON Web Token validation by the ONLYOFFICE Document Server. Defaults to `true` (see the combined example after this list).
|
||||
- **JWT_SECRET**: Defines the secret key to validate the JSON Web Token in the request to the ONLYOFFICE Document Server. Defaults to a random value.
|
||||
- **JWT_HEADER**: Defines the HTTP header that will be used to send the JSON Web Token. Defaults to `Authorization`.
|
||||
- **JWT_IN_BODY**: Enables token validation in the request body sent to the ONLYOFFICE Document Server. Defaults to `false`.
|
||||
- **WOPI_ENABLED**: Enables the WOPI handlers. Defaults to `false`.
|
||||
- **ALLOW_META_IP_ADDRESS**: Defines whether connections to the meta IP address are allowed. Defaults to `false`.
|
||||
- **ALLOW_PRIVATE_IP_ADDRESS**: Defines whether connections to private IP addresses are allowed. Defaults to `false`.
|
||||
- **USE_UNAUTHORIZED_STORAGE**: Set to `true` if you are using self-signed certificates for your storage server, e.g. Nextcloud. Defaults to `false`.
|
||||
- **GENERATE_FONTS**: When `true`, regenerates the font list, font thumbnails, etc. at each start. Defaults to `true`.
|
||||
- **METRICS_ENABLED**: Enables StatsD for the ONLYOFFICE Document Server. Defaults to `false`.
|
||||
- **METRICS_HOST**: Defines StatsD listening host. Defaults to `localhost`.
|
||||
- **METRICS_PORT**: Defines StatsD listening port. Defaults to `8125`.
|
||||
- **METRICS_PREFIX**: Defines StatsD metrics prefix for backend services. Defaults to `ds.`.
|
||||
- **LETS_ENCRYPT_DOMAIN**: Defines the domain for Let's Encrypt certificate.
|
||||
- **LETS_ENCRYPT_MAIL**: Defines the domain administrator mail address for the Let's Encrypt certificate.
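As a combined illustration of how several of the variables above fit together (a sketch only: the host names, secret and password are placeholders, and it assumes an external PostgreSQL server is already reachable under the given host name):

```bash
sudo docker run -i -t -d -p 80:80 --restart=always \
    -e JWT_ENABLED=true \
    -e JWT_SECRET=my_jwt_secret \
    -e DB_TYPE=postgres \
    -e DB_HOST=onlyoffice-postgresql \
    -e DB_PORT=5432 \
    -e DB_NAME=onlyoffice \
    -e DB_USER=onlyoffice \
    -e DB_PWD=onlyoffice_pass \
    onlyoffice/documentserver
```

If you leave `JWT_SECRET` unset, the container generates a random secret at startup; the bundled `documentserver-jwt-status.sh` script (run via `docker exec`) reports the JWT settings in use.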
|
||||
|
||||
## Installing ONLYOFFICE Document Server integrated with Community and Mail Servers
|
||||
|
||||
|
@ -192,18 +410,28 @@ Then launch containers on it using the 'docker run --net onlyoffice' option:
|
|||
|
||||
Follow [these steps](#installing-mysql) to install MySQL server.
|
||||
|
||||
**STEP 3**: Install ONLYOFFICE Document Server.
|
||||
**STEP 3**: Generate JWT Secret
|
||||
|
||||
The JWT secret defines the secret key used to validate the JSON Web Token in requests to the **ONLYOFFICE Document Server**. You can specify it yourself or easily generate it using the command:
|
||||
```
|
||||
JWT_SECRET=$(cat /dev/urandom | tr -dc A-Za-z0-9 | head -c 12);
|
||||
```
|
||||
|
||||
**STEP 4**: Install ONLYOFFICE Document Server.
|
||||
|
||||
```bash
|
||||
sudo docker run --net onlyoffice -i -t -d --restart=always --name onlyoffice-document-server \
|
||||
-v /app/onlyoffice/DocumentServer/logs:/var/log/onlyoffice \
|
||||
-v /app/onlyoffice/DocumentServer/data:/var/www/onlyoffice/Data \
|
||||
-v /app/onlyoffice/DocumentServer/lib:/var/lib/onlyoffice \
|
||||
-v /app/onlyoffice/DocumentServer/db:/var/lib/postgresql \
|
||||
onlyoffice/documentserver
|
||||
-e JWT_ENABLED=true \
|
||||
-e JWT_SECRET=${JWT_SECRET} \
|
||||
-e JWT_HEADER=AuthorizationJwt \
|
||||
-v /app/onlyoffice/DocumentServer/logs:/var/log/onlyoffice \
|
||||
-v /app/onlyoffice/DocumentServer/data:/var/www/onlyoffice/Data \
|
||||
-v /app/onlyoffice/DocumentServer/lib:/var/lib/onlyoffice \
|
||||
-v /app/onlyoffice/DocumentServer/db:/var/lib/postgresql \
|
||||
onlyoffice/documentserver
|
||||
```
|
||||
|
||||
**STEP 4**: Install ONLYOFFICE Mail Server.
|
||||
**STEP 5**: Install ONLYOFFICE Mail Server.
|
||||
|
||||
For the mail server to work correctly, you need to specify its hostname 'yourdomain.com'.
|
||||
|
||||
|
@ -221,14 +449,14 @@ sudo docker run --init --net onlyoffice --privileged -i -t -d --restart=always -
|
|||
onlyoffice/mailserver
|
||||
```
|
||||
|
||||
The additional parameters for mail server are available [here](https://github.com/ONLYOFFICE/Docker-CommunityServer/blob/master/docker-compose.yml#L75).
|
||||
The additional parameters for mail server are available [here](https://github.com/ONLYOFFICE/Docker-CommunityServer/blob/master/docker-compose.workspace_enterprise.yml#L87).
|
||||
|
||||
To learn more, refer to the [ONLYOFFICE Mail Server documentation](https://github.com/ONLYOFFICE/Docker-MailServer "ONLYOFFICE Mail Server documentation").
|
||||
|
||||
**STEP 5**: Install ONLYOFFICE Community Server
|
||||
**STEP 6**: Install ONLYOFFICE Community Server
|
||||
|
||||
```bash
|
||||
sudo docker run --net onlyoffice -i -t -d --restart=always --name onlyoffice-community-server -p 80:80 -p 443:443 -p 5222:5222 \
|
||||
sudo docker run --net onlyoffice -i -t -d --privileged --restart=always --name onlyoffice-community-server -p 80:80 -p 443:443 -p 5222:5222 --cgroupns=host \
|
||||
-e MYSQL_SERVER_ROOT_PASSWORD=my-secret-pw \
|
||||
-e MYSQL_SERVER_DB_NAME=onlyoffice \
|
||||
-e MYSQL_SERVER_HOST=onlyoffice-mysql-server \
|
||||
|
@ -236,6 +464,9 @@ sudo docker run --net onlyoffice -i -t -d --restart=always --name onlyoffice-com
|
|||
-e MYSQL_SERVER_PASS=onlyoffice_pass \
|
||||
|
||||
-e DOCUMENT_SERVER_PORT_80_TCP_ADDR=onlyoffice-document-server \
|
||||
-e DOCUMENT_SERVER_JWT_ENABLED=true \
|
||||
-e DOCUMENT_SERVER_JWT_SECRET=${JWT_SECRET} \
|
||||
-e DOCUMENT_SERVER_JWT_HEADER=AuthorizationJwt \
|
||||
|
||||
-e MAIL_SERVER_API_HOST=${MAIL_SERVER_IP} \
|
||||
-e MAIL_SERVER_DB_HOST=onlyoffice-mysql-server \
|
||||
|
@ -246,12 +477,14 @@ sudo docker run --net onlyoffice -i -t -d --restart=always --name onlyoffice-com
|
|||
|
||||
-v /app/onlyoffice/CommunityServer/data:/var/www/onlyoffice/Data \
|
||||
-v /app/onlyoffice/CommunityServer/logs:/var/log/onlyoffice \
|
||||
-v /app/onlyoffice/CommunityServer/letsencrypt:/etc/letsencrypt \
|
||||
-v /sys/fs/cgroup:/sys/fs/cgroup:rw \
|
||||
onlyoffice/communityserver
|
||||
```
|
||||
|
||||
Where `${MAIL_SERVER_IP}` is the IP address for **ONLYOFFICE Mail Server**. You can easily get it using the command:
|
||||
```
|
||||
docker inspect -f '{{range .NetworkSettings.Networks}}{{.IPAddress}}{{end}}' onlyoffice-mail-server
|
||||
MAIL_SERVER_IP=$(docker inspect -f '{{range .NetworkSettings.Networks}}{{.IPAddress}}{{end}}' onlyoffice-mail-server)
|
||||
```
|
||||
|
||||
Alternatively, you can use an automatic installation script to install the whole ONLYOFFICE Community Edition at once. For the mail server to work correctly, you need to specify its hostname 'yourdomain.com'.
|
||||
|
@ -259,7 +492,7 @@ Alternatively, you can use an automatic installation script to install the whole
|
|||
**STEP 1**: Download the Community Edition Docker script file
|
||||
|
||||
```bash
|
||||
wget http://download.onlyoffice.com/install/opensource-install.sh
|
||||
wget https://download.onlyoffice.com/install/opensource-install.sh
|
||||
```
|
||||
|
||||
**STEP 2**: Install ONLYOFFICE Community Edition executing the following command:
|
||||
|
@ -271,10 +504,34 @@ bash opensource-install.sh -md yourdomain.com
|
|||
Or, use [docker-compose](https://docs.docker.com/compose/install "docker-compose"). For the mail server to work correctly, you need to specify its hostname 'yourdomain.com'. Assuming you have docker-compose installed, execute the following command:
|
||||
|
||||
```bash
|
||||
wget https://raw.githubusercontent.com/ONLYOFFICE/Docker-CommunityServer/master/docker-compose.yml
|
||||
wget https://raw.githubusercontent.com/ONLYOFFICE/Docker-CommunityServer/master/docker-compose.groups.yml
|
||||
docker-compose up -d
|
||||
```
|
||||
|
||||
## ONLYOFFICE Document Server ipv6 setup
|
||||
|
||||
(Works and is supported only for Linux hosts)
|
||||
|
||||
Docker does not currently assign IPv6 addresses to containers by default; this feature is still experimental.
|
||||
|
||||
To set up interaction via IPv6, you need to enable support for this feature in your Docker installation. To do this:
|
||||
- create the `/etc/docker/daemon.json` file with the following content:
|
||||
|
||||
```
|
||||
{
|
||||
"ipv6": true,
|
||||
"fixed-cidr-v6": "2001:db8:abc1::/64"
|
||||
}
|
||||
```
|
||||
- restart docker with the following command: `systemctl restart docker`
|
||||
|
||||
After that, all running containers receive an IPv6 address and have an inet6 interface.
|
||||
|
||||
You can check your default bridge network and see the field there
|
||||
`EnableIPv6=true`. A new IPv6 subnet will also be added.
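For example, one way to check this from the host (assuming the default `bridge` network):

```bash
# Prints "true" once IPv6 support has been enabled and Docker has been restarted
docker network inspect bridge --format '{{.EnableIPv6}}'
```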
|
||||
|
||||
For more information, visit the official [Docker manual site](https://docs.docker.com/config/daemon/ipv6/)
|
||||
|
||||
## Issues
|
||||
|
||||
### Docker Issues
|
||||
|
@ -309,7 +566,9 @@ SaaS version: [https://www.onlyoffice.com/cloud-office.aspx](https://www.onlyoff
|
|||
|
||||
## User Feedback and Support
|
||||
|
||||
If you have any problems with or questions about this image, please visit our official forum to find answers to your questions: [dev.onlyoffice.org][1] or you can ask and answer ONLYOFFICE development questions on [Stack Overflow][2].
|
||||
If you have any problems with or questions about this image, please visit our official forum to find answers to your questions: [forum.onlyoffice.com][1] or you can ask and answer ONLYOFFICE development questions on [Stack Overflow][2].
|
||||
|
||||
[1]: http://dev.onlyoffice.org
|
||||
[2]: http://stackoverflow.com/questions/tagged/onlyoffice
|
||||
[1]: https://forum.onlyoffice.com
|
||||
[2]: https://stackoverflow.com/questions/tagged/onlyoffice
|
||||
|
||||
</details>
|
||||
|
|
31
activemq.yml
31
activemq.yml
|
@ -1,31 +0,0 @@
|
|||
version: '2'
|
||||
services:
|
||||
onlyoffice-documentserver:
|
||||
container_name: onlyoffice-documentserver
|
||||
image: onlyoffice/4testing-documentserver-ie:latest
|
||||
environment:
|
||||
- AMQP_SERVER_URL=amqp://guest:guest@onlyoffice-activemq
|
||||
- AMQP_SERVER_TYPE=activemq
|
||||
stdin_open: true
|
||||
restart: always
|
||||
ports:
|
||||
- '80:80'
|
||||
- '443:443'
|
||||
networks:
|
||||
- onlyoffice
|
||||
|
||||
onlyoffice-activemq:
|
||||
container_name: onlyoffice-activemq
|
||||
image: webcenter/activemq:5.14.3
|
||||
environment:
|
||||
- ACTIVEMQ_USERS_guest=guest
|
||||
- ACTIVEMQ_GROUPS_owners=guest
|
||||
restart: always
|
||||
networks:
|
||||
- onlyoffice
|
||||
expose:
|
||||
- '5672'
|
||||
|
||||
networks:
|
||||
onlyoffice:
|
||||
driver: 'bridge'
|
18
build_and_push_all_images.py
Normal file
18
build_and_push_all_images.py
Normal file
|
@ -0,0 +1,18 @@
|
|||
import requests
|
||||
import os
|
||||
|
||||
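# Query the latest DocumentServer release from GitHub so its version can be used as an image tag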
response = requests.get("https://api.github.com/repos/ONLYOFFICE/DocumentServer/releases/latest")
|
||||
|
||||
print('docker buildx build --push --platform linux/arm64,linux/amd64 --tag jiriks74/onlyoffice-documentserver:latest .')
|
||||
os.system(f'docker buildx build --push --platform linux/arm64,linux/amd64 --tag jiriks74/onlyoffice-documentserver:latest .')
|
||||
print("///////////////////////////////////////////////////////////////////////////")
|
||||
print('Build and push ":latest" .........................................finished')
|
||||
print("///////////////////////////////////////////////////////////////////////////")
|
||||
print()
|
||||
|
||||
print(f'docker buildx build --push --platform linux/arm64,linux/amd64 --tag jiriks74/onlyoffice-documentserver:{response.json()["name"].replace("ONLYOFFICE-DocumentServer-", "")} .')
|
||||
os.system(f'docker buildx build --push --platform linux/arm64,linux/amd64 --tag jiriks74/onlyoffice-documentserver:{response.json()["name"].replace("ONLYOFFICE-DocumentServer-", "")} .')
|
||||
print("///////////////////////////////////////////////////////////////////////////")
|
||||
print(f'Build and push ":{response.json()["name"].replace("ONLYOFFICE-DocumentServer-", "")}".........................................finished')
|
||||
print("///////////////////////////////////////////////////////////////////////////")
|
||||
|
|
@ -1,196 +0,0 @@
|
|||
#!/bin/sh
|
||||
|
||||
### BEGIN INIT INFO
|
||||
# Provides: nginx
|
||||
# Required-Start: $local_fs $remote_fs $network $syslog $named
|
||||
# Required-Stop: $local_fs $remote_fs $network $syslog $named
|
||||
# Default-Start: 2 3 4 5
|
||||
# Default-Stop: 0 1 6
|
||||
# Short-Description: starts the nginx web server
|
||||
# Description: starts nginx using start-stop-daemon
|
||||
### END INIT INFO
|
||||
|
||||
PATH=/usr/local/sbin:/usr/local/bin:/sbin:/bin:/usr/sbin:/usr/bin
|
||||
DAEMON=/usr/sbin/nginx
|
||||
NAME=nginx
|
||||
DESC=nginx
|
||||
|
||||
# Include nginx defaults if available
|
||||
if [ -r /etc/default/nginx ]; then
|
||||
. /etc/default/nginx
|
||||
fi
|
||||
|
||||
STOP_SCHEDULE="${STOP_SCHEDULE:-QUIT/5/TERM/5/KILL/5}"
|
||||
|
||||
test -x $DAEMON || exit 0
|
||||
|
||||
. /lib/init/vars.sh
|
||||
. /lib/lsb/init-functions
|
||||
|
||||
# Try to extract nginx pidfile
|
||||
PID=$(cat /etc/nginx/nginx.conf | grep -Ev '^\s*#' | awk 'BEGIN { RS="[;{}]" } { if ($1 == "pid") print $2 }' | head -n1)
|
||||
if [ -z "$PID" ]; then
|
||||
PID=/tmp/nginx.pid
|
||||
fi
|
||||
|
||||
if [ -n "$ULIMIT" ]; then
|
||||
# Set ulimit if it is set in /etc/default/nginx
|
||||
ulimit $ULIMIT
|
||||
fi
|
||||
|
||||
start_nginx() {
|
||||
# Start the daemon/service
|
||||
#
|
||||
# Returns:
|
||||
# 0 if daemon has been started
|
||||
# 1 if daemon was already running
|
||||
# 2 if daemon could not be started
|
||||
start-stop-daemon --start --quiet --pidfile $PID --chuid www-data:www-data --exec $DAEMON --test > /dev/null \
|
||||
|| return 1
|
||||
start-stop-daemon --start --quiet --pidfile $PID --chuid www-data:www-data --exec $DAEMON -- \
|
||||
$DAEMON_OPTS 2>/dev/null \
|
||||
|| return 2
|
||||
}
|
||||
|
||||
test_config() {
|
||||
# Test the nginx configuration
|
||||
$DAEMON -t $DAEMON_OPTS >/dev/null 2>&1
|
||||
}
|
||||
|
||||
stop_nginx() {
|
||||
# Stops the daemon/service
|
||||
#
|
||||
# Return
|
||||
# 0 if daemon has been stopped
|
||||
# 1 if daemon was already stopped
|
||||
# 2 if daemon could not be stopped
|
||||
# other if a failure occurred
|
||||
start-stop-daemon --stop --quiet --retry=$STOP_SCHEDULE --pidfile $PID --name $NAME
|
||||
RETVAL="$?"
|
||||
sleep 1
|
||||
return "$RETVAL"
|
||||
}
|
||||
|
||||
reload_nginx() {
|
||||
# Function that sends a SIGHUP to the daemon/service
|
||||
start-stop-daemon --stop --signal HUP --quiet --pidfile $PID --name $NAME
|
||||
return 0
|
||||
}
|
||||
|
||||
rotate_logs() {
|
||||
# Rotate log files
|
||||
start-stop-daemon --stop --signal USR1 --quiet --pidfile $PID --name $NAME
|
||||
return 0
|
||||
}
|
||||
|
||||
upgrade_nginx() {
|
||||
# Online upgrade nginx executable
|
||||
# http://nginx.org/en/docs/control.html
|
||||
#
|
||||
# Return
|
||||
# 0 if nginx has been successfully upgraded
|
||||
# 1 if nginx is not running
|
||||
# 2 if the pid files were not created on time
|
||||
# 3 if the old master could not be killed
|
||||
if start-stop-daemon --stop --signal USR2 --quiet --pidfile $PID --name $NAME; then
|
||||
# Wait for both old and new master to write their pid file
|
||||
while [ ! -s "${PID}.oldbin" ] || [ ! -s "${PID}" ]; do
|
||||
cnt=`expr $cnt + 1`
|
||||
if [ $cnt -gt 10 ]; then
|
||||
return 2
|
||||
fi
|
||||
sleep 1
|
||||
done
|
||||
# Everything is ready, gracefully stop the old master
|
||||
if start-stop-daemon --stop --signal QUIT --quiet --pidfile "${PID}.oldbin" --name $NAME; then
|
||||
return 0
|
||||
else
|
||||
return 3
|
||||
fi
|
||||
else
|
||||
return 1
|
||||
fi
|
||||
}
|
||||
|
||||
case "$1" in
|
||||
start)
|
||||
log_daemon_msg "Starting $DESC" "$NAME"
|
||||
start_nginx
|
||||
case "$?" in
|
||||
0|1) log_end_msg 0 ;;
|
||||
2) log_end_msg 1 ;;
|
||||
esac
|
||||
;;
|
||||
stop)
|
||||
log_daemon_msg "Stopping $DESC" "$NAME"
|
||||
stop_nginx
|
||||
case "$?" in
|
||||
0|1) log_end_msg 0 ;;
|
||||
2) log_end_msg 1 ;;
|
||||
esac
|
||||
;;
|
||||
restart)
|
||||
log_daemon_msg "Restarting $DESC" "$NAME"
|
||||
|
||||
# Check configuration before stopping nginx
|
||||
if ! test_config; then
|
||||
log_end_msg 1 # Configuration error
|
||||
exit $?
|
||||
fi
|
||||
|
||||
stop_nginx
|
||||
case "$?" in
|
||||
0|1)
|
||||
start_nginx
|
||||
case "$?" in
|
||||
0) log_end_msg 0 ;;
|
||||
1) log_end_msg 1 ;; # Old process is still running
|
||||
*) log_end_msg 1 ;; # Failed to start
|
||||
esac
|
||||
;;
|
||||
*)
|
||||
# Failed to stop
|
||||
log_end_msg 1
|
||||
;;
|
||||
esac
|
||||
;;
|
||||
reload|force-reload)
|
||||
log_daemon_msg "Reloading $DESC configuration" "$NAME"
|
||||
|
||||
# Check configuration before stopping nginx
|
||||
#
|
||||
# This is not entirely correct since the on-disk nginx binary
|
||||
# may differ from the in-memory one, but that's not common.
|
||||
# We prefer to check the configuration and return an error
|
||||
# to the administrator.
|
||||
if ! test_config; then
|
||||
log_end_msg 1 # Configuration error
|
||||
exit $?
|
||||
fi
|
||||
|
||||
reload_nginx
|
||||
log_end_msg $?
|
||||
;;
|
||||
configtest|testconfig)
|
||||
log_daemon_msg "Testing $DESC configuration"
|
||||
test_config
|
||||
log_end_msg $?
|
||||
;;
|
||||
status)
|
||||
status_of_proc -p $PID "$DAEMON" "$NAME" && exit 0 || exit $?
|
||||
;;
|
||||
upgrade)
|
||||
log_daemon_msg "Upgrading binary" "$NAME"
|
||||
upgrade_nginx
|
||||
log_end_msg $?
|
||||
;;
|
||||
rotate)
|
||||
log_daemon_msg "Re-opening $DESC log files" "$NAME"
|
||||
rotate_logs
|
||||
log_end_msg $?
|
||||
;;
|
||||
*)
|
||||
echo "Usage: $NAME {start|stop|restart|reload|force-reload|status|configtest|rotate|upgrade}" >&2
|
||||
exit 3
|
||||
;;
|
||||
esac
|
|
@ -1,63 +0,0 @@
|
|||
user www-data;
|
||||
worker_processes 1;
|
||||
pid /tmp/nginx.pid;
|
||||
|
||||
events {
|
||||
worker_connections 524288;
|
||||
# multi_accept on;
|
||||
}
|
||||
|
||||
http {
|
||||
|
||||
##
|
||||
# Basic Settings
|
||||
##
|
||||
|
||||
sendfile on;
|
||||
tcp_nopush on;
|
||||
tcp_nodelay on;
|
||||
keepalive_timeout 65;
|
||||
types_hash_max_size 2048;
|
||||
# server_tokens off;
|
||||
|
||||
# server_names_hash_bucket_size 64;
|
||||
# server_name_in_redirect off;
|
||||
|
||||
include /etc/nginx/mime.types;
|
||||
default_type application/octet-stream;
|
||||
|
||||
##
|
||||
# SSL Settings
|
||||
##
|
||||
|
||||
ssl_protocols TLSv1 TLSv1.1 TLSv1.2; # Dropping SSLv3, ref: POODLE
|
||||
ssl_prefer_server_ciphers on;
|
||||
|
||||
##
|
||||
# Logging Settings
|
||||
##
|
||||
|
||||
access_log off;
|
||||
error_log /var/log/nginx/error.log;
|
||||
|
||||
##
|
||||
# Gzip Settings
|
||||
##
|
||||
|
||||
gzip on;
|
||||
gzip_disable "msie6";
|
||||
|
||||
# gzip_vary on;
|
||||
# gzip_proxied any;
|
||||
# gzip_comp_level 6;
|
||||
# gzip_buffers 16 8k;
|
||||
# gzip_http_version 1.1;
|
||||
# gzip_types text/plain text/css application/json application/javascript text/xml application/xml application/xml+rss text/javascript;
|
||||
|
||||
##
|
||||
# Virtual Host Configs
|
||||
##
|
||||
|
||||
include /etc/nginx/conf.d/*.conf;
|
||||
include /etc/nginx/sites-enabled/*;
|
||||
}
|
13
config/supervisor/ds/ds-converter.conf
Normal file
13
config/supervisor/ds/ds-converter.conf
Normal file
|
@ -0,0 +1,13 @@
|
|||
[program:converter]
|
||||
command=/var/www/COMPANY_NAME/documentserver/server/FileConverter/converter
|
||||
directory=/var/www/COMPANY_NAME/documentserver/server/FileConverter
|
||||
user=ds
|
||||
environment=NODE_ENV=production-linux,NODE_CONFIG_DIR=/etc/COMPANY_NAME/documentserver,NODE_DISABLE_COLORS=1,APPLICATION_NAME=COMPANY_NAME
|
||||
stdout_logfile=/var/log/COMPANY_NAME/documentserver/converter/out.log
|
||||
stdout_logfile_backups=0
|
||||
stdout_logfile_maxbytes=0
|
||||
stderr_logfile=/var/log/COMPANY_NAME/documentserver/converter/err.log
|
||||
stderr_logfile_backups=0
|
||||
stderr_logfile_maxbytes=0
|
||||
autostart=true
|
||||
autorestart=true
|
13
config/supervisor/ds/ds-docservice.conf
Normal file
13
config/supervisor/ds/ds-docservice.conf
Normal file
|
@ -0,0 +1,13 @@
|
|||
[program:docservice]
|
||||
command=/var/www/COMPANY_NAME/documentserver/server/DocService/docservice
|
||||
directory=/var/www/COMPANY_NAME/documentserver/server/DocService
|
||||
user=ds
|
||||
environment=NODE_ENV=production-linux,NODE_CONFIG_DIR=/etc/COMPANY_NAME/documentserver,NODE_DISABLE_COLORS=1
|
||||
stdout_logfile=/var/log/COMPANY_NAME/documentserver/docservice/out.log
|
||||
stdout_logfile_backups=0
|
||||
stdout_logfile_maxbytes=0
|
||||
stderr_logfile=/var/log/COMPANY_NAME/documentserver/docservice/err.log
|
||||
stderr_logfile_backups=0
|
||||
stderr_logfile_maxbytes=0
|
||||
autostart=true
|
||||
autorestart=true
|
14
config/supervisor/ds/ds-example.conf
Normal file
14
config/supervisor/ds/ds-example.conf
Normal file
|
@ -0,0 +1,14 @@
|
|||
[program:example]
|
||||
command=/var/www/COMPANY_NAME/documentserver-example/example
|
||||
directory=/var/www/COMPANY_NAME/documentserver-example/
|
||||
user=ds
|
||||
environment=NODE_ENV=production-linux,NODE_CONFIG_DIR=/etc/COMPANY_NAME/documentserver-example,NODE_DISABLE_COLORS=1
|
||||
stdout_logfile=/var/log/COMPANY_NAME/documentserver-example/out.log
|
||||
stdout_logfile_backups=0
|
||||
stdout_logfile_maxbytes=0
|
||||
stderr_logfile=/var/log/COMPANY_NAME/documentserver-example/err.log
|
||||
stderr_logfile_backups=0
|
||||
stderr_logfile_maxbytes=0
|
||||
autostart=false
|
||||
autorestart=true
|
||||
redirect_stderr=true
|
13
config/supervisor/ds/ds-metrics.conf
Normal file
13
config/supervisor/ds/ds-metrics.conf
Normal file
|
@ -0,0 +1,13 @@
|
|||
[program:metrics]
|
||||
command=/var/www/COMPANY_NAME/documentserver/server/Metrics/metrics ./config/config.js
|
||||
directory=/var/www/COMPANY_NAME/documentserver/server/Metrics
|
||||
user=ds
|
||||
environment=NODE_DISABLE_COLORS=1
|
||||
stdout_logfile=/var/log/COMPANY_NAME/documentserver/metrics/out.log
|
||||
stdout_logfile_backups=0
|
||||
stdout_logfile_maxbytes=0
|
||||
stderr_logfile=/var/log/COMPANY_NAME/documentserver/metrics/err.log
|
||||
stderr_logfile_backups=0
|
||||
stderr_logfile_maxbytes=0
|
||||
autostart=true
|
||||
autorestart=true
|
2
config/supervisor/ds/ds.conf
Normal file
2
config/supervisor/ds/ds.conf
Normal file
|
@ -0,0 +1,2 @@
|
|||
[group:ds]
|
||||
programs=docservice,converter,metrics,example
|
|
@ -30,8 +30,8 @@ DESC=supervisor
|
|||
|
||||
test -x $DAEMON || exit 0
|
||||
|
||||
LOGDIR=/tmp
|
||||
PIDFILE=/tmp/$NAME.pid
|
||||
LOGDIR=/var/log/supervisor
|
||||
PIDFILE=/var/run/$NAME.pid
|
||||
PS_COUNT=0
|
||||
DODTIME=5 # Time to wait for the server to die, in seconds
|
||||
# If this value is set too low you might not
|
||||
|
@ -101,7 +101,7 @@ case "$1" in
|
|||
rm -f "$PIDFILE"
|
||||
fi
|
||||
echo -n "Starting $DESC: "
|
||||
start-stop-daemon --start --quiet --chuid ds:ds --pidfile $PIDFILE \
|
||||
start-stop-daemon --start --quiet --pidfile $PIDFILE \
|
||||
--startas $DAEMON -- $DAEMON_OPTS
|
||||
test -f $PIDFILE || sleep 1
|
||||
if running ; then
|
||||
|
@ -152,7 +152,7 @@ case "$1" in
|
|||
echo -n "Restarting $DESC: "
|
||||
start-stop-daemon --stop --quiet --oknodo --pidfile $PIDFILE
|
||||
[ -n "$DODTIME" ] && sleep $DODTIME
|
||||
start-stop-daemon --start --quiet --chuid ds:ds --pidfile $PIDFILE \
|
||||
start-stop-daemon --start --quiet --pidfile $PIDFILE \
|
||||
--startas $DAEMON -- $DAEMON_OPTS
|
||||
echo "$NAME."
|
||||
;;
|
||||
|
|
|
@ -1,27 +0,0 @@
|
|||
; supervisor config file
|
||||
|
||||
[inet_http_server]
|
||||
port = 127.0.0.1:9001
|
||||
|
||||
[supervisord]
|
||||
logfile=/tmp/supervisord.log ; (main log file;default $CWD/supervisord.log)
|
||||
pidfile=/tmp/supervisord.pid ; (supervisord pidfile;default supervisord.pid)
|
||||
childlogdir=/tmp ; ('AUTO' child log dir, default $TEMP)
|
||||
|
||||
; the below section must remain in the config file for RPC
|
||||
; (supervisorctl/web interface) to work, additional interfaces may be
|
||||
; added by defining them in separate rpcinterface: sections
|
||||
[rpcinterface:supervisor]
|
||||
supervisor.rpcinterface_factory = supervisor.rpcinterface:make_main_rpcinterface
|
||||
|
||||
[supervisorctl]
|
||||
serverurl = http://localhost:9001 ; use a unix:// URL for a unix socket
|
||||
|
||||
; The [include] section can just contain the "files" setting. This
|
||||
; setting can list multiple files (separated by whitespace or
|
||||
; newlines). It can also contain wildcards. The filenames are
|
||||
; interpreted as relative to this file. Included files *cannot*
|
||||
; include files themselves.
|
||||
|
||||
[include]
|
||||
files = /etc/supervisor/conf.d/*.conf
|
11
default.nix
Normal file
11
default.nix
Normal file
|
@ -0,0 +1,11 @@
|
|||
{ pkgs ? (import <nixpkgs> {
|
||||
config.allowUnfree = true;
|
||||
}),
|
||||
}:
|
||||
pkgs.mkShell {
|
||||
pure = true;
|
||||
packages = with pkgs; [
|
||||
# Choose the build tools that you need
|
||||
act
|
||||
];
|
||||
}
|
169
docker-bake.hcl
Normal file
169
docker-bake.hcl
Normal file
|
@ -0,0 +1,169 @@
|
|||
variable "TAG" {
|
||||
default = ""
|
||||
}
|
||||
|
||||
variable "SHORTER_TAG" {
|
||||
default = ""
|
||||
}
|
||||
|
||||
variable "SHORTEST_TAG" {
|
||||
default = ""
|
||||
}
|
||||
|
||||
variable "PULL_TAG" {
|
||||
default = ""
|
||||
}
|
||||
|
||||
variable "COMPANY_NAME" {
|
||||
default = ""
|
||||
}
|
||||
|
||||
variable "PREFIX_NAME" {
|
||||
default = ""
|
||||
}
|
||||
|
||||
variable "PRODUCT_EDITION" {
|
||||
default = ""
|
||||
}
|
||||
|
||||
variable "PRODUCT_NAME" {
|
||||
default = ""
|
||||
}
|
||||
|
||||
variable "PACKAGE_VERSION" {
|
||||
default = ""
|
||||
}
|
||||
|
||||
variable "DOCKERFILE" {
|
||||
default = ""
|
||||
}
|
||||
|
||||
variable "PLATFORM" {
|
||||
default = ""
|
||||
}
|
||||
|
||||
variable "PACKAGE_BASEURL" {
|
||||
default = ""
|
||||
}
|
||||
|
||||
variable "PACKAGE_FILE" {
|
||||
default = ""
|
||||
}
|
||||
|
||||
variable "BUILD_CHANNEL" {
|
||||
default = ""
|
||||
}
|
||||
|
||||
variable "PUSH_MAJOR" {
|
||||
default = "false"
|
||||
}
|
||||
|
||||
variable "LATEST" {
|
||||
default = "false"
|
||||
}
|
||||
|
||||
### ↓ Variables for UCS build ↓
|
||||
|
||||
variable "BASE_IMAGE" {
|
||||
default = ""
|
||||
}
|
||||
|
||||
variable "PG_VERSION" {
|
||||
default = ""
|
||||
}
|
||||
|
||||
variable "UCS_REBUILD" {
|
||||
default = ""
|
||||
}
|
||||
|
||||
variable "UCS_PREFIX" {
|
||||
default = ""
|
||||
}
|
||||
|
||||
### ↑ Variables for UCS build ↑
|
||||
|
||||
target "documentserver" {
|
||||
target = "documentserver"
|
||||
dockerfile = "${DOCKERFILE}"
|
||||
tags = [
|
||||
"docker.io/${COMPANY_NAME}/${PREFIX_NAME}${PRODUCT_NAME}${PRODUCT_EDITION}:${TAG}",
|
||||
equal("nightly",BUILD_CHANNEL) ? "docker.io/${COMPANY_NAME}/${PREFIX_NAME}${PRODUCT_NAME}${PRODUCT_EDITION}:latest": "",
|
||||
]
|
||||
platforms = ["${PLATFORM}"]
|
||||
args = {
|
||||
"COMPANY_NAME": "${COMPANY_NAME}"
|
||||
"PRODUCT_NAME": "${PRODUCT_NAME}"
|
||||
"PRODUCT_EDITION": "${PRODUCT_EDITION}"
|
||||
"PACKAGE_VERSION": "${PACKAGE_VERSION}"
|
||||
"PACKAGE_BASEURL": "${PACKAGE_BASEURL}"
|
||||
"PLATFORM": "${PLATFORM}"
|
||||
}
|
||||
}
|
||||
|
||||
target "documentserver-stable" {
|
||||
target = "documentserver-stable"
|
||||
dockerfile = "production.dockerfile"
|
||||
tags = ["docker.io/${COMPANY_NAME}/${PREFIX_NAME}${PRODUCT_NAME}${PRODUCT_EDITION}:${TAG}",
|
||||
"docker.io/${COMPANY_NAME}/${PREFIX_NAME}${PRODUCT_NAME}${PRODUCT_EDITION}:${SHORTER_TAG}",
|
||||
"docker.io/${COMPANY_NAME}/${PREFIX_NAME}${PRODUCT_NAME}${PRODUCT_EDITION}:${SHORTEST_TAG}",
|
||||
"docker.io/${COMPANY_NAME}/${PREFIX_NAME}${PRODUCT_NAME}${PRODUCT_EDITION}:latest",
|
||||
equal("-ee",PRODUCT_EDITION) ? "docker.io/${COMPANY_NAME}4enterprise/${PREFIX_NAME}${PRODUCT_NAME}${PRODUCT_EDITION}:${TAG}": "",]
|
||||
platforms = ["linux/amd64", "linux/arm64"]
|
||||
args = {
|
||||
"PULL_TAG": "${PULL_TAG}"
|
||||
"COMPANY_NAME": "${COMPANY_NAME}"
|
||||
"PRODUCT_NAME": "${PRODUCT_NAME}"
|
||||
"PRODUCT_EDITION": "${PRODUCT_EDITION}"
|
||||
}
|
||||
}
|
||||
|
||||
target "documentserver-ucs" {
|
||||
target = "documentserver"
|
||||
dockerfile = "${DOCKERFILE}"
|
||||
tags = [
|
||||
"docker.io/${COMPANY_NAME}/${PRODUCT_NAME}${PRODUCT_EDITION}-ucs:${TAG}"
|
||||
]
|
||||
platforms = ["linux/amd64", "linux/arm64"]
|
||||
args = {
|
||||
"PRODUCT_EDITION": "${PRODUCT_EDITION}"
|
||||
"PRODUCT_NAME": "${PRODUCT_NAME}"
|
||||
"COMPANY_NAME": "${COMPANY_NAME}"
|
||||
"PACKAGE_VERSION": "${PACKAGE_VERSION}"
|
||||
"PACKAGE_BASEURL": "${PACKAGE_BASEURL}"
|
||||
"BASE_IMAGE": "${BASE_IMAGE}"
|
||||
"PG_VERSION": "${PG_VERSION}"
|
||||
}
|
||||
}
|
||||
|
||||
target "documentserver-nonexample" {
|
||||
target = "documentserver-nonexample"
|
||||
dockerfile = "production.dockerfile"
|
||||
tags = [ "docker.io/${COMPANY_NAME}/${PRODUCT_NAME}${PREFIX_NAME}${PRODUCT_EDITION}:${TAG}-nonexample" ]
|
||||
platforms = ["linux/amd64", "linux/arm64"]
|
||||
args = {
|
||||
"PULL_TAG": "${PULL_TAG}"
|
||||
"COMPANY_NAME": "${COMPANY_NAME}"
|
||||
"PRODUCT_NAME": "${PRODUCT_NAME}"
|
||||
"PRODUCT_EDITION": "${PRODUCT_EDITION}"
|
||||
}
|
||||
}
|
||||
|
||||
target "documentserver-stable-rebuild" {
|
||||
target = "documentserver-stable-rebuild"
|
||||
dockerfile = "production.dockerfile"
|
||||
tags = equal("true",UCS_REBUILD) ? ["docker.io/${COMPANY_NAME}/${PREFIX_NAME}${PRODUCT_NAME}${PRODUCT_EDITION}-ucs:${TAG}",] : [
|
||||
"docker.io/${COMPANY_NAME}/${PREFIX_NAME}${PRODUCT_NAME}${PRODUCT_EDITION}:${TAG}",
|
||||
equal("",PREFIX_NAME) ? "docker.io/${COMPANY_NAME}/${PREFIX_NAME}${PRODUCT_NAME}${PRODUCT_EDITION}:${SHORTER_TAG}": "",
|
||||
equal("true",PUSH_MAJOR) ? "docker.io/${COMPANY_NAME}/${PREFIX_NAME}${PRODUCT_NAME}${PRODUCT_EDITION}:${SHORTEST_TAG}": "",
|
||||
equal("",PREFIX_NAME) && equal("true",LATEST) ? "docker.io/${COMPANY_NAME}/${PREFIX_NAME}${PRODUCT_NAME}${PRODUCT_EDITION}:latest": "",
|
||||
equal("-ee",PRODUCT_EDITION) && equal("",PREFIX_NAME) ? "docker.io/${COMPANY_NAME}4enterprise/${PREFIX_NAME}${PRODUCT_NAME}${PRODUCT_EDITION}:${TAG}": "",
|
||||
]
|
||||
platforms = ["linux/amd64", "linux/arm64"]
|
||||
args = {
|
||||
"UCS_PREFIX": "${UCS_PREFIX}"
|
||||
"PULL_TAG": "${PULL_TAG}"
|
||||
"COMPANY_NAME": "${COMPANY_NAME}"
|
||||
"PRODUCT_NAME": "${PRODUCT_NAME}"
|
||||
"PRODUCT_EDITION": "${PRODUCT_EDITION}"
|
||||
}
|
||||
}
|
|
@ -1,117 +1,61 @@
|
|||
version: '2'
|
||||
services:
|
||||
onlyoffice-documentserver-data:
|
||||
container_name: onlyoffice-documentserver-data
|
||||
image: onlyoffice/documentserver:latest
|
||||
onlyoffice-documentserver:
|
||||
build:
|
||||
context: .
|
||||
container_name: onlyoffice-documentserver
|
||||
depends_on:
|
||||
- onlyoffice-postgresql
|
||||
- onlyoffice-rabbitmq
|
||||
environment:
|
||||
- ONLYOFFICE_DATA_CONTAINER=true
|
||||
- POSTGRESQL_SERVER_HOST=onlyoffice-postgresql
|
||||
- POSTGRESQL_SERVER_PORT=5432
|
||||
- POSTGRESQL_SERVER_DB_NAME=onlyoffice
|
||||
- POSTGRESQL_SERVER_USER=onlyoffice
|
||||
- AMQP_SERVER_URL=amqp://guest:guest@onlyoffice-rabbitmq
|
||||
- REDIS_SERVER_HOST=onlyoffice-redis
|
||||
- REDIS_SERVER_PORT=6379
|
||||
- DB_TYPE=postgres
|
||||
- DB_HOST=onlyoffice-postgresql
|
||||
- DB_PORT=5432
|
||||
- DB_NAME=onlyoffice
|
||||
- DB_USER=onlyoffice
|
||||
- AMQP_URI=amqp://guest:guest@onlyoffice-rabbitmq
|
||||
# Uncomment strings below to enable the JSON Web Token validation.
|
||||
#- JWT_ENABLED=true
|
||||
#- JWT_SECRET=secret
|
||||
#- JWT_HEADER=Authorization
|
||||
#- JWT_HEADER=AuthorizationJwt
|
||||
#- JWT_IN_BODY=true
|
||||
# Uncomment the line below to set larger file limits (about 1GB)
|
||||
#- LARGER_FILE_LIMITS=true
|
||||
ports:
|
||||
- '80:80'
|
||||
- '443:443'
|
||||
stdin_open: true
|
||||
restart: always
|
||||
networks:
|
||||
- onlyoffice
|
||||
stop_grace_period: 120s
|
||||
volumes:
|
||||
- /etc/onlyoffice
|
||||
# Uncomment the line below to get access to the slide themes directory.
|
||||
# To use the themes, copy them to the slideThemes directory and run `docker exec -it <container-name> /usr/bin/documentserver-generate-allfonts.sh`
|
||||
#- ./slideThemes:/var/www/onlyoffice/documentserver/sdkjs/slide/themes/src
|
||||
- /var/www/onlyoffice/Data
|
||||
- /var/log/onlyoffice
|
||||
- /var/lib/onlyoffice/documentserver/App_Data/cache/files
|
||||
- /var/www/onlyoffice/documentserver-example/public/files
|
||||
- /usr/share/fonts
|
||||
|
||||
onlyoffice-documentserver:
|
||||
image: onlyoffice/documentserver:latest
|
||||
depends_on:
|
||||
- onlyoffice-documentserver-data
|
||||
- onlyoffice-postgresql
|
||||
- onlyoffice-redis
|
||||
- onlyoffice-rabbitmq
|
||||
environment:
|
||||
- ONLYOFFICE_DATA_CONTAINER_HOST=onlyoffice-documentserver-data
|
||||
- BALANCE=uri depth 3
|
||||
- EXCLUDE_PORTS=443
|
||||
- HTTP_CHECK=GET /healthcheck
|
||||
- EXTRA_SETTINGS=http-check expect string true
|
||||
# Uncomment the string below to redirect HTTP request to HTTPS request.
|
||||
#- FORCE_SSL=true
|
||||
stdin_open: true
|
||||
restart: always
|
||||
networks:
|
||||
- onlyoffice
|
||||
expose:
|
||||
- '80'
|
||||
volumes_from:
|
||||
- onlyoffice-documentserver-data
|
||||
|
||||
onlyoffice-haproxy:
|
||||
container_name: onlyoffice-haproxy
|
||||
image: dockercloud/haproxy:1.5.1
|
||||
depends_on:
|
||||
- onlyoffice-documentserver
|
||||
environment:
|
||||
- MODE=http
|
||||
# Uncomment the string below to specify the path of ssl certificates
|
||||
#- CERT_FOLDER=/certs/
|
||||
stdin_open: true
|
||||
links:
|
||||
- onlyoffice-documentserver
|
||||
volumes:
|
||||
- /var/run/docker.sock:/var/run/docker.sock
|
||||
# Uncomment the string below to map a ssl certificate from host
|
||||
# to the proxy container
|
||||
#- /app/onlyoffice/DocumentServer/data/certs/onlyoffice.pem:/certs/cert1.pem
|
||||
restart: always
|
||||
networks:
|
||||
- onlyoffice
|
||||
ports:
|
||||
- '80:80'
|
||||
- '443:443'
|
||||
- '1936:1936'
|
||||
|
||||
onlyoffice-redis:
|
||||
container_name: onlyoffice-redis
|
||||
image: redis
|
||||
restart: always
|
||||
networks:
|
||||
- onlyoffice
|
||||
expose:
|
||||
- '6379'
|
||||
|
||||
onlyoffice-rabbitmq:
|
||||
container_name: onlyoffice-rabbitmq
|
||||
image: rabbitmq
|
||||
restart: always
|
||||
networks:
|
||||
- onlyoffice
|
||||
expose:
|
||||
- '5672'
|
||||
|
||||
onlyoffice-postgresql:
|
||||
container_name: onlyoffice-postgresql
|
||||
image: postgres:9.5
|
||||
image: postgres:12
|
||||
environment:
|
||||
- POSTGRES_DB=onlyoffice
|
||||
- POSTGRES_USER=onlyoffice
|
||||
networks:
|
||||
- onlyoffice
|
||||
- POSTGRES_HOST_AUTH_METHOD=trust
|
||||
restart: always
|
||||
expose:
|
||||
- '5432'
|
||||
volumes:
|
||||
- postgresql_data:/var/lib/postgresql
|
||||
|
||||
networks:
|
||||
onlyoffice:
|
||||
driver: 'bridge'
|
||||
|
||||
volumes:
|
||||
postgresql_data:
|
||||
|
|
33
production.dockerfile
Normal file
33
production.dockerfile
Normal file
|
@ -0,0 +1,33 @@
|
|||
### Arguments available only for FROM instruction ###
|
||||
ARG PULL_TAG=latest
|
||||
ARG COMPANY_NAME=onlyoffice
|
||||
ARG PRODUCT_EDITION=
|
||||
### Rebuild arguments
|
||||
ARG UCS_PREFIX=
|
||||
ARG IMAGE=${COMPANY_NAME}/documentserver${PRODUCT_EDITION}${UCS_PREFIX}:${PULL_TAG}
|
||||
|
||||
### Build main-release ###
|
||||
|
||||
FROM ${COMPANY_NAME}/4testing-documentserver${PRODUCT_EDITION}:${PULL_TAG} as documentserver-stable
|
||||
|
||||
### Rebuild stable images with secure updates
|
||||
FROM ${IMAGE} as documentserver-stable-rebuild
|
||||
RUN echo "This is rebuild" \
|
||||
&& apt-get update -y \
|
||||
&& apt-get upgrade -y
|
||||
|
||||
### Build nonexample ###
|
||||
|
||||
FROM ${COMPANY_NAME}/documentserver${PRODUCT_EDITION}:${PULL_TAG} as documentserver-nonexample
|
||||
|
||||
ARG COMPANY_NAME=onlyoffice
|
||||
ARG PRODUCT_NAME=documentserver
|
||||
ARG DS_SUPERVISOR_CONF=/etc/supervisor/conf.d/ds.conf
|
||||
|
||||
### Remove all documentserver-example data ###
|
||||
|
||||
RUN rm -rf /var/www/$COMPANY_NAME/$PRODUCT_NAME-example \
|
||||
&& rm -rf /etc/$COMPANY_NAME/$PRODUCT_NAME-example \
|
||||
&& rm -f $DS_SUPERVISOR_CONF \
|
||||
&& rm -f /etc/nginx/includes/ds-example.conf \
|
||||
&& ln -s /etc/$COMPANY_NAME/$PRODUCT_NAME/supervisor/ds.conf $DS_SUPERVISOR_CONF
|
438
run-document-server.sh
Executable file → Normal file
438
run-document-server.sh
Executable file → Normal file
|
@ -1,26 +1,69 @@
|
|||
#!/bin/bash
|
||||
|
||||
umask 0022
|
||||
|
||||
function clean_exit {
|
||||
if [ ${ONLYOFFICE_DATA_CONTAINER} == "false" ] && \
|
||||
[ ${ONLYOFFICE_DATA_CONTAINER_HOST} == "localhost" ]; then
|
||||
/usr/bin/documentserver-prepare4shutdown.sh
|
||||
fi
|
||||
}
|
||||
|
||||
trap clean_exit SIGTERM
|
||||
|
||||
# Define '**' behavior explicitly
|
||||
shopt -s globstar
|
||||
|
||||
APP_DIR="/var/www/${COMPANY_NAME}/documentserver"
|
||||
DATA_DIR="/var/www/${COMPANY_NAME}/Data"
|
||||
PRIVATE_DATA_DIR="${DATA_DIR}/.private"
|
||||
DS_RELEASE_DATE="${PRIVATE_DATA_DIR}/ds_release_date"
|
||||
LOG_DIR="/var/log/${COMPANY_NAME}"
|
||||
DS_LOG_DIR="${LOG_DIR}/documentserver"
|
||||
LIB_DIR="/var/lib/${COMPANY_NAME}"
|
||||
DS_LIB_DIR="${LIB_DIR}/documentserver"
|
||||
CONF_DIR="/etc/${COMPANY_NAME}/documentserver"
|
||||
IS_UPGRADE="false"
|
||||
|
||||
ONLYOFFICE_DATA_CONTAINER=${ONLYOFFICE_DATA_CONTAINER:-false}
|
||||
ONLYOFFICE_DATA_CONTAINER_HOST=${ONLYOFFICE_DATA_CONTAINER_HOST:-localhost}
|
||||
ONLYOFFICE_DATA_CONTAINER_PORT=80
|
||||
|
||||
SSL_CERTIFICATES_DIR="${DATA_DIR}/certs"
|
||||
SSL_CERTIFICATE_PATH=${SSL_CERTIFICATE_PATH:-${SSL_CERTIFICATES_DIR}/onlyoffice.crt}
|
||||
SSL_KEY_PATH=${SSL_KEY_PATH:-${SSL_CERTIFICATES_DIR}/onlyoffice.key}
|
||||
RELEASE_DATE="$(stat -c="%y" ${APP_DIR}/server/DocService/docservice | sed -r 's/=([0-9]+)-([0-9]+)-([0-9]+) ([0-9:.+ ]+)/\1-\2-\3/')";
|
||||
if [ -f ${DS_RELEASE_DATE} ]; then
|
||||
PREV_RELEASE_DATE=$(head -n 1 ${DS_RELEASE_DATE})
|
||||
else
|
||||
PREV_RELEASE_DATE="0"
|
||||
fi
|
||||
|
||||
if [ "${RELEASE_DATE}" != "${PREV_RELEASE_DATE}" ]; then
|
||||
if [ ${ONLYOFFICE_DATA_CONTAINER} != "true" ]; then
|
||||
IS_UPGRADE="true";
|
||||
fi
|
||||
fi
|
||||
|
||||
SSL_CERTIFICATES_DIR="/usr/share/ca-certificates/ds"
|
||||
mkdir -p ${SSL_CERTIFICATES_DIR}
|
||||
if [[ -d ${DATA_DIR}/certs ]] && [ -e ${DATA_DIR}/certs/*.crt ]; then
|
||||
cp -f ${DATA_DIR}/certs/* ${SSL_CERTIFICATES_DIR}
|
||||
chmod 644 ${SSL_CERTIFICATES_DIR}/*.crt ${SSL_CERTIFICATES_DIR}/*.pem
|
||||
chmod 400 ${SSL_CERTIFICATES_DIR}/*.key
|
||||
fi
|
||||
|
||||
if [[ -z $SSL_CERTIFICATE_PATH ]] && [[ -f ${SSL_CERTIFICATES_DIR}/${COMPANY_NAME}.crt ]]; then
|
||||
SSL_CERTIFICATE_PATH=${SSL_CERTIFICATES_DIR}/${COMPANY_NAME}.crt
|
||||
else
|
||||
SSL_CERTIFICATE_PATH=${SSL_CERTIFICATE_PATH:-${SSL_CERTIFICATES_DIR}/tls.crt}
|
||||
fi
|
||||
if [[ -z $SSL_KEY_PATH ]] && [[ -f ${SSL_CERTIFICATES_DIR}/${COMPANY_NAME}.key ]]; then
|
||||
SSL_KEY_PATH=${SSL_CERTIFICATES_DIR}/${COMPANY_NAME}.key
|
||||
else
|
||||
SSL_KEY_PATH=${SSL_KEY_PATH:-${SSL_CERTIFICATES_DIR}/tls.key}
|
||||
fi
|
||||
CA_CERTIFICATES_PATH=${CA_CERTIFICATES_PATH:-${SSL_CERTIFICATES_DIR}/ca-certificates.pem}
|
||||
SSL_DHPARAM_PATH=${SSL_DHPARAM_PATH:-${SSL_CERTIFICATES_DIR}/dhparam.pem}
|
||||
SSL_VERIFY_CLIENT=${SSL_VERIFY_CLIENT:-off}
|
||||
USE_UNAUTHORIZED_STORAGE=${USE_UNAUTHORIZED_STORAGE:-false}
|
||||
ONLYOFFICE_HTTPS_HSTS_ENABLED=${ONLYOFFICE_HTTPS_HSTS_ENABLED:-true}
|
||||
ONLYOFFICE_HTTPS_HSTS_MAXAGE=${ONLYOFFICE_HTTPS_HSTS_MAXAGE:-31536000}
|
||||
SYSCONF_TEMPLATES_DIR="/app/ds/setup/config"
|
||||
|
@ -33,42 +76,100 @@ NGINX_ONLYOFFICE_EXAMPLE_CONF="${NGINX_ONLYOFFICE_EXAMPLE_PATH}/includes/ds-exam
|
|||
|
||||
NGINX_CONFIG_PATH="/etc/nginx/nginx.conf"
|
||||
NGINX_WORKER_PROCESSES=${NGINX_WORKER_PROCESSES:-1}
|
||||
NGINX_WORKER_CONNECTIONS=${NGINX_WORKER_CONNECTIONS:-$(ulimit -n)}
|
||||
# Limiting the maximum number of simultaneous connections due to possible memory shortage
|
||||
[ $(ulimit -n) -gt 1048576 ] && NGINX_WORKER_CONNECTIONS=${NGINX_WORKER_CONNECTIONS:-1048576} || NGINX_WORKER_CONNECTIONS=${NGINX_WORKER_CONNECTIONS:-$(ulimit -n)}
|
||||
|
||||
JWT_ENABLED=${JWT_ENABLED:-false}
|
||||
JWT_SECRET=${JWT_SECRET:-secret}
|
||||
JWT_ENABLED=${JWT_ENABLED:-true}
|
||||
|
||||
# validate user's vars before using them in json
|
||||
if [ "${JWT_ENABLED}" == "true" ]; then
|
||||
JWT_ENABLED="true"
|
||||
else
|
||||
JWT_ENABLED="false"
|
||||
fi
|
||||
|
||||
[ -z $JWT_SECRET ] && JWT_MESSAGE='JWT is enabled by default. A random secret is generated automatically. Run the command "docker exec $(sudo docker ps -q) sudo documentserver-jwt-status.sh" to get information about JWT.'
|
||||
|
||||
JWT_SECRET=${JWT_SECRET:-$(pwgen -s 32)}
|
||||
JWT_HEADER=${JWT_HEADER:-Authorization}
|
||||
JWT_IN_BODY=${JWT_IN_BODY:-false}
|
||||
|
||||
WOPI_ENABLED=${WOPI_ENABLED:-false}
|
||||
ALLOW_META_IP_ADDRESS=${ALLOW_META_IP_ADDRESS:-false}
|
||||
ALLOW_PRIVATE_IP_ADDRESS=${ALLOW_PRIVATE_IP_ADDRESS:-false}
|
||||
|
||||
GENERATE_FONTS=${GENERATE_FONTS:-true}
|
||||
|
||||
if [[ ${PRODUCT_NAME}${PRODUCT_EDITION} == "documentserver" ]]; then
|
||||
REDIS_ENABLED=false
|
||||
else
|
||||
REDIS_ENABLED=true
|
||||
fi
|
||||
|
||||
ONLYOFFICE_DEFAULT_CONFIG=${CONF_DIR}/local.json
|
||||
ONLYOFFICE_LOG4JS_CONFIG=${CONF_DIR}/log4js/production.json
|
||||
ONLYOFFICE_EXAMPLE_CONFIG=${CONF_DIR}-example/local.json
|
||||
|
||||
JSON_BIN=${APP_DIR}/npm/node_modules/.bin/json
|
||||
JSON_BIN=${APP_DIR}/npm/json
|
||||
JSON="${JSON_BIN} -q -f ${ONLYOFFICE_DEFAULT_CONFIG}"
|
||||
JSON_LOG="${JSON_BIN} -q -f ${ONLYOFFICE_LOG4JS_CONFIG}"
|
||||
JSON_EXAMPLE="${JSON_BIN} -q -f ${ONLYOFFICE_EXAMPLE_CONFIG}"
|
||||
|
||||
DS_PORT=${DS_PORT:-80}
|
||||
|
||||
LOCAL_SERVICES=()
|
||||
|
||||
PG_ROOT=/var/lib/postgresql
|
||||
PG_VERSION=9.5
|
||||
PG_NAME=main
|
||||
PGDATA=${PG_ROOT}/${PG_VERSION}/${PG_NAME}
|
||||
PG_NEW_CLUSTER=false
|
||||
RABBITMQ_DATA=/var/lib/rabbitmq
|
||||
REDIS_DATA=/var/lib/redis
|
||||
|
||||
if [ "${LETS_ENCRYPT_DOMAIN}" != "" -a "${LETS_ENCRYPT_MAIL}" != "" ]; then
|
||||
LETSENCRYPT_ROOT_DIR="/etc/letsencrypt/live"
|
||||
SSL_CERTIFICATE_PATH=${LETSENCRYPT_ROOT_DIR}/${LETS_ENCRYPT_DOMAIN}/fullchain.pem
|
||||
SSL_KEY_PATH=${LETSENCRYPT_ROOT_DIR}/${LETS_ENCRYPT_DOMAIN}/privkey.pem
|
||||
fi
|
||||
|
||||
read_setting(){
|
||||
POSTGRESQL_SERVER_HOST=${POSTGRESQL_SERVER_HOST:-$(${JSON} services.CoAuthoring.sql.dbHost)}
|
||||
POSTGRESQL_SERVER_PORT=${POSTGRESQL_SERVER_PORT:-5432}
|
||||
POSTGRESQL_SERVER_DB_NAME=${POSTGRESQL_SERVER_DB_NAME:-$(${JSON} services.CoAuthoring.sql.dbName)}
|
||||
POSTGRESQL_SERVER_USER=${POSTGRESQL_SERVER_USER:-$(${JSON} services.CoAuthoring.sql.dbUser)}
|
||||
POSTGRESQL_SERVER_PASS=${POSTGRESQL_SERVER_PASS:-$(${JSON} services.CoAuthoring.sql.dbPass)}
|
||||
deprecated_var POSTGRESQL_SERVER_HOST DB_HOST
|
||||
deprecated_var POSTGRESQL_SERVER_PORT DB_PORT
|
||||
deprecated_var POSTGRESQL_SERVER_DB_NAME DB_NAME
|
||||
deprecated_var POSTGRESQL_SERVER_USER DB_USER
|
||||
deprecated_var POSTGRESQL_SERVER_PASS DB_PWD
|
||||
deprecated_var RABBITMQ_SERVER_URL AMQP_URI
|
||||
deprecated_var AMQP_SERVER_URL AMQP_URI
|
||||
deprecated_var AMQP_SERVER_TYPE AMQP_TYPE
|
||||
|
||||
METRICS_ENABLED="${METRICS_ENABLED:-false}"
|
||||
METRICS_HOST="${METRICS_HOST:-localhost}"
|
||||
METRICS_PORT="${METRICS_PORT:-8125}"
|
||||
METRICS_PREFIX="${METRICS_PREFIX:-.ds}"
|
||||
|
||||
DB_HOST=${DB_HOST:-${POSTGRESQL_SERVER_HOST:-$(${JSON} services.CoAuthoring.sql.dbHost)}}
|
||||
DB_TYPE=${DB_TYPE:-$(${JSON} services.CoAuthoring.sql.type)}
|
||||
case $DB_TYPE in
|
||||
"postgres")
|
||||
DB_PORT=${DB_PORT:-"5432"}
|
||||
;;
|
||||
"mariadb"|"mysql")
|
||||
DB_PORT=${DB_PORT:-"3306"}
|
||||
;;
|
||||
"")
|
||||
DB_PORT=${DB_PORT:-${POSTGRESQL_SERVER_PORT:-$(${JSON} services.CoAuthoring.sql.dbPort)}}
|
||||
;;
|
||||
*)
|
||||
echo "ERROR: unknown database type"
|
||||
exit 1
|
||||
;;
|
||||
esac
|
||||
DB_NAME=${DB_NAME:-${POSTGRESQL_SERVER_DB_NAME:-$(${JSON} services.CoAuthoring.sql.dbName)}}
|
||||
DB_USER=${DB_USER:-${POSTGRESQL_SERVER_USER:-$(${JSON} services.CoAuthoring.sql.dbUser)}}
|
||||
DB_PWD=${DB_PWD:-${POSTGRESQL_SERVER_PASS:-$(${JSON} services.CoAuthoring.sql.dbPass)}}
|
||||
|
||||
RABBITMQ_SERVER_URL=${RABBITMQ_SERVER_URL:-$(${JSON} rabbitmq.url)}
|
||||
AMQP_SERVER_URL=${AMQP_SERVER_URL:-${RABBITMQ_SERVER_URL}}
|
||||
AMQP_SERVER_TYPE=${AMQP_SERVER_TYPE:-rabbitmq}
|
||||
parse_rabbitmq_url ${AMQP_SERVER_URL}
|
||||
AMQP_URI=${AMQP_URI:-${AMQP_SERVER_URL:-${RABBITMQ_SERVER_URL}}}
|
||||
AMQP_TYPE=${AMQP_TYPE:-${AMQP_SERVER_TYPE:-rabbitmq}}
|
||||
parse_rabbitmq_url ${AMQP_URI}
|
||||
|
||||
REDIS_SERVER_HOST=${REDIS_SERVER_HOST:-$(${JSON} services.CoAuthoring.redis.host)}
|
||||
REDIS_SERVER_PORT=${REDIS_SERVER_PORT:-6379}
|
||||
|
@ -76,6 +177,12 @@ read_setting(){
|
|||
DS_LOG_LEVEL=${DS_LOG_LEVEL:-$(${JSON_LOG} categories.default.level)}
|
||||
}
|
||||
|
||||
deprecated_var() {
|
||||
if [[ -n ${!1} ]]; then
|
||||
echo "Variable $1 is deprecated. Use $2 instead."
|
||||
fi
|
||||
}
|
||||
|
||||
parse_rabbitmq_url(){
|
||||
local amqp=$1
|
||||
|
||||
|
@ -98,7 +205,7 @@ parse_rabbitmq_url(){
|
|||
# extract the host
|
||||
local hostport="$(echo ${url/$userpass@/} | cut -d/ -f1)"
|
||||
# by request - try to extract the port
|
||||
local port="$(echo $hostport | sed -e 's,^.*:,:,g' -e 's,.*:\([0-9]*\).*,\1,g' -e 's,[^0-9],,g')"
|
||||
local port="$(echo $hostport | grep : | sed -r 's_^.*:+|/.*$__g')"
|
||||
|
||||
local host
|
||||
if [ -n "$port" ]; then
|
||||
|
@ -125,8 +232,8 @@ waiting_for_connection(){
|
|||
done
|
||||
}
|
||||
|
||||
waiting_for_postgresql(){
|
||||
waiting_for_connection ${POSTGRESQL_SERVER_HOST} ${POSTGRESQL_SERVER_PORT}
|
||||
waiting_for_db(){
|
||||
waiting_for_connection $DB_HOST $DB_PORT
|
||||
}
|
||||
|
||||
waiting_for_amqp(){
|
||||
|
@ -139,22 +246,32 @@ waiting_for_redis(){
|
|||
waiting_for_datacontainer(){
|
||||
waiting_for_connection ${ONLYOFFICE_DATA_CONTAINER_HOST} ${ONLYOFFICE_DATA_CONTAINER_PORT}
|
||||
}
|
||||
update_postgresql_settings(){
|
||||
${JSON} -I -e "this.services.CoAuthoring.sql.dbHost = '${POSTGRESQL_SERVER_HOST}'"
|
||||
${JSON} -I -e "this.services.CoAuthoring.sql.dbPort = '${POSTGRESQL_SERVER_PORT}'"
|
||||
${JSON} -I -e "this.services.CoAuthoring.sql.dbName = '${POSTGRESQL_SERVER_DB_NAME}'"
|
||||
${JSON} -I -e "this.services.CoAuthoring.sql.dbUser = '${POSTGRESQL_SERVER_USER}'"
|
||||
${JSON} -I -e "this.services.CoAuthoring.sql.dbPass = '${POSTGRESQL_SERVER_PASS}'"
|
||||
|
||||
update_statsd_settings(){
|
||||
${JSON} -I -e "if(this.statsd===undefined)this.statsd={};"
|
||||
${JSON} -I -e "this.statsd.useMetrics = '${METRICS_ENABLED}'"
|
||||
${JSON} -I -e "this.statsd.host = '${METRICS_HOST}'"
|
||||
${JSON} -I -e "this.statsd.port = '${METRICS_PORT}'"
|
||||
${JSON} -I -e "this.statsd.prefix = '${METRICS_PREFIX}'"
|
||||
}
|
||||
|
||||
update_db_settings(){
|
||||
${JSON} -I -e "this.services.CoAuthoring.sql.type = '${DB_TYPE}'"
|
||||
${JSON} -I -e "this.services.CoAuthoring.sql.dbHost = '${DB_HOST}'"
|
||||
${JSON} -I -e "this.services.CoAuthoring.sql.dbPort = '${DB_PORT}'"
|
||||
${JSON} -I -e "this.services.CoAuthoring.sql.dbName = '${DB_NAME}'"
|
||||
${JSON} -I -e "this.services.CoAuthoring.sql.dbUser = '${DB_USER}'"
|
||||
${JSON} -I -e "this.services.CoAuthoring.sql.dbPass = '${DB_PWD}'"
|
||||
}
|
||||
|
||||
update_rabbitmq_setting(){
|
||||
if [ "${AMQP_SERVER_TYPE}" == "rabbitmq" ]; then
|
||||
if [ "${AMQP_TYPE}" == "rabbitmq" ]; then
|
||||
${JSON} -I -e "if(this.queue===undefined)this.queue={};"
|
||||
${JSON} -I -e "this.queue.type = 'rabbitmq'"
|
||||
${JSON} -I -e "this.rabbitmq.url = '${AMQP_SERVER_URL}'"
|
||||
${JSON} -I -e "this.rabbitmq.url = '${AMQP_URI}'"
|
||||
fi
|
||||
|
||||
if [ "${AMQP_SERVER_TYPE}" == "activemq" ]; then
|
||||
if [ "${AMQP_TYPE}" == "activemq" ]; then
|
||||
${JSON} -I -e "if(this.queue===undefined)this.queue={};"
|
||||
${JSON} -I -e "this.queue.type = 'activemq'"
|
||||
${JSON} -I -e "if(this.activemq===undefined)this.activemq={};"
|
||||
|
@ -192,28 +309,51 @@ update_rabbitmq_setting(){
|
|||
}
|
||||
|
||||
update_redis_settings(){
|
||||
${JSON} -I -e "if(this.services.CoAuthoring.redis===undefined)this.services.CoAuthoring.redis={};"
|
||||
${JSON} -I -e "this.services.CoAuthoring.redis.host = '${REDIS_SERVER_HOST}'"
|
||||
${JSON} -I -e "this.services.CoAuthoring.redis.port = '${REDIS_SERVER_PORT}'"
|
||||
|
||||
if [ -n "${REDIS_SERVER_PASS}" ]; then
|
||||
${JSON} -I -e "this.services.CoAuthoring.redis.options = {'password':'${REDIS_SERVER_PASS}'}"
|
||||
fi
|
||||
|
||||
}
|
||||
|
||||
update_jwt_settings(){
if [ "${JWT_ENABLED}" == "true" ]; then
${JSON} -I -e "this.services.CoAuthoring.token.enable.browser = ${JWT_ENABLED}"
${JSON} -I -e "this.services.CoAuthoring.token.enable.request.inbox = ${JWT_ENABLED}"
${JSON} -I -e "this.services.CoAuthoring.token.enable.request.outbox = ${JWT_ENABLED}"
update_ds_settings(){
${JSON} -I -e "this.services.CoAuthoring.token.enable.browser = ${JWT_ENABLED}"
${JSON} -I -e "this.services.CoAuthoring.token.enable.request.inbox = ${JWT_ENABLED}"
${JSON} -I -e "this.services.CoAuthoring.token.enable.request.outbox = ${JWT_ENABLED}"

${JSON} -I -e "this.services.CoAuthoring.secret.inbox.string = '${JWT_SECRET}'"
${JSON} -I -e "this.services.CoAuthoring.secret.outbox.string = '${JWT_SECRET}'"
${JSON} -I -e "this.services.CoAuthoring.secret.session.string = '${JWT_SECRET}'"
${JSON} -I -e "this.services.CoAuthoring.secret.inbox.string = '${JWT_SECRET}'"
${JSON} -I -e "this.services.CoAuthoring.secret.outbox.string = '${JWT_SECRET}'"
${JSON} -I -e "this.services.CoAuthoring.secret.session.string = '${JWT_SECRET}'"

${JSON} -I -e "this.services.CoAuthoring.token.inbox.header = '${JWT_HEADER}'"
${JSON} -I -e "this.services.CoAuthoring.token.outbox.header = '${JWT_HEADER}'"
${JSON} -I -e "this.services.CoAuthoring.token.inbox.header = '${JWT_HEADER}'"
${JSON} -I -e "this.services.CoAuthoring.token.outbox.header = '${JWT_HEADER}'"

if [ -f "${ONLYOFFICE_EXAMPLE_CONFIG}" ] && [ "${JWT_ENABLED}" == "true" ]; then
${JSON_EXAMPLE} -I -e "this.server.token.enable = ${JWT_ENABLED}"
${JSON_EXAMPLE} -I -e "this.server.token.secret = '${JWT_SECRET}'"
${JSON_EXAMPLE} -I -e "this.server.token.authorizationHeader = '${JWT_HEADER}'"
fi
${JSON} -I -e "this.services.CoAuthoring.token.inbox.inBody = ${JWT_IN_BODY}"
${JSON} -I -e "this.services.CoAuthoring.token.outbox.inBody = ${JWT_IN_BODY}"

if [ -f "${ONLYOFFICE_EXAMPLE_CONFIG}" ]; then
${JSON_EXAMPLE} -I -e "this.server.token.enable = ${JWT_ENABLED}"
${JSON_EXAMPLE} -I -e "this.server.token.secret = '${JWT_SECRET}'"
${JSON_EXAMPLE} -I -e "this.server.token.authorizationHeader = '${JWT_HEADER}'"
fi

if [ "${USE_UNAUTHORIZED_STORAGE}" == "true" ]; then
${JSON} -I -e "if(this.services.CoAuthoring.requestDefaults===undefined)this.services.CoAuthoring.requestDefaults={}"
${JSON} -I -e "if(this.services.CoAuthoring.requestDefaults.rejectUnauthorized===undefined)this.services.CoAuthoring.requestDefaults.rejectUnauthorized=false"
fi

if [ "${WOPI_ENABLED}" == "true" ]; then
${JSON} -I -e "if(this.wopi===undefined)this.wopi={}"
${JSON} -I -e "this.wopi.enable = true"
fi

if [ "${ALLOW_META_IP_ADDRESS}" = "true" ] || [ "${ALLOW_PRIVATE_IP_ADDRESS}" = "true" ]; then
${JSON} -I -e "if(this.services.CoAuthoring['request-filtering-agent']===undefined)this.services.CoAuthoring['request-filtering-agent']={}"
[ "${ALLOW_META_IP_ADDRESS}" = "true" ] && ${JSON} -I -e "this.services.CoAuthoring['request-filtering-agent'].allowMetaIPAddress = true"
[ "${ALLOW_PRIVATE_IP_ADDRESS}" = "true" ] && ${JSON} -I -e "this.services.CoAuthoring['request-filtering-agent'].allowPrivateIPAddress = true"
fi
}

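A quick way to sanity-check the token settings written by update_ds_settings is to read them back with the same helper; a minimal sketch, assuming ${JSON} wraps the npm json CLI pointed at the Document Server config as defined earlier in this script:

# read back a few of the values set above (output should match JWT_ENABLED / JWT_SECRET / JWT_HEADER)
${JSON} services.CoAuthoring.token.enable.browser
${JSON} services.CoAuthoring.secret.inbox.string
${JSON} services.CoAuthoring.token.inbox.header
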
@@ -229,26 +369,86 @@ create_postgresql_cluster(){
}

create_postgresql_db(){
sudo -u postgres psql -c "CREATE DATABASE onlyoffice;"
sudo -u postgres psql -c "CREATE USER onlyoffice WITH password 'onlyoffice';"
sudo -u postgres psql -c "GRANT ALL privileges ON DATABASE onlyoffice TO onlyoffice;"
sudo -u postgres psql -c "CREATE USER $DB_USER WITH password '"$DB_PWD"';"
sudo -u postgres psql -c "CREATE DATABASE $DB_NAME OWNER $DB_USER;"
}

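To confirm create_postgresql_db did what was intended, the new role and database can be listed from the same host; a minimal sketch, assuming local access as the postgres superuser just like the function itself:

# the database and owner created above should show up here
sudo -u postgres psql -c "\l" | grep "$DB_NAME"
sudo -u postgres psql -c "\du" | grep "$DB_USER"
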
create_postgresql_tbl(){
CONNECTION_PARAMS="-h${POSTGRESQL_SERVER_HOST} -p${POSTGRESQL_SERVER_PORT} -U${POSTGRESQL_SERVER_USER} -w"
if [ -n "${POSTGRESQL_SERVER_PASS}" ]; then
export PGPASSWORD=${POSTGRESQL_SERVER_PASS}
create_db_tbl() {
case $DB_TYPE in
"postgres")
create_postgresql_tbl
;;
"mariadb"|"mysql")
create_mysql_tbl
;;
esac
}

upgrade_db_tbl() {
case $DB_TYPE in
"postgres")
upgrade_postgresql_tbl
;;
"mariadb"|"mysql")
upgrade_mysql_tbl
;;
esac
}

upgrade_postgresql_tbl() {
if [ -n "$DB_PWD" ]; then
export PGPASSWORD=$DB_PWD
fi

PSQL="psql -q $CONNECTION_PARAMS"
CREATEDB="createdb $CONNECTION_PARAMS"
PSQL="psql -q -h$DB_HOST -p$DB_PORT -d$DB_NAME -U$DB_USER -w"

$PSQL -f "$APP_DIR/server/schema/postgresql/removetbl.sql"
$PSQL -f "$APP_DIR/server/schema/postgresql/createdb.sql"
}

upgrade_mysql_tbl() {
CONNECTION_PARAMS="-h$DB_HOST -P$DB_PORT -u$DB_USER -p$DB_PWD -w"
MYSQL="mysql -q $CONNECTION_PARAMS"

$MYSQL $DB_NAME < "$APP_DIR/server/schema/mysql/removetbl.sql" >/dev/null 2>&1
$MYSQL $DB_NAME < "$APP_DIR/server/schema/mysql/createdb.sql" >/dev/null 2>&1
}

create_postgresql_tbl() {
if [ -n "$DB_PWD" ]; then
export PGPASSWORD=$DB_PWD
fi

PSQL="psql -q -h$DB_HOST -p$DB_PORT -d$DB_NAME -U$DB_USER -w"
$PSQL -f "$APP_DIR/server/schema/postgresql/createdb.sql"
}

create_mysql_tbl() {
CONNECTION_PARAMS="-h$DB_HOST -P$DB_PORT -u$DB_USER -p$DB_PWD -w"
MYSQL="mysql -q $CONNECTION_PARAMS"

# Create db on remote server
if $PSQL -lt | cut -d\| -f 1 | grep -qw | grep 0; then
$CREATEDB $POSTGRESQL_SERVER_DB_NAME
fi
$MYSQL -e "CREATE DATABASE IF NOT EXISTS $DB_NAME DEFAULT CHARACTER SET utf8 DEFAULT COLLATE utf8_general_ci;" >/dev/null 2>&1

$PSQL -d "${POSTGRESQL_SERVER_DB_NAME}" -f "${APP_DIR}/server/schema/postgresql/createdb.sql"
$MYSQL $DB_NAME < "$APP_DIR/server/schema/mysql/createdb.sql" >/dev/null 2>&1
}

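A minimal sketch for checking the MySQL/MariaDB import, reusing the same connection flags that CONNECTION_PARAMS builds above (purely illustrative, not part of the diff):

# list the tables created by createdb.sql
mysql -h"$DB_HOST" -P"$DB_PORT" -u"$DB_USER" -p"$DB_PWD" "$DB_NAME" -e "SHOW TABLES;"
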
update_welcome_page() {
WELCOME_PAGE="${APP_DIR}-example/welcome/docker.html"
if [[ -e $WELCOME_PAGE ]]; then
DOCKER_CONTAINER_ID=$(basename $(cat /proc/1/cpuset))
(( ${#DOCKER_CONTAINER_ID} < 12 )) && DOCKER_CONTAINER_ID=$(hostname)
if (( ${#DOCKER_CONTAINER_ID} >= 12 )); then
if [[ -x $(command -v docker) ]]; then
DOCKER_CONTAINER_NAME=$(docker inspect --format="{{.Name}}" $DOCKER_CONTAINER_ID)
sed 's/$(sudo docker ps -q)/'"${DOCKER_CONTAINER_NAME#/}"'/' -i $WELCOME_PAGE
JWT_MESSAGE=$(echo $JWT_MESSAGE | sed 's/$(sudo docker ps -q)/'"${DOCKER_CONTAINER_NAME#/}"'/')
else
sed 's/$(sudo docker ps -q)/'"${DOCKER_CONTAINER_ID::12}"'/' -i $WELCOME_PAGE
JWT_MESSAGE=$(echo $JWT_MESSAGE | sed 's/$(sudo docker ps -q)/'"${DOCKER_CONTAINER_ID::12}"'/')
fi
fi
fi
}

update_nginx_settings(){
@@ -288,8 +488,6 @@ update_nginx_settings(){
fi
else
ln -sf ${NGINX_ONLYOFFICE_PATH}/ds.conf.tmpl ${NGINX_ONLYOFFICE_CONF}
# set up default listening port
sed 's,\(listen.\+:\)\([0-9]\+\)\(.*;\),'"\1${DS_PORT}\3"',' -i ${NGINX_ONLYOFFICE_CONF}
fi

# check if ipv6 is supported, otherwise remove it from the nginx config

@@ -300,17 +498,8 @@ update_nginx_settings(){
if [ -f "${NGINX_ONLYOFFICE_EXAMPLE_CONF}" ]; then
sed 's/linux/docker/' -i ${NGINX_ONLYOFFICE_EXAMPLE_CONF}
fi
}

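Because update_nginx_settings rewrites listen directives with sed, validating the generated configuration afterwards is cheap insurance; a minimal sketch using the stock nginx syntax check:

# verify the rewritten config parses and the expected port landed in the include
nginx -t && grep -n "listen" ${NGINX_ONLYOFFICE_CONF}
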
update_supervisor_settings(){
# Copy modified supervisor start script
cp ${SYSCONF_TEMPLATES_DIR}/supervisor/supervisor /etc/init.d/
# Copy modified supervisor config
cp ${SYSCONF_TEMPLATES_DIR}/supervisor/supervisord.conf /etc/supervisor/supervisord.conf
# Copy modified nginx start script
cp ${SYSCONF_TEMPLATES_DIR}/nginx/nginx /etc/init.d/
# Copy modified nginx config
cp ${SYSCONF_TEMPLATES_DIR}/nginx/nginx.conf /etc/nginx/nginx.conf
documentserver-update-securelink.sh -s ${SECURE_LINK_SECRET:-$(pwgen -s 20)} -r false
}

update_log_settings(){
@@ -321,40 +510,48 @@ update_logrotate_settings(){
sed 's|\(^su\b\).*|\1 root root|' -i /etc/logrotate.conf
}

update_release_date(){
mkdir -p ${PRIVATE_DATA_DIR}
echo ${RELEASE_DATE} > ${DS_RELEASE_DATE}
}

# create base folders
for i in converter docservice spellchecker metrics gc; do
for i in converter docservice metrics; do
mkdir -p "${DS_LOG_DIR}/$i"
done

mkdir -p ${DS_LOG_DIR}-example

# create app folders
for i in App_Data/cache/files App_Data/docbuilder; do
mkdir -p "${DS_LIB_DIR}/$i"
for i in ${DS_LIB_DIR}/App_Data/cache/files ${DS_LIB_DIR}/App_Data/docbuilder ${DS_LIB_DIR}-example/files; do
mkdir -p "$i"
done

# change folder rights
for i in ${LOG_DIR} ${LIB_DIR} ${DATA_DIR}; do
for i in ${LOG_DIR} ${LIB_DIR}; do
chown -R ds:ds "$i"
chmod -R 755 "$i"
done

touch ${DS_LOG_DIR}/nginx.error.log
chown www-data:www-data ${DS_LOG_DIR}/nginx.error.log

if [ ${ONLYOFFICE_DATA_CONTAINER_HOST} = "localhost" ]; then

read_setting

if [ $METRICS_ENABLED = "true" ]; then
update_statsd_settings
fi

update_welcome_page

update_log_settings

update_jwt_settings
update_ds_settings

# update settings by env variables
if [ ${POSTGRESQL_SERVER_HOST} != "localhost" ]; then
update_postgresql_settings
waiting_for_postgresql
create_postgresql_tbl
if [ $DB_HOST != "localhost" ]; then
update_db_settings
waiting_for_db
create_db_tbl
else
# change rights for postgres directory
chown -R postgres:postgres ${PG_ROOT}

@@ -371,15 +568,28 @@ if [ ${ONLYOFFICE_DATA_CONTAINER_HOST} = "localhost" ]; then
if [ ${AMQP_SERVER_HOST} != "localhost" ]; then
update_rabbitmq_setting
else
# change rights for rabbitmq directory
chown -R rabbitmq:rabbitmq ${RABBITMQ_DATA}
chmod -R go=rX,u=rwX ${RABBITMQ_DATA}
if [ -f ${RABBITMQ_DATA}/.erlang.cookie ]; then
chmod 400 ${RABBITMQ_DATA}/.erlang.cookie
fi

LOCAL_SERVICES+=("rabbitmq-server")
# allow RabbitMQ startup after container kill
rm -rf /var/run/rabbitmq
fi

if [ ${REDIS_SERVER_HOST} != "localhost" ]; then
update_redis_settings
else
LOCAL_SERVICES+=("redis-server")
if [ ${REDIS_ENABLED} = "true" ]; then
if [ ${REDIS_SERVER_HOST} != "localhost" ]; then
update_redis_settings
else
# change rights for redis directory
chown -R redis:redis ${REDIS_DATA}
chmod -R 750 ${REDIS_DATA}

LOCAL_SERVICES+=("redis-server")
fi
fi
else
# no need to update settings, just wait for remote data

@@ -388,8 +598,12 @@ else
# read settings after the data container is in ready state
# to prevent getting unconfigured data
read_setting

update_welcome_page
fi

find /etc/${COMPANY_NAME} ! -path '*logrotate*' -exec chown ds:ds {} \;

# start needed local services
for i in ${LOCAL_SERVICES[@]}; do
service $i start

@@ -401,13 +615,19 @@ if [ ${PG_NEW_CLUSTER} = "true" ]; then
fi

if [ ${ONLYOFFICE_DATA_CONTAINER} != "true" ]; then
waiting_for_postgresql
waiting_for_db
waiting_for_amqp
waiting_for_redis
if [ ${REDIS_ENABLED} = "true" ]; then
waiting_for_redis
fi

if [ "${IS_UPGRADE}" = "true" ]; then
upgrade_db_tbl
update_release_date
fi

update_nginx_settings

update_supervisor_settings
service supervisor start

# start cron to enable log rotating

@@ -419,8 +639,44 @@ fi
# it runs in all cases.
service nginx start

if [ "${LETS_ENCRYPT_DOMAIN}" != "" -a "${LETS_ENCRYPT_MAIL}" != "" ]; then
if [ ! -f "${SSL_CERTIFICATE_PATH}" -a ! -f "${SSL_KEY_PATH}" ]; then
documentserver-letsencrypt.sh ${LETS_ENCRYPT_MAIL} ${LETS_ENCRYPT_DOMAIN}
fi
fi

# Regenerate the fonts list and the fonts thumbnails
documentserver-generate-allfonts.sh ${ONLYOFFICE_DATA_CONTAINER}
if [ "${GENERATE_FONTS}" == "true" ]; then
documentserver-generate-allfonts.sh ${ONLYOFFICE_DATA_CONTAINER}
fi
documentserver-static-gzip.sh ${ONLYOFFICE_DATA_CONTAINER}

tail -f /var/log/${COMPANY_NAME}/**/*.log
echo "${JWT_MESSAGE}"

# Check if larger file limits should be set
if [ "$LARGER_FILE_LIMITS" = "true" ]; then
if [ -e /app/ds/file_limits_set ]; then
echo ""
else
touch /app/ds/file_limits_set

sed -i -e 's/104857600/10485760000/g' /etc/onlyoffice/documentserver-example/production-linux.json

sed -i '9iclient_max_body_size 1000M;' /etc/onlyoffice/documentserver-example/nginx/includes/ds-example.conf
sed -i '16iclient_max_body_size 1000M;' /etc/nginx/nginx.conf

sed -i -e 's/104857600/10485760000/g' /etc/onlyoffice/documentserver/default.json
sed -i -e 's/50MB/5000MB/g' /etc/onlyoffice/documentserver/default.json
sed -i -e 's/300MB/3000MB/g' /etc/onlyoffice/documentserver/default.json

sed -i 's/^client_max_body_size 100m;$/client_max_body_size 1000m;/' /etc/onlyoffice/documentserver/nginx/includes/ds-common.conf

service nginx restart
supervisorctl restart all

fi
fi

tail -f /var/log/${COMPANY_NAME}/**/*.log &
wait $!

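To confirm the LARGER_FILE_LIMITS rewrites above took effect, the touched files can be inspected directly; a minimal sketch over the same paths the sed calls use:

# the limits should now read 10485760000 / 1000M instead of the defaults
grep -n "10485760000\|client_max_body_size" /etc/onlyoffice/documentserver/default.json /etc/nginx/nginx.conf /etc/onlyoffice/documentserver/nginx/includes/ds-common.conf
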
BIN  slideThemes/01_blank.pptx  Normal file (binary file not shown)
BIN  slideThemes/02_basic.pptx  Normal file (binary file not shown)
BIN  slideThemes/03_classic.pptx  Normal file (binary file not shown)
BIN  slideThemes/04_official.pptx  Normal file (binary file not shown)
BIN  slideThemes/05_green leaf.pptx  Normal file (binary file not shown)
BIN  slideThemes/06_lines.pptx  Normal file (binary file not shown)
BIN  slideThemes/07_office.pptx  Normal file (binary file not shown)
BIN  slideThemes/08_safari.pptx  Normal file (binary file not shown)
BIN  slideThemes/09_dotted.pptx  Normal file (binary file not shown)
BIN  slideThemes/10_corner.pptx  Normal file (binary file not shown)
BIN  slideThemes/11_tort.pptx  Normal file (binary file not shown)
35  tests/postgres.yml  Normal file
@@ -0,0 +1,35 @@
version: '2.1'
services:
  onlyoffice-documentserver:
    container_name: onlyoffice-documentserver
    build:
      context: ../.
    depends_on:
      - onlyoffice-postgresql
    environment:
      - DB_TYPE=${DB_TYPE:-postgres}
      - DB_HOST=${DB_HOST:-onlyoffice-postgresql}
      - DB_PORT=${DB_PORT:-5432}
      - DB_NAME=${DB_NAME:-onlyoffice}
      - DB_USER=${DB_USER:-onlyoffice}
      - DB_PWD=${DB_PWD:-onlyoffice}
    stdin_open: true
    restart: always
    ports:
      - '80:80'

  onlyoffice-postgresql:
    container_name: onlyoffice-postgresql
    image: postgres:${POSTGRES_VERSION:-12}
    environment:
      - POSTGRES_DB=${POSTGRES_DB:-onlyoffice}
      - POSTGRES_USER=${POSTGRES_USER:-onlyoffice}
      - POSTGRES_HOST_AUTH_METHOD=${POSTGRES_HOST_AUTH_METHOD:-trust}
    restart: always
    expose:
      - '5432'
    volumes:
      - postgresql_data:/var/lib/postgresql

volumes:
  postgresql_data:
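Relative paths in this compose file resolve against the tests directory, so context: ../. builds the image from the repository root. A minimal usage sketch, assuming Docker Compose is installed:

# build the image and start Document Server against the bundled PostgreSQL
docker-compose -f tests/postgres.yml up -d --build
docker-compose -f tests/postgres.yml logs -f onlyoffice-documentserver
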
46  tests/prometheus.yml  Normal file
@@ -0,0 +1,46 @@
version: '2.1'
services:
  onlyoffice-documentserver:
    container_name: onlyoffice-documentserver
    build:
      context: ../.
    depends_on:
      - onlyoffice-statsd-exporter
    environment:
      - METRICS_ENABLED=${METRICS_ENABLED:-true}
      - METRICS_HOST=${METRICS_HOST:-onlyoffice-statsd-exporter}
      - METRICS_PORT=${METRICS_PORT:-9125}
      - METRICS_PREFIX=${METRICS_PREFIX:-ds.}
    stdin_open: true
    restart: always
    ports:
      - '80:80'

  onlyoffice-statsd-exporter:
    container_name: onlyoffice-statsd-exporter
    image: prom/statsd-exporter
    command: --statsd.event-flush-interval=30000ms
    ports:
      - '9102:9102'
      - '9125:9125/tcp'
      - '9125:9125/udp'

  onlyoffice-prometheus:
    container_name: onlyoffice-prometheus
    image: prom/prometheus
    ports:
      - '9090:9090'
    volumes:
      - ./prometheus/prometheus-scrape/statsd-exporter.yml:/etc/prometheus/prometheus.yml

  grafana:
    container_name: onlyoffice-grafana
    image: bitnami/grafana
    ports:
      - '3000:3000'
    environment:
      - 'GF_SECURITY_ADMIN_PASSWORD=G0pGE4'
    volumes:
      - ./prometheus/grafana/conf/prometheus.yml:/opt/bitnami/grafana/conf/provisioning/datasources/prometheus.yml
      - ./prometheus/grafana/conf/default-provider.yaml:/opt/bitnami/grafana/conf/provisioning/dashboards/default-provider.yaml
      - ./prometheus/grafana/dashboards:/opt/bitnami/grafana/dashboards
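The metrics stack follows the same pattern; once it is up, Prometheus listens on 9090 and Grafana on 3000 as mapped above. A minimal usage sketch, assuming Docker Compose:

# start Document Server together with statsd-exporter, Prometheus and Grafana
docker-compose -f tests/prometheus.yml up -d --build
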
23  tests/prometheus/grafana/conf/default-provider.yaml  Normal file
@@ -0,0 +1,23 @@
apiVersion: 1
providers:
  # <string> a unique provider name
  - name: 'default-provider'
    # <int> org id. will default to orgId 1 if not specified
    orgId: 1
    # <string, required> name of the dashboard folder. Required
    folder: dashboards
    # <string> folder UID. will be automatically generated if not specified
    folderUid: ''
    # <string, required> provider type. Required
    type: file
    # <bool> disable dashboard deletion
    disableDeletion: false
    # <bool> enable dashboard editing
    editable: true
    # <int> how often Grafana will scan for changed dashboards
    updateIntervalSeconds: 10
    options:
      # <string, required> path to dashboard files on disk. Required
      path: /opt/bitnami/grafana/dashboards
      # <bool> enable folders creation for dashboards
      #foldersFromFilesStructure: true
6  tests/prometheus/grafana/conf/prometheus.yml  Normal file
@@ -0,0 +1,6 @@
apiVersion: 1
datasources:
  - name: Prometheus
    type: prometheus
    url: http://onlyoffice-prometheus:9090
    editable: true

File diff suppressed because it is too large
6  tests/prometheus/prometheus-scrape/statsd-exporter.yml  Normal file
@@ -0,0 +1,6 @@
scrape_configs:
  - job_name: 'statsd'
    scrape_interval: 30s
    static_configs:
      - targets:
        - onlyoffice-statsd-exporter:9102
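With this scrape config in place, the exporter can be checked directly on the port published in tests/prometheus.yml; a minimal sketch from the Docker host (the exact metric names depend on how statsd-exporter maps the ds. prefix):

# the exporter publishes the translated StatsD metrics on /metrics
curl -s http://localhost:9102/metrics | grep -i "^ds" | head
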