If you suspect this could be a bug, follow the template.
- What version of Dgraph are you using?
Dgraph version : v1.0.12-rc3-494-g16663939
Commit SHA-1 : 16663939
Commit timestamp : 2019-06-07 10:28:44 +0530
Branch : master
Go version : go1.12.5
-
Have you tried reproducing the issue with latest release?
Yes
-
Steps to reproduce the issue (command/config used to run Dgraph).
Use this script to run flock. It just pulls the latest master of dgraph and flock, builds them, and runs them along with the flock client.
#!/bin/bash
# Continuously pull the latest dgraph + flock master, rebuild both, and run a
# timed flock stress cycle against a docker-compose cluster, capturing each
# run's docker output under $OUTPUTS_DIR keyed by start timestamp.
set -x

RUN_TIME=7200        # seconds each stress cycle runs
SLEEP_TIME=21600     # seconds to idle between cycles
DGRAPH_DIR=$HOME/go/src/github.com/dgraph-io/dgraph
FLOCK_DIR=$HOME/go/src/github.com/dgraph-io/flock
DOCKER_TMP_DIRS=/tmp/flock_test
OUTPUTS_DIR=$HOME/docker_outputs

while true
do
    # Refresh both repositories; bail out if a checkout dir is missing so we
    # don't run git/go in the wrong directory.
    cd "$DGRAPH_DIR" || exit 1
    echo "Pulling dgraph from git"
    git checkout master
    git pull

    cd "$FLOCK_DIR" || exit 1
    echo "Pulling flock from git"
    git checkout master
    git pull

    cd "$GOPATH/bin" || exit 1
    echo "Compiling dgraph and flock"
    go install -race github.com/dgraph-io/dgraph/dgraph
    go install github.com/dgraph-io/flock

    echo "Cleaning temp directories"
    rm -rf "$DOCKER_TMP_DIRS"
    echo "Recreating temp directories"
    mkdir -p "$DOCKER_TMP_DIRS"/{working,testdata,data}
    mkdir -p "$OUTPUTS_DIR"
    # chown uses ':' as the user:group separator; the '.' form is deprecated.
    chown -R flock:flock "$DOCKER_TMP_DIRS"
    chmod -R 777 "$DOCKER_TMP_DIRS"

    # Tear down anything left over from a previous cycle.
    docker stop $(docker ps -a -q)
    docker rm $(docker ps -a -q)
    killall timeout

    echo "Running docker compose"
    time_now=$(date +%s)  # timestamp naming this run's log file
    timeout "$RUN_TIME" docker-compose -f "$HOME/docker-compose.yml" up >"$OUTPUTS_DIR/$time_now" 2>&1 &

    echo "Now sleeping while the dockers come up"
    sleep 120
    echo "Now starting flock"
    timeout "$RUN_TIME" "$GOPATH/bin/flock" -a ":9080,:9081,:9082" >"$HOME/flock_outputs" 2>&1 &
    echo "Now starting flock client"
    timeout "$RUN_TIME" go run "$FLOCK_DIR/client/main.go" -a ":9080,:9081,:9082" >"$HOME/flock_client_output" 2>&1 &

    # Let the cycle run to completion, then tear down and idle until the next one.
    sleep "$RUN_TIME"
    docker stop $(docker ps -a -q)
    docker rm $(docker ps -a -q)
    sleep "$SLEEP_TIME"
done
My docker-compose.yml file
# Auto-generated with: [/home/javier/dgraph-io/dgraph/compose/compose -u -d /var/tmp/flock_test/data/twitterer]
#
# 3 alphas + 3 zeros (replicas=3) sharing a race-built dgraph binary bind-mounted
# from $GOPATH/bin, plus a ratel UI container. Indentation reconstructed to the
# standard compose nesting; port mappings and the templated user value are
# quoted so YAML cannot mis-type them.
version: "3.5"
services:
  alpha1:
    image: dgraph/dgraph
    container_name: alpha1
    working_dir: /tmp/flock_test/working/alpha1
    labels:
      cluster: flock-cluster
    ports:
      - "8080:8080"
      - "9080:9080"
    volumes:
      - type: bind
        source: $GOPATH/bin/dgraph
        target: /gobin/dgraph
        read_only: true
      - type: bind
        source: /tmp/flock_test/testdata
        target: /tmp/flock_test/data
        read_only: false
    user: "${UID:-1000}"
    command: /gobin/dgraph alpha -p /tmp/flock_test/data/alpha1/p -w /tmp/flock_test/data/alpha1/w -o 0 --my=alpha1:7080 --lru_mb=1024
      --zero=zero1:5080 --logtostderr -v=2
  alpha2:
    image: dgraph/dgraph
    container_name: alpha2
    working_dir: /tmp/flock_test/working/alpha2
    depends_on:
      - alpha1
    labels:
      cluster: flock-cluster
    ports:
      - "8081:8081"
      - "9081:9081"
    volumes:
      - type: bind
        source: $GOPATH/bin/dgraph
        target: /gobin/dgraph
        read_only: true
      - type: bind
        source: /tmp/flock_test/testdata
        target: /tmp/flock_test/data
        read_only: false
    user: "${UID:-1000}"
    command: /gobin/dgraph alpha -p /tmp/flock_test/data/alpha2/p -w /tmp/flock_test/data/alpha2/w -o 1 --my=alpha2:7081 --lru_mb=1024
      --zero=zero1:5080 --logtostderr -v=2
  alpha3:
    image: dgraph/dgraph
    container_name: alpha3
    working_dir: /tmp/flock_test/working/alpha3
    depends_on:
      - alpha2
    labels:
      cluster: flock-cluster
    ports:
      - "8082:8082"
      - "9082:9082"
    volumes:
      - type: bind
        source: $GOPATH/bin/dgraph
        target: /gobin/dgraph
        read_only: true
      - type: bind
        source: /tmp/flock_test/testdata
        target: /tmp/flock_test/data
        read_only: false
    user: "${UID:-1000}"
    command: /gobin/dgraph alpha -p /tmp/flock_test/data/alpha3/p -w /tmp/flock_test/data/alpha3/w -o 2 --my=alpha3:7082 --lru_mb=1024
      --zero=zero1:5080 --logtostderr -v=2
  zero1:
    image: dgraph/dgraph
    container_name: zero1
    working_dir: /tmp/flock_test/working/zero1
    labels:
      cluster: flock-cluster
    ports:
      - "5080:5080"
      - "6080:6080"
    volumes:
      - type: bind
        source: $GOPATH/bin/dgraph
        target: /gobin/dgraph
        read_only: true
      - type: bind
        source: /tmp/flock_test/testdata
        target: /tmp/flock_test/data
        read_only: false
    user: "${UID:-1000}"
    command: /gobin/dgraph zero -w /tmp/flock_test/data/zero1/zw -o 0 --idx=1 --my=zero1:5080 --replicas=3
      --logtostderr -v=2 --bindall
  zero2:
    image: dgraph/dgraph
    container_name: zero2
    working_dir: /tmp/flock_test/working/zero2
    depends_on:
      - zero1
    labels:
      cluster: flock-cluster
    ports:
      - "5082:5082"
      - "6082:6082"
    volumes:
      - type: bind
        source: $GOPATH/bin/dgraph
        target: /gobin/dgraph
        read_only: true
      - type: bind
        source: /tmp/flock_test/testdata
        target: /tmp/flock_test/data
        read_only: false
    user: "${UID:-1000}"
    command: /gobin/dgraph zero -w /tmp/flock_test/data/zero2/zw -o 2 --idx=2 --my=zero2:5082 --replicas=3
      --logtostderr -v=2 --peer=zero1:5080
  zero3:
    image: dgraph/dgraph
    container_name: zero3
    working_dir: /tmp/flock_test/working/zero3
    depends_on:
      - zero2
    labels:
      cluster: flock-cluster
    ports:
      - "5083:5083"
      - "6083:6083"
    volumes:
      - type: bind
        source: $GOPATH/bin/dgraph
        target: /gobin/dgraph
        read_only: true
      - type: bind
        source: /tmp/flock_test/testdata
        target: /tmp/flock_test/data
        read_only: false
    user: "${UID:-1000}"
    command: /gobin/dgraph zero -w /tmp/flock_test/data/zero3/zw -o 3 --idx=3 --my=zero3:5083 --replicas=3
      --logtostderr -v=2 --peer=zero1:5080
  ratel:
    image: dgraph/dgraph
    container_name: ratel
    depends_on:
      - alpha1
    labels:
      cluster: flock-cluster
    ports:
      - "8000:8000"
    command: dgraph-ratel
    volumes: {}
- Expected behaviour and actual result.
Check comments for races.
If you suspect this could be a bug, follow the template.
Have you tried reproducing the issue with latest release?
Yes
Steps to reproduce the issue (command/config used to run Dgraph).
Use this script to run flock. It just pulls the latest master of dgraph and flock, builds them, and runs them along with the flock client.
My docker-compose.yml file
Check comments for races.