From cc5377ace6c9189826aecde5575c9be394f1703c Mon Sep 17 00:00:00 2001
From: Jeffrey Phillips Freeman <the@jeffreyfreeman.me>
Date: Thu, 1 Oct 2020 11:06:03 -0400
Subject: [PATCH] Updated readme with some additional info on how to run and
 build.

---
 README.md | 71 +++++++++++++++++++++++++++++++++++++++++++++++++++++--
 1 file changed, 69 insertions(+), 2 deletions(-)

diff --git a/README.md b/README.md
index 834b2fe..773b7db 100644
--- a/README.md
+++ b/README.md
@@ -42,10 +42,77 @@ git clone http://git.qoto.org/aparapi/aparapi-docker.git
 
 ## Building
 
+This repo holds multiple image variants, one for each supported OpenCL implementation. The following commands show how to build each one.
 
+```bash
+docker build -t <image name> --build-arg "aparapiver=<aparapi Version>" amdgpu/
+docker build -t <image name> --build-arg "aparapiver=<aparapi Version>" nvidia/
+```
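+
+For example, assuming you want the image tagged `aparapi/aparapi-amdgpu:2.0.0` and built against Aparapi version 2.0.0 (both are placeholders, substitute your own tag and version), the AMD variant could be built like this:
+
+```bash
+# Hypothetical tag and version; adjust to the Aparapi release you want in the image.
+docker build -t aparapi/aparapi-amdgpu:2.0.0 --build-arg "aparapiver=2.0.0" amdgpu/
+```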
+
+## Running
+
+To run the amdgpu-pro image you must have compatible AMD hardware and drivers installed on the host. Once they are in place, execute the following command.
 
 ```bash
-docker build --device /dev/dri:/dev/dri -t <image name> --build-arg "aparapiver=<aparapi Version>" amdgpu/
-docker build --device /dev/dri:/dev/dri -t <image name> --build-arg "aparapiver=<aparapi Version>" nvidia/
+docker run --device /dev/dri -it aparapi/aparapi-amdgpu:latest bash
+```
+
+To run the NVIDIA-based OpenCL image, use the following command, provided you have the [NVIDIA Container Toolkit](https://github.com/NVIDIA/nvidia-docker) installed as well as the appropriate drivers.
+
+```bash
+docker run --runtime=nvidia -it aparapi/aparapi-nvidia:latest bash
+```
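+
+To check that OpenCL devices are actually visible inside either container, you can list them with `clinfo`. This is only a sketch and assumes `clinfo` is present in the image; if it is not, install it through the image's package manager first.
+
+```bash
+# Assumes clinfo is available in the image; it enumerates the OpenCL platforms and devices the container can see.
+docker run --runtime=nvidia -it aparapi/aparapi-nvidia:latest clinfo
+```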
+
+### Using as a GitLab Runner
+
+If you wish to use this container as an image in GitLab CI when setting up your own runner, the following config.toml will serve as an example. Ensure you select an AMI that has the
+[NVIDIA Container Toolkit](https://github.com/NVIDIA/nvidia-docker) installed and an instance type that is GPU-accelerated with NVIDIA hardware (most GPU instance types are); both are the case in the example below. Don't forget
+to change the MachineOptions and other placeholder values to suit your environment.
+
+```toml
+[[runners]]
+  name = "GPGPU Runner"
+  url = "https://git.qoto.org/"
+  token = "XXXXXXXXXXXXXXXXX"
+  executor = "docker+machine"
+  limit = 1
+  [runners.custom_build_dir]
+  [runners.docker]
+    tls_verify = false
+    image = "aparapi/aparapi-nvidia:latest"
+    privileged = true
+    disable_entrypoint_overwrite = false
+    oom_kill_disable = false
+    disable_cache = true
+    shm_size = 0
+    runtime = "nvidia"
+  [runners.cache]
+    [runners.cache.s3]
+      ServerAddress = "s3.wasabisys.com"
+      AccessKey = "XXXXXXXXXXXXXXXXX"
+      SecretKey = "XXXXXXXXXXXXXXXXX"
+      BucketName = "git.qoto.org"
+      BucketLocation = "us-east-1"
+  [runners.machine]
+    IdleCount = 0
+    IdleTime = 1800
+    MachineDriver = "amazonec2"
+    MachineName = "gitlab-docker-machine-%s"
+    MachineOptions = [
+      "amazonec2-access-key=XXXXXXXXXXXXXXXX",
+      "amazonec2-secret-key=XXXXXXXXXXXXXXXX",
+      "amazonec2-vpc-id=vpc-0243bc5bc666df2e2",
+      "amazonec2-subnet-id=subnet-0a43db1988ad60343",
+      "amazonec2-use-private-address=true",
+      "amazonec2-tags=runner-manager-name,gitlab-aws-autoscaler,gitlab,true,gitlab-runner-autoscale,true",
+      "amazonec2-security-group=qoto-sq",
+      "amazonec2-instance-type=p2.xlarge",
+      "amazonec2-region=us-east-1",
+      "amazonec2-zone=a",
+      "amazonec2-ami=ami-06a25ee8966373068",
+      "amazonec2-root-size=128",
+    ]
 
 ```
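+
+With a runner like the one above registered, a CI job can target the GPU image directly. The snippet below is a minimal, hypothetical `.gitlab-ci.yml` sketch; the job name, `tags` value, and script are placeholders to replace with whatever matches your runner and project.
+
+```yaml
+# Hypothetical job; replace the tag and script with values that match your runner and project.
+gpgpu-test:
+  image: aparapi/aparapi-nvidia:latest
+  tags:
+    - gpgpu        # assumed tag registered on the GPU runner
+  script:
+    - clinfo       # assumes clinfo is available; confirms the job can see an OpenCL device
+```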
-- 
GitLab