.github/workflows/release.yml
... ...
@@ -97,11 +97,11 @@ jobs:
97 97
log-accepted-android-sdk-licenses: 'false'
98 98
cmdline-tools-version: 9123335 # This corresponds to human-friendly version number 8.0
99 99
- name: Start MongoDB
100
- uses: supercharge/mongodb-github-action@90004df786821b6308fb02299e5835d0dae05d0d #v1.12.0
100
+ uses: supercharge/mongodb-github-action@315db7fe45ac2880b7758f1933e6e5d59afd5e94 #v1.12.1
101 101
with:
102 102
mongodb-version: 7.0
103 103
- name: Start RabbitMQ
104
- uses: namoshek/rabbitmq-github-action@58b841360ede0e19fc5e4929fc2477ecc09193d8 # v1.1.0
104
+ uses: Namoshek/rabbitmq-github-action@80a182e44c7f60bc3417a8689d6a24fe9d08b27b #v1.2.0
105 105
with:
106 106
version: '3.8.9'
107 107
ports: '5672:5672'
Gemfile.lock
... ...
@@ -43,7 +43,7 @@ GEM
43 43
dotenv (2.8.1)
44 44
emoji_regex (3.2.3)
45 45
excon (0.112.0)
46
- faraday (1.10.4)
46
+ faraday (1.10.5)
47 47
faraday-em_http (~> 1.0)
48 48
faraday-em_synchrony (~> 1.0)
49 49
faraday-excon (~> 1.1)
Home.md
... ...
@@ -59,6 +59,7 @@ SAP is at the center of today’s technology revolution, developing innovations
59 59
* Amazon
60 60
* [[Amazon EC2|wiki/info/landscape/amazon-ec2]]
61 61
* [[Upgrading ARCHIVE server|wiki/info/landscape/archive-server-upgrade]]
62
+ * [[Upgrading MongoDB Nodes|wiki/info/landscape/mongo-cluster-upgrade]]
62 63
* [[EC2 Backup Strategy|wiki/info/landscape/amazon-ec2-backup-strategy]]
63 64
* [[Creating an EC2 image from scratch|wiki/info/landscape/creating-ec2-image-from-scratch]]
64 65
* [[Upgrading an EC2 image|wiki/info/landscape/upgrading-ec2-image]]
configuration/.settings/org.eclipse.core.resources.prefs
... ...
@@ -0,0 +1,2 @@
1
+eclipse.preferences.version=1
2
+encoding/<project>=UTF-8
java/com.sap.sailing.domain.test/src/com/sap/sailing/domain/common/test/KadaneSerializationTest.java
... ...
@@ -0,0 +1,50 @@
1
+package com.sap.sailing.domain.common.test;
2
+
3
+import static org.junit.jupiter.api.Assertions.assertEquals;
4
+import static org.junit.jupiter.api.Assertions.assertNotSame;
5
+
6
+import java.io.IOException;
7
+import java.util.Random;
8
+
9
+import org.junit.jupiter.api.Test;
10
+
11
+import com.sap.sailing.domain.test.AbstractSerializationTest;
12
+import com.sap.sse.common.Util;
13
+import com.sap.sse.common.scalablevalue.KadaneExtremeSubsequenceFinder;
14
+import com.sap.sse.common.scalablevalue.ScalableDouble;
15
+
16
+public class KadaneSerializationTest extends AbstractSerializationTest {
17
+ @Test
18
+ public void testBasicKadaneSerialization() throws IOException, ClassNotFoundException {
19
+ final KadaneExtremeSubsequenceFinder<Double, Double, ScalableDouble> original = new com.sap.sse.common.scalablevalue.KadaneExtremeSubsequenceFinderLinkedNodesImpl<>();
20
+ original.add(new ScalableDouble(17));
21
+ original.add(new ScalableDouble(42));
22
+ original.add(new ScalableDouble(-3));
23
+ original.add(new ScalableDouble(-99));
24
+ original.add(new ScalableDouble(12));
25
+ assertDeserializedEqualsOriginal(original);
26
+ }
27
+
28
+ @Test
29
+ public void testLongKadaneSerialization() throws IOException, ClassNotFoundException {
30
+ final Random random = new Random();
31
+ final KadaneExtremeSubsequenceFinder<Double, Double, ScalableDouble> original = new com.sap.sse.common.scalablevalue.KadaneExtremeSubsequenceFinderLinkedNodesImpl<>();
32
+ final int NODES = 100000;
33
+ for (int i=0; i<NODES; i++) {
34
+ original.add(new ScalableDouble(random.nextDouble()-0.5));
35
+ }
36
+ assertDeserializedEqualsOriginal(original);
37
+ }
38
+
39
+ private void assertDeserializedEqualsOriginal(
40
+ final KadaneExtremeSubsequenceFinder<Double, Double, ScalableDouble> original)
41
+ throws IOException, ClassNotFoundException {
42
+ final KadaneExtremeSubsequenceFinder<Double, Double, ScalableDouble> clone = cloneBySerialization(original, null);
43
+ assertNotSame(clone, original);
44
+ assertEquals(Util.asList(original), Util.asList(clone));
45
+ assertEquals(original.getMaxSum(), clone.getMaxSum());
46
+ assertEquals(original.getMinSum(), clone.getMinSum());
47
+ assertEquals(original.getStartIndexOfMinSumSequence(), clone.getStartIndexOfMinSumSequence());
48
+ assertEquals(original.getStartIndexOfMaxSumSequence(), clone.getStartIndexOfMaxSumSequence());
49
+ }
50
+}
java/com.sap.sailing.domain.test/src/com/sap/sailing/domain/test/CourseChangeBasedTrackApproximationTest.java
... ...
@@ -109,7 +109,7 @@ public class CourseChangeBasedTrackApproximationTest {
109 109
}
110 110
final Iterable<GPSFixMoving> emptyManeuverCandidates = approximation.approximate(start, start.plus(samplingInterval.times(NUMBER_OF_FIXES_FOR_NON_MANEUVER)));
111 111
assertTrue(Util.isEmpty(emptyManeuverCandidates));
112
- final double aBitOverMinimumManeuverAngleDegrees = boatClass.getManeuverDegreeAngleThreshold() * 1.2;
112
+ final double aBitOverMinimumManeuverAngleDegrees = boatClass.getManeuverDegreeAngleThreshold() * 1.5;
113 113
// perform aBitOverMinimumManeuverAngleDegrees within five fixes:
114 114
final int NUMBER_OF_FIXES_FOR_MANEUVER = 5;
115 115
for (int i=0; i<NUMBER_OF_FIXES_FOR_MANEUVER; i++) {
java/com.sap.sailing.grib/src/com/sap/sailing/grib/impl/GribWindFieldFactoryImpl.java
... ...
@@ -19,6 +19,7 @@ import java.util.logging.Logger;
19 19
20 20
import com.sap.sailing.grib.GribWindField;
21 21
import com.sap.sailing.grib.GribWindFieldFactory;
22
+import com.sap.sse.common.Util;
22 23
import com.sap.sse.common.util.MappingIterable;
23 24
import com.sap.sse.util.LoggerAppender;
24 25
... ...
@@ -188,6 +189,9 @@ public class GribWindFieldFactoryImpl implements GribWindFieldFactory {
188 189
* are no longer needed.
189 190
*/
190 191
private File copyStreamToFile(InputStream s, String filename) throws IOException {
192
+ if (Util.hasLength(filename) && (filename.contains("..") || filename.contains("/") || filename.contains("\\"))) {
193
+ throw new IllegalArgumentException("File extension must not contain '..' or a file separator like '/'.");
194
+ }
191 195
Path tempDir = Files.createTempDirectory("gribcache");
192 196
Path filePath = tempDir.resolve(filename);
193 197
Files.copy(s, filePath);
java/com.sap.sailing.gwt.ui/src/main/java/com/sap/sailing/gwt/ui/adminconsole/AbstractLeaderboardConfigPanel.java
... ...
@@ -262,7 +262,7 @@ public abstract class AbstractLeaderboardConfigPanel extends FormPanel
262 262
Label helpLabel = new Label(stringMessages.helptextLinkingRaces());
263 263
hPanel.setWidget(0, 0, helpLabel);
264 264
hPanel.setWidget(0, 1, new HelpButton(HelpButtonResources.INSTANCE,
265
- stringMessages.videoGuide(), "https://vimeo.com/768053778/922b629cc4"));
265
+ stringMessages.videoGuide(), "https://sapsailing-documentation.s3-eu-west-1.amazonaws.com/adminconsole/LinkingEx.mp4"));
266 266
mainPanel.add(hPanel);
267 267
// caption panels for the selected leaderboard and tracked races
268 268
final HorizontalPanel splitPanel = new HorizontalPanel();
java/com.sap.sse.filestorage/src/com/sap/sse/filestorage/impl/LocalFileStorageServiceImpl.java
... ...
@@ -17,6 +17,7 @@ import org.apache.shiro.authz.UnauthorizedException;
17 17
import org.osgi.framework.BundleContext;
18 18
19 19
import com.sap.sailing.domain.common.security.SecuredDomainType;
20
+import com.sap.sse.common.Util;
20 21
import com.sap.sse.common.Util.Pair;
21 22
import com.sap.sse.filestorage.FileStorageService;
22 23
import com.sap.sse.filestorage.FileStorageServiceProperty;
... ...
@@ -41,8 +42,6 @@ import com.sap.sse.security.shared.TypeRelativeObjectIdentifier;
41 42
* @author Jan Broß
42 43
*
43 44
*/
44
-
45
-
46 45
public class LocalFileStorageServiceImpl extends BaseFileStorageServiceImpl implements FileStorageService {
47 46
private static final long serialVersionUID = -8661781258137340835L;
48 47
private static final String testFile = "Bundesliga2014_Regatta6_eventteaser.jpg";
... ...
@@ -62,6 +61,9 @@ public class LocalFileStorageServiceImpl extends BaseFileStorageServiceImpl impl
62 61
@Override
63 62
public URI storeFile(InputStream is, String fileExtension, long lengthInBytes)
64 63
throws IOException, UnauthorizedException {
64
+ if (Util.hasLength(fileExtension) && (fileExtension.contains("..") || fileExtension.contains("/") || fileExtension.contains("\\"))) {
65
+ throw new IllegalArgumentException("File extension must not contain '..' or a file separator like '/'.");
66
+ }
65 67
String fileName = getKey(fileExtension);
66 68
String pathToFile = localPath.getValue() + "/" + fileName;
67 69
return getSecurityService().setOwnershipCheckPermissionForObjectCreationAndRevertOnError(SecuredDomainType.FILE_STORAGE,
java/com.sap.sse.landscape.aws/src/com/sap/sse/landscape/aws/impl/AwsLandscapeImpl.java
... ...
@@ -998,7 +998,6 @@ public class AwsLandscapeImpl<ShardingKey> implements AwsLandscape<ShardingKey>
998 998
}
999 999
final Ec2Client ec2Client = getEc2Client(getRegion(az.getRegion()));
1000 1000
final Builder runInstancesRequestBuilder = RunInstancesRequest.builder()
1001
- .additionalInfo("Test " + getClass().getName())
1002 1001
.imageId(fromImage.getId().toString())
1003 1002
.minCount(numberOfHostsToLaunch)
1004 1003
.maxCount(numberOfHostsToLaunch)
java/com.sap.sse.security/src/com/sap/sse/security/jaxrs/api/OwnershipResource.java
... ...
@@ -9,6 +9,7 @@ import java.util.Set;
9 9
import java.util.UUID;
10 10
11 11
import javax.ws.rs.Consumes;
12
+import javax.ws.rs.DELETE;
12 13
import javax.ws.rs.GET;
13 14
import javax.ws.rs.PUT;
14 15
import javax.ws.rs.Path;
... ...
@@ -107,6 +108,18 @@ public class OwnershipResource extends AbstractSecurityResource {
107 108
return getOwnership(objectType, new String[] { typeRelativeObjectId });
108 109
}
109 110
111
+ @Path("{objectType}/{typeRelativeObjectId}")
112
+ @DELETE
113
+ @Produces("application/json;charset=UTF-8")
114
+ public Response deleteOwnership(@PathParam("objectType") String objectType,
115
+ @PathParam("typeRelativeObjectId") String typeRelativeObjectId) throws OwnershipException {
116
+ QualifiedObjectIdentifier identifier = new QualifiedObjectIdentifierImpl(objectType,
117
+ new TypeRelativeObjectIdentifier(typeRelativeObjectId));
118
+ SecurityUtils.getSubject().checkPermission(identifier.getStringPermission(DefaultActions.CHANGE_OWNERSHIP));
119
+ getSecurityService().deleteOwnership(identifier);
120
+ return Response.ok().build();
121
+ }
122
+
110 123
@Path("{objectType}")
111 124
@GET
112 125
@Produces("application/json;charset=UTF-8")
... ...
@@ -198,4 +211,16 @@ public class OwnershipResource extends AbstractSecurityResource {
198 211
}
199 212
return Response.ok(new GeneralResponse(true, "ACL changed successfully").toString()).build();
200 213
}
214
+
215
+ @Path("{objectType}/{typeRelativeObjectId}/"+KEY_ACL)
216
+ @DELETE
217
+ @Produces("application/json;charset=UTF-8")
218
+ public Response deleteAccessControlLists(@PathParam("objectType") String objectType,
219
+ @PathParam("typeRelativeObjectId") String typeRelativeObjectId) throws OwnershipException {
220
+ QualifiedObjectIdentifier identifier = new QualifiedObjectIdentifierImpl(objectType,
221
+ new TypeRelativeObjectIdentifier(typeRelativeObjectId));
222
+ SecurityUtils.getSubject().checkPermission(identifier.getStringPermission(DefaultActions.CHANGE_ACL));
223
+ getSecurityService().deleteAccessControlList(identifier);
224
+ return Response.ok().build();
225
+ }
201 226
}
java/com.sap.sse.security/src/com/sap/sse/security/util/SecuredServer.java
... ...
@@ -57,6 +57,12 @@ public interface SecuredServer {
57 57
void setGroupAndUserOwner(HasPermissions type, TypeRelativeObjectIdentifier typeRelativeObjectId,
58 58
Optional<String> displayName, Optional<UUID> groupId, Optional<String> username)
59 59
throws MalformedURLException, ClientProtocolException, IOException, ParseException;
60
+
61
+ void deleteOwnership(HasPermissions type, TypeRelativeObjectIdentifier typeRelativeObjectId)
62
+ throws MalformedURLException, ClientProtocolException, IOException, ParseException;
63
+
64
+ void deleteAccessControlLists(HasPermissions type, TypeRelativeObjectIdentifier typeRelativeObjectId)
65
+ throws MalformedURLException, ClientProtocolException, IOException, ParseException;
60 66
61 67
Iterable<Pair<WildcardPermission, Boolean>> hasPermissions(Iterable<WildcardPermission> permissions) throws UnsupportedEncodingException, MalformedURLException, ClientProtocolException, IOException, ParseException;
62 68
/**
java/com.sap.sse.security/src/com/sap/sse/security/util/impl/SecuredServerImpl.java
... ...
@@ -4,6 +4,7 @@ import java.io.ByteArrayInputStream;
4 4
import java.io.ByteArrayOutputStream;
5 5
import java.io.IOException;
6 6
import java.io.InputStreamReader;
7
+import java.net.MalformedURLException;
7 8
import java.net.URL;
8 9
import java.net.URLEncoder;
9 10
import java.util.ArrayList;
... ...
@@ -176,6 +177,36 @@ public class SecuredServerImpl implements SecuredServer {
176 177
}
177 178
178 179
@Override
180
+ public void deleteOwnership(HasPermissions type, TypeRelativeObjectIdentifier typeRelativeObjectId)
181
+ throws MalformedURLException, ClientProtocolException, IOException, ParseException {
182
+ final URL deleteOwnershipUrl = new URL(getBaseUrl(),
183
+ SECURITY_API_PREFIX + OwnershipResource.RESTSECURITY_OWNERSHIP + "/"
184
+ + type.getName() + "/" + typeRelativeObjectId.toString());
185
+ final HttpDelete deleteRequest = new HttpDelete(deleteOwnershipUrl.toString());
186
+ deleteRequest.setHeader(HTTP.CONTENT_TYPE, "application/json");
187
+ authenticate(deleteRequest);
188
+ final CloseableHttpResponse response = createHttpClient().execute(deleteRequest);
189
+ if (response.getStatusLine().getStatusCode() >= 300) {
190
+ throw new IllegalArgumentException(response.getStatusLine().getReasonPhrase());
191
+ }
192
+ }
193
+
194
+ @Override
195
+ public void deleteAccessControlLists(HasPermissions type, TypeRelativeObjectIdentifier typeRelativeObjectId)
196
+ throws MalformedURLException, ClientProtocolException, IOException, ParseException {
197
+ final URL deleteACLUrl = new URL(getBaseUrl(),
198
+ SECURITY_API_PREFIX + OwnershipResource.RESTSECURITY_OWNERSHIP + "/"
199
+ + type.getName() + "/" + typeRelativeObjectId.toString() + "/" + OwnershipResource.KEY_ACL);
200
+ final HttpDelete deleteRequest = new HttpDelete(deleteACLUrl.toString());
201
+ deleteRequest.setHeader(HTTP.CONTENT_TYPE, "application/json");
202
+ authenticate(deleteRequest);
203
+ final CloseableHttpResponse response = createHttpClient().execute(deleteRequest);
204
+ if (response.getStatusLine().getStatusCode() >= 300) {
205
+ throw new IllegalArgumentException(response.getStatusLine().getReasonPhrase());
206
+ }
207
+ }
208
+
209
+ @Override
179 210
public Map<UUID, Set<String>> getAccessControlLists(HasPermissions type, TypeRelativeObjectIdentifier typeRelativeObjectId) throws ClientProtocolException, IOException, ParseException {
180 211
final URL getGroupAndUserOwnerUrl = new URL(getBaseUrl(), SECURITY_API_PREFIX + OwnershipResource.RESTSECURITY_OWNERSHIP
181 212
+ "/" + type.getName() + "/" + typeRelativeObjectId.toString() + "/" + OwnershipResource.KEY_ACL);
java/com.sap.sse.security/webservices/api/ownership.html
... ...
@@ -93,6 +93,22 @@
93 93
<td><code>curl "http://127.0.0.1:8888/security/api/restsecurity/ownership/TRACKED_RACE?id=Croatia%20Coast%20Cup%202019%20-%20ORC%20with%20Spinnaker&id=Race%201%20-%20ORC%20mit%20Spi"</code><br>
94 94
Will provide a JSON document as explained above.</td>
95 95
</tr>
96
+ <tr>
97
+ <td>Request Method:</td>
98
+ <td>DELETE /{objectType}/{typeRelativeObjectId}</td>
99
+ </tr>
100
+ <tr>
101
+ <td>Parameter (path)</td>
102
+ <td>
103
+ <div>objectType</div> Object type represents the type of object to delete the ownership information for
104
+ <div>objectId</div> Unique Object id.
105
+ </td>
106
+ </tr>
107
+ <tr>
108
+ <td>Example:</td>
109
+ <td><code>curl -i -X DELETE "http://admin:admin@127.0.0.1:8888/security/api/restsecurity/ownership/USER_GROUP/82832851-07ac-47ee-9ddf-6f4f9eaa7823"</code><br>
110
+ Will delete the ownership information for the group identified.</td>
111
+ </tr>
96 112
97 113
<tr>
98 114
<td>Request Method:</td>
... ...
@@ -102,7 +118,7 @@
102 118
<td>Output format:</td>
103 119
<td>a JSON document of the following format<br>
104 120
<pre>{"objectType":"USER_GROUP","objectId":"82832851-07ac-47ee-9ddf-6f4f9eaa7823","displayName": null,"acl":[{"groupId":null,"actions":["READ","!DELETE"]},{"groupId":"82832851-07ac-47ee-9ddf-6f4f9eaa7823","actions":["READ","!UPDATE"]}]}
105
-</pre>
121
+ </pre>
106 122
</td>
107 123
</tr>
108 124
<tr>
... ...
@@ -142,6 +158,23 @@
142 158
<td><code>curl -i -X PUT -H 'Content-Type: application/json' -d '{"objectType":"USER_GROUP","objectId":"82832851-07ac-47ee-9ddf-6f4f9eaa7823","displayName":"The Admin Tenant ACL","acl":[{"groupId":null,"actions":["READ","!UPDATE"]},{"groupId":"82832851-07ac-47ee-9ddf-6f4f9eaa7823","actions":["READ","!DELETE"]}]}' "http://admin:admin@127.0.0.1:8888/security/api/restsecurity/ownership/USER_GROUP/82832851-07ac-47ee-9ddf-6f4f9eaa7823/acl"</code><br>
143 159
Will provide a JSON document as explained above.</td>
144 160
</tr>
161
+ <tr>
162
+ <td>Request Method:</td>
163
+ <td>DELETE /{objectType}/{typeRelativeObjectId}/acl</td>
164
+ </tr>
165
+ <tr>
166
+ <td>Parameter (path)</td>
167
+ <td>
168
+ <div>objectType</div> Object type represents the type of object to delete the ACL information for
169
+ <div>objectId</div> Unique Object id.
170
+ </td>
171
+ </tr>
172
+ <tr>
173
+ <td>Example:</td>
174
+ <td><code>curl -i -X DELETE "http://admin:admin@127.0.0.1:8888/security/api/restsecurity/ownership/USER_GROUP/82832851-07ac-47ee-9ddf-6f4f9eaa7823/acl"</code><br>
175
+ Will delete the ACL information for the group identified.</td>
176
+ </tr>
177
+
145 178
146 179
</table>
147 180
</body>
java/com.sap.sse.test/src/com/sap/sse/test/RegexTest.java
... ...
@@ -5,6 +5,7 @@ import static org.junit.jupiter.api.Assertions.assertFalse;
5 5
import static org.junit.jupiter.api.Assertions.assertNull;
6 6
import static org.junit.jupiter.api.Assertions.assertTrue;
7 7
8
+import java.util.Arrays;
8 9
import java.util.logging.Logger;
9 10
import java.util.regex.Matcher;
10 11
import java.util.regex.Pattern;
... ...
@@ -94,4 +95,11 @@ public class RegexTest {
94 95
final Matcher m1 = nationalityPattern.matcher(sb.toString());
95 96
assertFalse(m1.matches());
96 97
}
98
+
99
+ @Test
100
+ public void testExpeditionHeaderSplitting() {
101
+ final String line = "a , b ,, c";
102
+ final String[] splitResult = line.split("\\s*,\\s*");
103
+ assertEquals(Arrays.asList("a", "b", "", "c"), Arrays.asList(splitResult));
104
+ }
97 105
}
wiki/info/landscape/archive-server-upgrade.md
... ...
@@ -90,7 +90,7 @@ The instance will start up, install the release as specified and launch the Java
90 90
91 91
But note: having launched the loading of all races doesn't make the new archive candidate present all content in the proper form yet. Many background processes will keep running for several more hours, computing maneuvers and from them updating wind estimations. You should let the process finish before running the mandatory archive server content comparison. One way to track these background processes is by looking at the EC2 console's instance monitoring and there the CPU Utilization chart. You will clearly see when the CPU utilization drops from 100% to 0% when the background processes are done. Instead, you may choose to ssh into the instance and run a ``top`` command or similar and track CPU load. Alternatively, track the ``/home/sailing/servers/server/logs/sailing0.log.0`` file on the new archive server candidate. It will contain lines of the sort
92 92
```
93
-INFO: Thread[MarkPassingCalculator for race R14 initialization,4,main]: Timeout waiting for future task com.sap.sse.util.impl.ThreadPoolAwareFutureTask@39f29211 (retrying); scheduled with executor com.sap.sse.util.impl.NamedTracingScheduledThreadPoolExecutor@51517746[Running, pool size = 7, active threads = 7, queued tasks = 352521, completed tasks = 610095][name=Default background executor]
93
+INFO: Thread&#91;MarkPassingCalculator for race R14 initialization,4,main&#93;: Timeout waiting for future task com.sap.sse.util.impl.ThreadPoolAwareFutureTask@39f29211 (retrying); scheduled with executor com.sap.sse.util.impl.NamedTracingScheduledThreadPoolExecutor@51517746\[Running, pool size = 7, active threads = 7, queued tasks = 352521, completed tasks = 610095\]\[name=Default background executor\]
94 94
```
95 95
that will keep repeating. Watch out for the ``queued tasks`` count. It should be decreasing, and when done it should go down to 0 eventually, although you may not see a log entry with "queued tasks = 0" necessarily.
96 96
wiki/info/landscape/mongo-cluster-upgrade.md
... ...
@@ -0,0 +1,91 @@
1
+# MongoDB Cluster Upgrades
2
+
3
+In our production environment on AWS, we currently (2026-02-10) run three MongoDB replica sets:
4
+
5
+- ``live``: holds all databases for live operations and consists of three nodes: two i3.large instances with fast NVMe storage used for the ``/var/lib/mongo`` partition, and a hidden instance with an EBS volume that is backed up on a daily basis
6
+- ``archive``: holds the ``winddb`` database used for the ARCHIVE server
7
+- ``slow``: used for backing up databases when removing them from the ``live`` replica set, e.g., when shutting down an application replica set after an event
8
+
9
+The ``archive`` and ``slow`` replica sets usually have only a single instance running on ``dbserver.internal.sapsailing.com``, and this is also where the hidden replica of the ``live`` replica set runs. The other two ``live`` nodes have internal DNS names set for them: ``mongo[01].internal.sapsailing.com``.
10
+
11
+Upgrades may affect the packages installed on the nodes, or may affect the major version of MongoDB being run. Both upgrade procedures are described in the following two sections.
12
+
13
+## Upgrade Using Package Manager
14
+
15
+With Amazon Linux 2023, ``dnf`` is the package manager used. When logging on to an instance, a message like
16
+
17
+```
18
+A newer release of "Amazon Linux" is available.
19
+ Version 2023.10.20260202:
20
+Run "/usr/bin/dnf check-release-update" for full release and version update info
21
+```
22
+
23
+may be shown. In this case, run
24
+
25
+```
26
+dnf --releasever=latest upgrade
27
+```
28
+
29
+and watch closely what the package manager suggests. As soon as you see a kernel update about to install, displayed in red color (if your terminal supports colored output), a reboot will be required after completing the installation. This can also be checked using the following command:
30
+
31
+```
32
+needs-restarting -r
33
+```
34
+
35
+It will output a message like
36
+
37
+```
38
+No core libraries or services have been updated since boot-up.
39
+Reboot should not be necessary.
40
+```
41
+
42
+and exits with code ``0`` if no reboot is required; otherwise, it will exit with ``1`` and display a corresponding message.
43
+
44
+To avoid interrupting user-facing services, rebooting the MongoDB nodes shall follow a certain procedure:
45
+
46
+- Ensure that no ARCHIVE candidate is currently launching; such a candidate would read from the ``archive`` replica set, so that rebooting the ``dbserver.internal.sapsailing.com`` node would interrupt this loading process. If an ARCHIVE candidate is launching, wait for the launch to finish.
47
+- Ensure that no application replica set is currently being shut down with its database being backed up. This backup would fail if the ``dbserver.internal.sapsailing.com`` node were restarted as it hosts the ``slow`` replica set used for the backup.
48
+- ssh into ``ec2-user@dbserver.internal.sapsailing.com``
49
+- There, run ``sudo dnf --releasever=latest upgrade`` and confirm with "yes"
50
+- Assuming an update was installed that now requires a reboot, run ``sudo reboot``
51
+- Wait until the instance is back up and running, you can ssh into it again, and ``pgrep mongod`` shows the three process IDs of the three running ``mongod`` processes
52
+- ssh into ``ec2-user@mongo0.internal.sapsailing.com``
53
+- run ``mongosh`` to see if ``mongo0`` is currently primary or secondary in the ``live`` replica set
54
+- if you see "secondary", you're all set; if you see "primary", enter ``rs.stepDown()`` and see how the prompt changes from "primary" to "secondary"
55
+- use ``quit()`` to exit the ``mongosh`` shell
56
+- run ``sudo dnf --releasever=latest upgrade`` and confirm with "yes"
57
+- if a reboot is required, run ``sudo reboot``
58
+- wait for the instance and its ``mongod`` process to become available again; you may probe, e.g., by ssh-ing into the instance and checking with ``mongosh``
59
+- repeat the process described for ``mongo0`` for ``mongo1.internal.sapsailing.com``
60
+
61
+Hint: You can choose the order between ``mongo0`` and ``mongo1`` as you wish. If you start with the "secondary" instance, you will save one ``rs.stepDown()`` command.
62
+
63
+## MongoDB Major Version Upgrade
64
+
65
+Upgrading a MongoDB replica set that has more than one node can work without clients noticing any interruption of service. This is particularly important for our ``live`` replica set used by all running application replica sets other than ``ARCHIVE``. For the single-node replica sets ``archive`` and ``slow`` it again comes down to timing an upgrade such that no ``ARCHIVE`` candidate launch is ongoing, and that no application replica set is currently being shut down with its database getting backed up to the ``slow`` replica set.
66
+
67
+The [MongoDB online documentation](https://www.mongodb.com/docs/manual/release-notes/8.0-upgrade-replica-set/#std-label-8.0-upgrade-replica-set) contains a useful description of the steps necessary. The key to understanding those steps is that MongoDB replica sets can distinguish between the actual version of ``mongod`` that is running, and the "protocol version" the nodes use to talk to each other in a replica set. Newer ``mongod`` versions can always still work with the "protocol version" of the previous major release. For example, ``mongod`` in version 8 can still work with protocol version "7.0".
68
+
69
+Therefore, upgrading a replica set with multiple nodes will work along these steps:
70
+
71
+- Ensure all ``mongod`` processes in the replica set run the same (old) version
72
+- Ensure all ``mongod`` processes use the protocol version that matches their own ``mongod`` version
73
+- Upgrade the binaries and restart the ``mongod`` processes with the new version for all nodes, properly having the primary step down before restarting its process
74
+- Set the new protocol version for the replica set
75
+
76
+The sequence in which to work with the different nodes and processes resembles that for reboots after upgrades with the package manager. Here are the steps in detail:
77
+
78
+- Ensure that no ARCHIVE candidate is currently launching; such a candidate would read from the ``archive`` replica set, so that rebooting the ``dbserver.internal.sapsailing.com`` node would interrupt this loading process. If an ARCHIVE candidate is launching, wait for the launch to finish.
79
+- Ensure that no application replica set is currently being shut down with its database being backed up. This backup would fail if the ``dbserver.internal.sapsailing.com`` node were restarted as it hosts the ``slow`` replica set used for the backup.
80
+- ssh into ``ec2-user@dbserver.internal.sapsailing.com``
81
+- Ensure all ``mongod`` processes on the host run the same (old) version, using ``mongosh`` for all three replica sets (``live``, ``archive``, ``slow``)
82
+- In ``mongosh``, display the protocol version using ``db.adminCommand( { getParameter: 1, featureCompatibilityVersion: 1 } )``. Should you find a deviation, set the protocol version using ``db.adminCommand( { setFeatureCompatibilityVersion: "7.0" , confirm: true } )`` (of course with the "7.0" replaced by whichever protocol version you have to set this to).
83
+- In ``/etc/yum.repos.d/`` find the ``mongodb-org.{major.minor}.repo`` file that controls where the MongoDB packages are currently obtained from. Rename the current ``mongodb-org.{major.minor}.repo`` by appending, e.g., ``.bak`` to its name and create a new ``.repo`` file for the MongoDB version you'd like to upgrade to. Then run ``dnf --releasever=latest upgrade``. This should automatically restart the ``mongod`` processes now upgraded to the new release.
84
+- ssh into ``ec2-user@mongo0.internal.sapsailing.com``
85
+- check ``mongod`` and protocol version using ``mongosh``; adjust protocol version if necessary (see above)
86
+- if on the "primary", use ``rs.stepDown()`` to make it a "secondary"
87
+- run the binaries upgrade as explained for ``dbserver.internal.sapsailing.com`` above, adjusting the ``.repo`` file under ``/etc/yum.repos.d``, followed by ``dnf --releasever=latest upgrade``
88
+- repeat the last four steps for ``mongo1.internal.sapsailing.com``
89
+- use ``mongosh`` to connect to the primaries of all three replica sets (``live``, ``archive``, ``slow``) and on each one issue the command ``db.adminCommand( { setFeatureCompatibilityVersion: "8.0", confirm: true } )`` with the "8.0" replaced by the protocol version you want to upgrade to, so usually the major/minor version of the binaries to which you have upgraded.
90
+
91
+Done :-)
... ...
\ No newline at end of file