Commit 2d066f7

Refactor Docker image handling by replacing ArchiveService with LayerProcessor for layer merging and extraction
1 parent: cce48ba

6 files changed: +375 -39 lines


lib/api/docker_images.dart

Lines changed: 89 additions & 36 deletions
@@ -7,7 +7,7 @@ import 'package:chunked_downloader/chunked_downloader.dart';
 import 'package:dio/dio.dart';
 import 'package:flutter/foundation.dart';
 import 'package:localization/localization.dart';
-import 'package:wsl2distromanager/api/archive.dart';
+import 'package:wsl2distromanager/api/layer_processor.dart';
 import 'package:wsl2distromanager/api/safe_paths.dart';
 import 'package:wsl2distromanager/components/helpers.dart';
 import 'package:wsl2distromanager/components/logging.dart';
@@ -145,12 +145,10 @@ class DockerImage {
   String? distroName;
   final Dio dio;
   final ChunkedDownloaderFactory chunkedDownloaderFactory;
-  final ArchiveService archiveService;

   DockerImage(
       {Dio? dio,
       ChunkedDownloaderFactory? chunkedDownloaderFactory,
-      ArchiveService? archiveService,
       String? registryUrl,
       this.authUrl = 'https://auth.docker.io/token',
       this.svcUrl = 'registry.docker.io'})
@@ -174,8 +172,7 @@ class DockerImage {
                 chunkSize: chunkSize ?? 1024 * 1024,
                 onProgress: onProgress,
                 onDone: onDone,
-                onError: onError)),
-        archiveService = archiveService ?? ArchiveService() {
+                onError: onError)) {
     String? mirror = prefs.getString('DockerMirror');
     if (mirror != null && mirror.isNotEmpty) {
       this.registryUrl = mirror;
@@ -349,8 +346,85 @@ class DockerImage {
           ImageManifest.fromMap(await _getManifest(image, token, digest));

       final config = imageManifest.config.digest;
-      await _downloadBlob(image, token, config,
-          SafePath(path).file('config.json'), (p0, p1) {});
+      final configPath = SafePath(path).file('config.json');
+      await _downloadBlob(image, token, config, configPath, (p0, p1) {});
+
+      // Parse config.json for metadata (V2)
+      try {
+        if (await File(configPath).exists()) {
+          final configContent = await File(configPath).readAsString();
+          final configJson = json.decode(configContent);
+
+          if (configJson['config'] != null) {
+            final parsedConfig = configJson['config'];
+            final env = parsedConfig['Env'];
+            final cmd = parsedConfig['Cmd'];
+            final entrypoint = parsedConfig['Entrypoint'];
+            final user = parsedConfig['User'];
+
+            // Handle User
+            if (user != null && user is String && user.isNotEmpty) {
+              var userStr = user;
+              if (userStr.contains(':')) {
+                userStr = userStr.split(':')[0];
+              }
+              if (int.tryParse(userStr) == null) {
+                prefs.setString('StartUser_$distroName', userStr);
+              } else {
+                Notify.message(
+                    'Not implemented yet: Docker USER is a number.');
+              }
+            }
+
+            // Handle Env, Entrypoint, Cmd
+            var entrypointCmd = '';
+            if (entrypoint != null && entrypoint is List) {
+              entrypointCmd = entrypoint.map((e) => e.toString()).join(' ');
+            }
+
+            String exportEnv = '';
+            if (env != null && env is List) {
+              exportEnv = env.map((e) => 'export $e;').join(' ');
+            }
+
+            // Use distroName for StartCmd so it applies to the instance
+            if (cmd != null && cmd is List) {
+              prefs.setString('StartCmd_$distroName',
+                  '$exportEnv $entrypointCmd; ${cmd.map((e) => e.toString()).join(' ')}');
+            } else if (entrypointCmd.isNotEmpty) {
+              prefs.setString(
+                  'StartCmd_$distroName', '$exportEnv $entrypointCmd');
+            }
+          }
+
+          // Handle history for UserCmds/GroupCmds
+          // These need to be saved under image filename to be picked up by create_dialog
+          String imageName = filename(image, tag);
+          List<String> userCmds = [];
+          List<String> groupCmds = [];
+
+          if (configJson['history'] != null &&
+              configJson['history'] is List) {
+            for (var item in configJson['history']) {
+              final createdBy = item['created_by'] as String?;
+              if (createdBy != null) {
+                if (createdBy.contains('adduser') ||
+                    createdBy.contains('useradd')) {
+                  userCmds.add(createdBy);
+                }
+                if (createdBy.contains('groupadd') ||
+                    createdBy.contains('addgroup')) {
+                  groupCmds.add(createdBy);
+                }
+              }
+            }
+          }
+          prefs.setStringList('UserCmds_$imageName', userCmds);
+          prefs.setStringList('GroupCmds_$imageName', groupCmds);
+        }
+      } catch (e, stack) {
+        logDebug(e, stack, 'Failed to parse V2 config');
+      }
     } catch (e, stackTrace) {
       if (kDebugMode) {
         print(e);
@@ -456,8 +530,8 @@ class DockerImage {
     // Set image specific commands
     String name = filename(image, tag);
     if (cmd != null) {
-      prefs.setString(
-          'StartCmd_$name', '$exportEnv $entrypointCmd; ${cmd.join(' ')}');
+      prefs.setString('StartCmd_$distroName',
+          '$exportEnv $entrypointCmd; ${cmd.join(' ')}');
     }
     prefs.setStringList('UserCmds_$name', userCmds);
     prefs.setStringList('GroupCmds_$name', groupCmds);
@@ -570,38 +644,17 @@ class DockerImage {
     int retry = 0;

     final parentPath = SafePath(tmpImagePath);
-    String outTar = parentPath.file('$imageName.tar');
     String outTarGz = SafePath(distroPath).file('$imageName.tar.gz');
     while (retry < 2) {
       try {
-        // More than one layer
-        List<String> paths = [];
-        if (layers != 1) {
-          for (var i = 0; i < layers; i++) {
-            // Read archives layers
-            if (kDebugMode) {
-              print('Extracting layer $i of $layers');
-            }
-            // progress(i, layers, -1, -1);
-            Notify.message('Extracting layer $i of $layers');
-
-            // Extract layer
-            final layerTarGz = parentPath.file('layer_$i.tar.gz');
-            await archiveService.extract(layerTarGz, parentPath.path);
-            paths.add(parentPath.file('layer_$i.tar'));
-          }
-
-          // Archive as tar then gzip to disk
-          await archiveService.merge(paths, outTar);
-          await archiveService.compress(outTar, outTarGz);
-
-          Notify.message('writingtodisk-text'.i18n());
-        } else if (layers == 1) {
-          // Just copy the file
-          File(SafePath(tmpImagePath).file('layer_0.tar.gz'))
-              .copySync(outTarGz);
+        List<String> layerPaths = [];
+        for (var i = 0; i < layers; i++) {
+          layerPaths.add(parentPath.file('layer_$i.tar.gz'));
         }

+        await LayerProcessor()
+            .mergeLayers(layerPaths, outTarGz, (msg) => Notify.message(msg));
+
         retry = 2;
         break;
       } catch (e, stackTrace) {
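
Note: for context, a minimal sketch of the config.json shape the new parsing pass above consumes and the preferences it would derive. The image config below is hypothetical and not part of this commit; the field names (Env, Cmd, Entrypoint, User, history[].created_by) follow the Docker/OCI image config format.

import 'dart:convert';

void main() {
  // Hypothetical V2 image config, trimmed to the fields the parser reads.
  const configContent = '''
{
  "config": {
    "User": "postgres",
    "Env": ["PATH=/usr/local/bin:/usr/bin", "PGDATA=/var/lib/postgresql/data"],
    "Entrypoint": ["docker-entrypoint.sh"],
    "Cmd": ["postgres"]
  },
  "history": [
    {"created_by": "/bin/sh -c groupadd -r postgres"},
    {"created_by": "/bin/sh -c useradd -r -g postgres postgres"}
  ]
}
''';

  final configJson = json.decode(configContent);
  final config = configJson['config'];

  // With this input the parsing code would store roughly:
  //   StartUser_<distro>  -> 'postgres'
  //   StartCmd_<distro>   -> 'export PATH=...; export PGDATA=...; docker-entrypoint.sh; postgres'
  //   UserCmds_<image>    -> ['/bin/sh -c useradd -r -g postgres postgres']
  //   GroupCmds_<image>   -> ['/bin/sh -c groupadd -r postgres']
  final exportEnv = (config['Env'] as List).map((e) => 'export $e;').join(' ');
  final entrypointCmd = (config['Entrypoint'] as List).join(' ');
  final cmd = (config['Cmd'] as List).join(' ');
  print('$exportEnv $entrypointCmd; $cmd');
}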

lib/api/layer_processor.dart

Lines changed: 113 additions & 0 deletions
@@ -0,0 +1,113 @@
+import 'dart:async';
+import 'dart:io';
+import 'package:tar/tar.dart';
+import 'package:path/path.dart' as p;
+
+class LayerProcessor {
+  /// Merges Docker layers into a single rootfs tarball, handling whiteouts.
+  /// [layerPaths] is a list of paths to .tar.gz layer files, ordered from bottom (base) to top.
+  /// [outputPath] is the path where the resulting .tar.gz will be written.
+  Future<void> mergeLayers(List<String> layerPaths, String outputPath,
+      Function(String) onStatus) async {
+    final keptFiles = <String, int>{}; // Path -> Layer Index
+
+    onStatus('Scanning layers...');
+    // Pass 1: Determine which files to keep
+    for (var i = 0; i < layerPaths.length; i++) {
+      onStatus('Scanning layer ${i + 1}/${layerPaths.length}...');
+      final layerPath = layerPaths[i];
+      final file = File(layerPath);
+      if (!await file.exists()) continue;
+
+      final reader = TarReader(file.openRead().transform(gzip.decoder));
+      try {
+        while (await reader.moveNext()) {
+          final entry = reader.current;
+          final path = entry.name;
+          final filename = p.posix.basename(path);
+          final dirname = p.posix.dirname(path);
+
+          if (filename.startsWith('.wh.')) {
+            if (filename == '.wh..wh..opq') {
+              // Opaque whiteout: hide all siblings in this directory from lower layers
+              keptFiles.removeWhere((key, value) {
+                final keyDir = p.posix.dirname(key);
+                return keyDir == dirname && value < i;
+              });
+            } else {
+              // Explicit whiteout
+              final realFilename = filename.substring(4);
+              final realPath =
+                  p.posix.normalize(p.posix.join(dirname, realFilename));
+              keptFiles.remove(realPath);
+            }
+          } else {
+            // Normal file/directory
+            final normalizedPath = p.posix.normalize(path);
+            keptFiles[normalizedPath] = i;
+          }
+        }
+      } finally {
+        await reader.cancel();
+      }
+    }
+
+    onStatus('Writing rootfs...');
+    // Pass 2: Write the output tar
+    final outFile = File(outputPath);
+    if (await outFile.exists()) {
+      await outFile.delete();
+    }
+
+    Stream<TarEntry> streamLayers() async* {
+      for (var i = 0; i < layerPaths.length; i++) {
+        onStatus('Merging layer ${i + 1}/${layerPaths.length}...');
+        final layerPath = layerPaths[i];
+        final file = File(layerPath);
+        if (!await file.exists()) continue;
+
+        final reader = TarReader(file.openRead().transform(gzip.decoder));
+        try {
+          while (await reader.moveNext()) {
+            final entry = reader.current;
+            final path = entry.name;
+            final filename = p.posix.basename(path);
+
+            // Skip whiteout files themselves in the output
+            if (filename.startsWith('.wh.')) {
+              continue;
+            }
+
+            final normalizedPath = p.posix.normalize(path);
+
+            if (keptFiles[normalizedPath] == i) {
+              // We need to wait for the entry to be fully consumed by the writer
+              // before moving the reader to the next entry.
+              final completer = Completer();
+              final trackedStream = entry.contents.transform(
+                  StreamTransformer<List<int>, List<int>>.fromHandlers(
+                handleData: (data, sink) => sink.add(data),
+                handleError: (error, stack, sink) =>
+                    sink.addError(error, stack),
+                handleDone: (sink) {
+                  sink.close();
+                  completer.complete();
+                },
+              ));
+
+              yield TarEntry(entry.header, trackedStream);
+              await completer.future;
+            }
+          }
+        } finally {
+          await reader.cancel();
+        }
+      }
+    }
+
+    await streamLayers()
+        .transform(tarWriter)
+        .transform(gzip.encoder)
+        .pipe(outFile.openWrite());
+  }
+}
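
Note: a minimal usage sketch of the new LayerProcessor, with hypothetical paths that are not from this commit. Layers are passed bottom (base) to top as .tar.gz files and merged into a single gzipped rootfs tarball; a '.wh.<name>' whiteout entry in an upper layer hides '<name>' from lower layers, a '.wh..wh..opq' entry hides a directory's lower-layer entries, and the whiteout markers themselves are dropped from the output.

import 'package:wsl2distromanager/api/layer_processor.dart';

Future<void> main() async {
  // Hypothetical downloaded layer files, ordered base -> top.
  final layerPaths = [
    'C:/tmp/myimage/layer_0.tar.gz',
    'C:/tmp/myimage/layer_1.tar.gz',
  ];

  // If layer_1.tar.gz contains 'etc/.wh.motd', the merged rootfs omits
  // 'etc/motd' even though layer_0.tar.gz provides it.
  await LayerProcessor().mergeLayers(
    layerPaths,
    'C:/distros/myimage.tar.gz', // gzipped rootfs written here
    (msg) => print(msg), // status callback; docker_images.dart passes Notify.message
  );
}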

pubspec.lock

Lines changed: 1 addition & 1 deletion
@@ -947,7 +947,7 @@ packages:
     source: hosted
     version: "0.0.3"
   tar:
-    dependency: transitive
+    dependency: "direct main"
     description:
       name: tar
       sha256: b338bacfd24dae6cf527acb4242003a71fc88ce183a9002376fabbc4ebda30c9

pubspec.yaml

Lines changed: 1 addition & 0 deletions
@@ -36,6 +36,7 @@ dependencies:
   shelf_static: ^1.1.3
   system_info2: ^4.1.0
   system_theme: ^3.1.2
+  tar: ^2.0.2
   url_launcher: ^6.3.2
   window_manager: ^0.5.1
   yaml: ^3.1.3

test/docker_images_unit_test.dart

Lines changed: 0 additions & 2 deletions
@@ -125,7 +125,6 @@ void main() {
           onProgress: onProgress,
           onDone: onDone,
           onError: onError),
-      archiveService: MockArchiveService(),
     );
   });

@@ -218,7 +217,6 @@ void main() {
           onProgress: onProgress,
           onDone: onDone,
           onError: onError),
-      archiveService: MockArchiveService(),
     );

     // Mock token for custom registry (if needed, or just standard auth url if not changed)
