Merge pull request #37 from chrislusf/master

sync
This commit is contained in:
hilimd
2020-11-16 16:30:19 +08:00
committed by GitHub
43 changed files with 790 additions and 308 deletions


@@ -5,7 +5,7 @@
<groupId>com.github.chrislusf</groupId>
<artifactId>seaweedfs-client</artifactId>
-<version>1.5.3</version>
+<version>1.5.4</version>
<parent>
<groupId>org.sonatype.oss</groupId>


@@ -5,7 +5,7 @@
<groupId>com.github.chrislusf</groupId>
<artifactId>seaweedfs-client</artifactId>
-<version>1.5.3</version>
+<version>1.5.4</version>
<parent>
<groupId>org.sonatype.oss</groupId>


@@ -5,7 +5,7 @@
<groupId>com.github.chrislusf</groupId>
<artifactId>seaweedfs-client</artifactId>
-<version>1.5.3</version>
+<version>1.5.4</version>
<parent>
<groupId>org.sonatype.oss</groupId>


@@ -275,9 +275,9 @@ public class FilerClient {
try {
FilerProto.CreateEntryResponse createEntryResponse =
filerGrpcClient.getBlockingStub().createEntry(FilerProto.CreateEntryRequest.newBuilder()
.setDirectory(parent)
.setEntry(entry)
.build());
if (Strings.isNullOrEmpty(createEntryResponse.getError())) {
return true;
}
@@ -333,4 +333,13 @@ public class FilerClient {
return true;
}
public Iterator<FilerProto.SubscribeMetadataResponse> watch(String prefix, String clientName, long sinceNs) {
return filerGrpcClient.getBlockingStub().subscribeMetadata(FilerProto.SubscribeMetadataRequest.newBuilder()
.setPathPrefix(prefix)
.setClientName(clientName)
.setSinceNs(sinceNs)
.build()
);
}
}
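The new watch() method wraps the filer's subscribeMetadata stream in a blocking iterator. A minimal consumption loop, sketched under the assumption of a filer gRPC endpoint at localhost:18888 (the WatchFiles example added later in this commit shows fuller event handling):

    import seaweedfs.client.FilerClient;
    import seaweedfs.client.FilerProto;

    import java.io.IOException;
    import java.util.Iterator;

    public class WatchSketch {
        public static void main(String[] args) throws IOException {
            FilerClient filerClient = new FilerClient("localhost", 18888);
            // sinceNs is nanoseconds since the epoch; here we subscribe from "now"
            Iterator<FilerProto.SubscribeMetadataResponse> events =
                    filerClient.watch("/buckets", "watch-sketch", System.currentTimeMillis() * 1000000L);
            while (events.hasNext()) { // blocks until the filer pushes the next metadata event
                System.out.println(events.next().getDirectory());
            }
        }
    }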


@@ -28,20 +28,26 @@ public class SeaweedRead {
List<ChunkView> chunkViews = viewFromVisibles(visibleIntervals, position, bufferLength);
Map<String, FilerProto.Locations> knownLocations = new HashMap<>();
FilerProto.LookupVolumeRequest.Builder lookupRequest = FilerProto.LookupVolumeRequest.newBuilder();
for (ChunkView chunkView : chunkViews) {
String vid = parseVolumeId(chunkView.fileId);
-if (volumeIdCache.getLocations(vid)==null){
+FilerProto.Locations locations = volumeIdCache.getLocations(vid);
+if (locations == null) {
lookupRequest.addVolumeIds(vid);
} else {
knownLocations.put(vid, locations);
}
}
-if (lookupRequest.getVolumeIdsCount()>0){
+if (lookupRequest.getVolumeIdsCount() > 0) {
FilerProto.LookupVolumeResponse lookupResponse = filerGrpcClient
.getBlockingStub().lookupVolume(lookupRequest.build());
Map<String, FilerProto.Locations> vid2Locations = lookupResponse.getLocationsMapMap();
-for (Map.Entry<String,FilerProto.Locations> entry : vid2Locations.entrySet()) {
+for (Map.Entry<String, FilerProto.Locations> entry : vid2Locations.entrySet()) {
volumeIdCache.setLocations(entry.getKey(), entry.getValue());
knownLocations.put(entry.getKey(), entry.getValue());
}
}
@@ -57,7 +63,7 @@ public class SeaweedRead {
startOffset += gap;
}
-FilerProto.Locations locations = volumeIdCache.getLocations(parseVolumeId(chunkView.fileId));
+FilerProto.Locations locations = knownLocations.get(parseVolumeId(chunkView.fileId));
if (locations == null || locations.getLocationsCount() == 0) {
LOG.error("failed to locate {}", chunkView.fileId);
// log here!
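The request-local knownLocations map means each chunk's volume locations are resolved once per read and can no longer disappear from the shared volumeIdCache between the batched lookup and the actual fetch. A rough sketch of that batch-resolve pattern, with illustrative names only (BatchResolveSketch, lookupRemotely, and the String value type are not the client's API):

    import com.google.common.cache.Cache;
    import com.google.common.cache.CacheBuilder;

    import java.util.ArrayList;
    import java.util.Collection;
    import java.util.HashMap;
    import java.util.List;
    import java.util.Map;
    import java.util.concurrent.TimeUnit;

    public class BatchResolveSketch {
        // shared, expiring cache, analogous to volumeIdCache
        private final Cache<String, String> sharedCache = CacheBuilder.newBuilder()
                .maximumSize(10_000)
                .expireAfterAccess(5, TimeUnit.MINUTES)
                .build();

        // consult the shared cache once per id, batch-resolve the misses,
        // then serve the rest of the request from a request-local map
        Map<String, String> resolve(Collection<String> volumeIds) {
            Map<String, String> known = new HashMap<>();
            List<String> misses = new ArrayList<>();
            for (String vid : volumeIds) {
                String loc = sharedCache.getIfPresent(vid); // may be evicted at any moment
                if (loc == null) {
                    misses.add(vid);
                } else {
                    known.put(vid, loc);
                }
            }
            for (String vid : misses) {
                String loc = lookupRemotely(vid);   // stands in for the batched lookupVolume RPC
                sharedCache.put(vid, loc);
                known.put(vid, loc);                // the request-local copy cannot expire mid-read
            }
            return known;
        }

        private String lookupRemotely(String vid) {
            return "volume-server-for-" + vid;      // placeholder, not a real lookup
        }
    }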


@@ -15,7 +15,7 @@ public class VolumeIdCache {
}
this.cache = CacheBuilder.newBuilder()
.maximumSize(maxEntries)
-.expireAfterAccess(1, TimeUnit.HOURS)
+.expireAfterAccess(5, TimeUnit.MINUTES)
.build();
}
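Shortening the expiry from one hour to five minutes bounds how long a stale volume location can be served from the cache. For reference, Guava's expireAfterAccess counts from the last read or write, so a frequently used volume id never expires, while expireAfterWrite forces a refresh regardless of use. A generic sketch of both variants (plain String cache, not the actual VolumeIdCache fields):

    import com.google.common.cache.Cache;
    import com.google.common.cache.CacheBuilder;

    import java.util.concurrent.TimeUnit;

    public class ExpirySketch {
        public static void main(String[] args) {
            // dropped 5 minutes after the entry was last read or written
            Cache<String, String> byAccess = CacheBuilder.newBuilder()
                    .maximumSize(10_000)
                    .expireAfterAccess(5, TimeUnit.MINUTES)
                    .build();

            // dropped 5 minutes after the entry was written, even if it is read constantly
            Cache<String, String> byWrite = CacheBuilder.newBuilder()
                    .maximumSize(10_000)
                    .expireAfterWrite(5, TimeUnit.MINUTES)
                    .build();

            byAccess.put("3", "127.0.0.1:8080");
            byWrite.put("3", "127.0.0.1:8080");
            System.out.println(byAccess.getIfPresent("3") + " / " + byWrite.getIfPresent("3"));
        }
    }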


@@ -362,6 +362,7 @@ message FilerConf {
SSD = 1;
}
DiskType disk_type = 5;
bool fsync = 6;
}
repeated PathConf locations = 2;
}
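With the new field, the generated Java builder gains a boolean fsync setter. A minimal sketch, assuming PathConf and its DiskType enum are nested inside FilerConf as this hunk suggests (the exact generated class path is an assumption, not confirmed by the diff):

    import seaweedfs.client.FilerProto;

    public class PathConfSketch {
        public static void main(String[] args) {
            // protobuf generates setFsync()/getFsync() for the new `bool fsync = 6;` field
            FilerProto.FilerConf.PathConf conf = FilerProto.FilerConf.PathConf.newBuilder()
                    .setDiskType(FilerProto.FilerConf.PathConf.DiskType.SSD)
                    .setFsync(true)
                    .build();
            System.out.println(conf.getFsync());
        }
    }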


@@ -11,13 +11,13 @@
<dependency>
<groupId>com.github.chrislusf</groupId>
<artifactId>seaweedfs-client</artifactId>
-<version>1.5.3</version>
+<version>1.5.4</version>
<scope>compile</scope>
</dependency>
<dependency>
<groupId>com.github.chrislusf</groupId>
<artifactId>seaweedfs-hadoop2-client</artifactId>
-<version>1.5.3</version>
+<version>1.5.4</version>
<scope>compile</scope>
</dependency>
<dependency>


@@ -1,4 +1,4 @@
-package com.example.test;
+package com.seaweedfs.examples;
import seaweed.hdfs.SeaweedInputStream;
import seaweedfs.client.FilerClient;
@@ -10,17 +10,20 @@ import java.io.InputStream;
import java.util.zip.ZipEntry;
import java.util.zip.ZipInputStream;
-public class Example {
-public static FilerClient filerClient = new FilerClient("localhost", 18888);
-public static FilerGrpcClient filerGrpcClient = new FilerGrpcClient("localhost", 18888);
+public class UnzipFile {
public static void main(String[] args) throws IOException {
// local mode is very fast
FilerGrpcClient filerGrpcClient = new FilerGrpcClient("localhost", 18888);
FilerClient filerClient = new FilerClient(filerGrpcClient);
long startTime = System.currentTimeMillis();
parseZip("/Users/chris/tmp/test.zip");
// read via SeaweedFS
long startTime2 = System.currentTimeMillis();
long localProcessTime = startTime2 - startTime;
SeaweedInputStream seaweedInputStream = new SeaweedInputStream(
filerGrpcClient,
new org.apache.hadoop.fs.FileSystem.Statistics(""),
@@ -29,6 +32,11 @@ public class Example {
);
parseZip(seaweedInputStream);
long swProcessTime = System.currentTimeMillis() - startTime2;
System.out.println("Local time: " + localProcessTime);
System.out.println("SeaweedFS time: " + swProcessTime);
}
public static void parseZip(String filename) throws IOException {
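The body of parseZip is cut off in this hunk. A rough sketch of what a stream-based variant could look like, just to show the intended measurement (this is not the file's actual implementation):

    import java.io.FileInputStream;
    import java.io.IOException;
    import java.io.InputStream;
    import java.util.zip.ZipInputStream;

    public class ParseZipSketch {
        public static void main(String[] args) throws IOException {
            // same sample path as the example above; any local zip file works
            parseZip(new FileInputStream("/Users/chris/tmp/test.zip"));
        }

        // walk the zip stream and count entries, which is enough to exercise the read path
        public static void parseZip(InputStream in) throws IOException {
            try (ZipInputStream zip = new ZipInputStream(in)) {
                int count = 0;
                while (zip.getNextEntry() != null) {
                    count++;
                }
                System.out.println("entries: " + count);
            }
        }
    }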


@@ -0,0 +1,46 @@
package com.seaweedfs.examples;
import seaweedfs.client.FilerClient;
import seaweedfs.client.FilerProto;
import java.io.IOException;
import java.util.Date;
import java.util.Iterator;
public class WatchFiles {
public static void main(String[] args) throws IOException {
FilerClient filerClient = new FilerClient("localhost", 18888);
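// start watching from one hour ago: epoch milliseconds converted to nanoseconds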
long sinceNs = (System.currentTimeMillis() - 3600 * 1000) * 1000000L;
Iterator<FilerProto.SubscribeMetadataResponse> watch = filerClient.watch(
"/buckets",
"exampleClientName",
sinceNs
);
System.out.println("Connected to filer, subscribing from " + new Date());
while (watch.hasNext()) {
FilerProto.SubscribeMetadataResponse event = watch.next();
FilerProto.EventNotification notification = event.getEventNotification();
if (!event.getDirectory().equals(notification.getNewParentPath())) {
// move an entry to a new directory, possibly with a new name
if (notification.hasOldEntry() && notification.hasNewEntry()) {
System.out.println("moved " + event.getDirectory() + "/" + notification.getOldEntry().getName() + " to " + notification.getNewParentPath() + "/" + notification.getNewEntry().getName());
} else {
System.out.println("this should not happen.");
}
} else if (notification.hasNewEntry() && !notification.hasOldEntry()) {
System.out.println("created entry " + event.getDirectory() + "/" + notification.getNewEntry().getName());
} else if (!notification.hasNewEntry() && notification.hasOldEntry()) {
System.out.println("deleted entry " + event.getDirectory() + "/" + notification.getOldEntry().getName());
} else if (notification.hasNewEntry() && notification.hasOldEntry()) {
System.out.println("updated entry " + event.getDirectory() + "/" + notification.getNewEntry().getName());
}
}
}
}


@@ -301,7 +301,7 @@
</snapshotRepository>
</distributionManagement>
<properties>
-<seaweedfs.client.version>1.5.3</seaweedfs.client.version>
+<seaweedfs.client.version>1.5.4</seaweedfs.client.version>
<hadoop.version>2.9.2</hadoop.version>
</properties>
</project>


@@ -5,7 +5,7 @@
<modelVersion>4.0.0</modelVersion>
<properties>
-<seaweedfs.client.version>1.5.3</seaweedfs.client.version>
+<seaweedfs.client.version>1.5.4</seaweedfs.client.version>
<hadoop.version>2.9.2</hadoop.version>
</properties>


@@ -309,7 +309,7 @@
</snapshotRepository>
</distributionManagement>
<properties>
-<seaweedfs.client.version>1.5.3</seaweedfs.client.version>
+<seaweedfs.client.version>1.5.4</seaweedfs.client.version>
<hadoop.version>3.1.1</hadoop.version>
</properties>
</project>


@@ -5,7 +5,7 @@
<modelVersion>4.0.0</modelVersion>
<properties>
-<seaweedfs.client.version>1.5.3</seaweedfs.client.version>
+<seaweedfs.client.version>1.5.4</seaweedfs.client.version>
<hadoop.version>3.1.1</hadoop.version>
</properties>