1
0
mirror of https://github.com/square/okhttp.git synced 2026-01-25 16:01:38 +03:00

Merge pull request #101 from square/jwilson/comments

Switch to Square style for comments.
This commit is contained in:
Marcelo Cortes
2013-02-01 08:17:40 -08:00
14 changed files with 147 additions and 202 deletions

View File

@@ -98,45 +98,43 @@ public final class DiskLruCache implements Closeable {
private static final String REMOVE = "REMOVE";
private static final String READ = "READ";
/*
* This cache uses a journal file named "journal". A typical journal file
* looks like this:
* libcore.io.DiskLruCache
* 1
* 100
* 2
*
* CLEAN 3400330d1dfc7f3f7f4b8d4d803dfcf6 832 21054
* DIRTY 335c4c6028171cfddfbaae1a9c313c52
* CLEAN 335c4c6028171cfddfbaae1a9c313c52 3934 2342
* REMOVE 335c4c6028171cfddfbaae1a9c313c52
* DIRTY 1ab96a171faeeee38496d8b330771a7a
* CLEAN 1ab96a171faeeee38496d8b330771a7a 1600 234
* READ 335c4c6028171cfddfbaae1a9c313c52
* READ 3400330d1dfc7f3f7f4b8d4d803dfcf6
*
* The first five lines of the journal form its header. They are the
* constant string "libcore.io.DiskLruCache", the disk cache's version,
* the application's version, the value count, and a blank line.
*
* Each of the subsequent lines in the file is a record of the state of a
* cache entry. Each line contains space-separated values: a state, a key,
* and optional state-specific values.
* o DIRTY lines track that an entry is actively being created or updated.
* Every successful DIRTY action should be followed by a CLEAN or REMOVE
* action. DIRTY lines without a matching CLEAN or REMOVE indicate that
* temporary files may need to be deleted.
* o CLEAN lines track a cache entry that has been successfully published
* and may be read. A publish line is followed by the lengths of each of
* its values.
* o READ lines track accesses for LRU.
* o REMOVE lines track entries that have been deleted.
*
* The journal file is appended to as cache operations occur. The journal may
* occasionally be compacted by dropping redundant lines. A temporary file named
* "journal.tmp" will be used during compaction; that file should be deleted if
* it exists when the cache is opened.
*/
// This cache uses a journal file named "journal". A typical journal file
// looks like this:
// libcore.io.DiskLruCache
// 1
// 100
// 2
//
// CLEAN 3400330d1dfc7f3f7f4b8d4d803dfcf6 832 21054
// DIRTY 335c4c6028171cfddfbaae1a9c313c52
// CLEAN 335c4c6028171cfddfbaae1a9c313c52 3934 2342
// REMOVE 335c4c6028171cfddfbaae1a9c313c52
// DIRTY 1ab96a171faeeee38496d8b330771a7a
// CLEAN 1ab96a171faeeee38496d8b330771a7a 1600 234
// READ 335c4c6028171cfddfbaae1a9c313c52
// READ 3400330d1dfc7f3f7f4b8d4d803dfcf6
//
// The first five lines of the journal form its header. They are the
// constant string "libcore.io.DiskLruCache", the disk cache's version,
// the application's version, the value count, and a blank line.
//
// Each of the subsequent lines in the file is a record of the state of a
// cache entry. Each line contains space-separated values: a state, a key,
// and optional state-specific values.
// o DIRTY lines track that an entry is actively being created or updated.
// Every successful DIRTY action should be followed by a CLEAN or REMOVE
// action. DIRTY lines without a matching CLEAN or REMOVE indicate that
// temporary files may need to be deleted.
// o CLEAN lines track a cache entry that has been successfully published
// and may be read. A publish line is followed by the lengths of each of
// its values.
// o READ lines track accesses for LRU.
// o REMOVE lines track entries that have been deleted.
//
// The journal file is appended to as cache operations occur. The journal may
// occasionally be compacted by dropping redundant lines. A temporary file named
// "journal.tmp" will be used during compaction; that file should be deleted if
// it exists when the cache is opened.
private final File directory;
private final File journalFile;
@@ -371,11 +369,9 @@ public final class DiskLruCache implements Closeable {
return null;
}
/*
* Open all streams eagerly to guarantee that we see a single published
* snapshot. If we opened streams lazily then the streams could come
* from different edits.
*/
// Open all streams eagerly to guarantee that we see a single published
// snapshot. If we opened streams lazily then the streams could come
// from different edits.
InputStream[] ins = new InputStream[valueCount];
try {
for (int i = 0; i < valueCount; i++) {

View File

@@ -52,12 +52,10 @@ public class StrictLineReader implements Closeable {
private final InputStream in;
private final Charset charset;
/*
* Buffered data is stored in {@code buf}. As long as no exception occurs, 0 <= pos <= end
* and the data in the range [pos, end) is buffered for reading. At end of input, if there is
* an unterminated line, we set end == -1, otherwise end == pos. If the underlying
* {@code InputStream} throws an {@code IOException}, end may remain as either pos or -1.
*/
// Buffered data is stored in {@code buf}. As long as no exception occurs, 0 <= pos <= end
// and the data in the range [pos, end) is buffered for reading. At end of input, if there is
// an unterminated line, we set end == -1, otherwise end == pos. If the underlying
// {@code InputStream} throws an {@code IOException}, end may remain as either pos or -1.
private byte[] buf;
private int pos;
private int end;

View File

@@ -111,13 +111,11 @@ public final class HttpAuthenticator {
*/
private static List<Challenge> parseChallenges(RawHeaders responseHeaders,
String challengeHeader) {
/*
* auth-scheme = token
* auth-param = token "=" ( token | quoted-string )
* challenge = auth-scheme 1*SP 1#auth-param
* realm = "realm" "=" realm-value
* realm-value = quoted-string
*/
// auth-scheme = token
// auth-param = token "=" ( token | quoted-string )
// challenge = auth-scheme 1*SP 1#auth-param
// realm = "realm" "=" realm-value
// realm-value = quoted-string
List<Challenge> result = new ArrayList<Challenge>();
for (int h = 0; h < responseHeaders.length(); h++) {
if (!challengeHeader.equalsIgnoreCase(responseHeaders.getFieldName(h))) {

View File

@@ -121,12 +121,10 @@ public class HttpEngine {
/** Null until a response is received from the network or the cache. */
ResponseHeaders responseHeaders;
/*
* The cache response currently being validated on a conditional get. Null
* if the cached response doesn't exist or doesn't need validation. If the
* conditional get succeeds, these will be used for the response headers and
* body. If it fails, these will be closed and set to null.
*/
// The cache response currently being validated on a conditional get. Null
// if the cached response doesn't exist or doesn't need validation. If the
// conditional get succeeds, these will be used for the response headers and
// body. If it fails, these will be closed and set to null.
private ResponseHeaders cachedResponseHeaders;
private InputStream cachedResponseBody;
@@ -183,12 +181,10 @@ public class HttpEngine {
((OkResponseCache) policy.responseCache).trackResponse(responseSource);
}
/*
* The raw response source may require the network, but the request
* headers may forbid network use. In that case, dispose of the network
* response and use a GATEWAY_TIMEOUT response instead, as specified
* by http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.9.4.
*/
// The raw response source may require the network, but the request
// headers may forbid network use. In that case, dispose of the network
// response and use a GATEWAY_TIMEOUT response instead, as specified
// by http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.9.4.
if (requestHeaders.isOnlyIfCached() && responseSource.requiresConnection()) {
if (responseSource == ResponseSource.CONDITIONAL_CACHE) {
Util.closeQuietly(cachedResponseBody);
@@ -447,15 +443,13 @@ public class HttpEngine {
private void initContentStream(InputStream transferStream) throws IOException {
responseTransferIn = transferStream;
if (transparentGzip && responseHeaders.isContentEncodingGzip()) {
/*
* If the response was transparently gzipped, remove the gzip header field
* so clients don't double decompress. http://b/3009828
*
* Also remove the Content-Length in this case because it contains the
* length of the gzipped response. This isn't terribly useful and is
* dangerous because clients can query the content length, but not
* the content encoding.
*/
// If the response was transparently gzipped, remove the gzip header field
// so clients don't double decompress. http://b/3009828
//
// Also remove the Content-Length in this case because it contains the
// length of the gzipped response. This isn't terribly useful and is
// dangerous because clients can query the content length, but not
// the content encoding.
responseHeaders.stripContentEncoding();
responseHeaders.stripContentLength();
responseBodyIn = new GZIPInputStream(transferStream);
@@ -482,11 +476,9 @@ public class HttpEngine {
return true;
}
/*
* If the Content-Length or Transfer-Encoding headers disagree with the
* response code, the response is malformed. For best compatibility, we
* honor the headers.
*/
// If the Content-Length or Transfer-Encoding headers disagree with the
// response code, the response is malformed. For best compatibility, we
// honor the headers.
if (responseHeaders.getContentLength() != -1 || responseHeaders.isChunked()) {
return true;
}

View File

@@ -151,11 +151,9 @@ public final class HttpResponseCache extends ResponseCache implements OkResponse
}
return null;
} else if (!requestMethod.equals("GET")) {
/*
* Don't cache non-GET responses. We're technically allowed to cache
* HEAD requests and some POST requests, but the complexity of doing
* so is high and the benefit is low.
*/
// Don't cache non-GET responses. We're technically allowed to cache
// HEAD requests and some POST requests, but the complexity of doing
// so is high and the benefit is low.
return null;
}
@@ -338,8 +336,9 @@ public final class HttpResponseCache extends ResponseCache implements OkResponse
private final Certificate[] peerCertificates;
private final Certificate[] localCertificates;
/*
/**
* Reads an entry from an input stream. A typical entry looks like this:
* <pre>{@code
* http://google.com/foo
* GET
* 2
@@ -350,8 +349,10 @@ public final class HttpResponseCache extends ResponseCache implements OkResponse
* Content-Type: image/png
* Content-Length: 100
* Cache-Control: max-age=600
* }</pre>
*
* A typical HTTPS file looks like this:
* <p>A typical HTTPS file looks like this:
* <pre>{@code
* https://google.com/foo
* GET
* 2
@@ -368,15 +369,15 @@ public final class HttpResponseCache extends ResponseCache implements OkResponse
* base64-encoded peerCertificate[0]
* base64-encoded peerCertificate[1]
* -1
*
* }</pre>
* The file is newline separated. The first two lines are the URL and
* the request method. Next is the number of HTTP Vary request header
* lines, followed by those lines.
*
* Next is the response status line, followed by the number of HTTP
* <p>Next is the response status line, followed by the number of HTTP
* response header lines, followed by those lines.
*
* HTTPS responses also contain SSL session information. This begins
* <p>HTTPS responses also contain SSL session information. This begins
* with a blank line, and then a line containing the cipher suite. Next
* is the length of the peer certificate chain. These certificates are
* base64-encoded and appear each on their own line. The next line

View File

@@ -217,11 +217,9 @@ public final class HttpTransport implements Transport {
httpEngine.responseHeaders.getContentLength());
}
/*
* Wrap the input stream from the connection (rather than just returning
* "socketIn" directly here), so that we can control its use after the
* reference escapes.
*/
// Wrap the input stream from the connection (rather than just returning
// "socketIn" directly here), so that we can control its use after the
// reference escapes.
return new UnknownLengthHttpInputStream(socketIn, cacheRequest, httpEngine);
}

View File

@@ -185,12 +185,10 @@ public class HttpURLConnectionImpl extends HttpURLConnection {
HttpEngine response = getResponse();
/*
* if the requested file does not exist, throw an exception formerly the
* Error page from the server was returned if the requested file was
* text/html this has changed to return FileNotFoundException for all
* file types
*/
// if the requested file does not exist, throw an exception formerly the
// Error page from the server was returned if the requested file was
// text/html this has changed to return FileNotFoundException for all
// file types
if (getResponseCode() >= HTTP_BAD_REQUEST) {
throw new FileNotFoundException(url.toString());
}
@@ -294,17 +292,13 @@ public class HttpURLConnectionImpl extends HttpURLConnection {
return httpEngine;
}
/*
* The first request was insufficient. Prepare for another...
*/
// The first request was insufficient. Prepare for another...
String retryMethod = method;
OutputStream requestBody = httpEngine.getRequestBody();
/*
* Although RFC 2616 10.3.2 specifies that a HTTP_MOVED_PERM
* redirect should keep the same method, Chrome, Firefox and the
* RI all issue GETs when following any redirect.
*/
// Although RFC 2616 10.3.2 specifies that a HTTP_MOVED_PERM
// redirect should keep the same method, Chrome, Firefox and the
// RI all issue GETs when following any redirect.
int responseCode = getResponseCode();
if (responseCode == HTTP_MULT_CHOICE
|| responseCode == HTTP_MOVED_PERM

View File

@@ -191,11 +191,9 @@ public final class RawHeaders {
throw new IllegalArgumentException("fieldName == null");
}
if (value == null) {
/*
* Given null values, the RI sends a malformed field line like
* "Accept\r\n". For platform compatibility and HTTP compliance, we
* print a warning and ignore null values.
*/
// Given null values, the RI sends a malformed field line like
// "Accept\r\n". For platform compatibility and HTTP compliance, we
// print a warning and ignore null values.
Platform.get()
.logW("Ignoring HTTP header field '" + fieldName + "' because its value is null");
return;

View File

@@ -298,12 +298,10 @@ final class ResponseHeaders {
long delta = expires.getTime() - servedMillis;
return delta > 0 ? delta : 0;
} else if (lastModified != null && uri.getRawQuery() == null) {
/*
* As recommended by the HTTP RFC and implemented in Firefox, the
* max age of a document should be defaulted to 10% of the
* document's age at the time it was served. Default expiration
* dates aren't used for URIs containing a query.
*/
// As recommended by the HTTP RFC and implemented in Firefox, the
// max age of a document should be defaulted to 10% of the
// document's age at the time it was served. Default expiration
// dates aren't used for URIs containing a query.
long servedMillis = servedDate != null ? servedDate.getTime() : sentRequestMillis;
long delta = servedMillis - lastModified.getTime();
return delta > 0 ? (delta / 10) : 0;
@@ -325,10 +323,8 @@ final class ResponseHeaders {
* request.
*/
public boolean isCacheable(RequestHeaders request) {
/*
* Always go to network for uncacheable response codes (RFC 2616, 13.4).
* This implementation doesn't support caching partial content.
*/
// Always go to network for uncacheable response codes (RFC 2616, 13.4).
// This implementation doesn't support caching partial content.
int responseCode = headers.getResponseCode();
if (responseCode != HttpURLConnection.HTTP_OK
&& responseCode != HttpURLConnection.HTTP_NOT_AUTHORITATIVE
@@ -338,10 +334,8 @@ final class ResponseHeaders {
return false;
}
/*
* Responses to authorized requests aren't cacheable unless they include
* a 'public', 'must-revalidate' or 's-maxage' directive.
*/
// Responses to authorized requests aren't cacheable unless they include
// a 'public', 'must-revalidate' or 's-maxage' directive.
if (request.hasAuthorization() && !isPublic && !mustRevalidate && sMaxAgeSeconds == -1) {
return false;
}
@@ -377,11 +371,9 @@ final class ResponseHeaders {
/** Returns the source to satisfy {@code request} given this cached response. */
public ResponseSource chooseResponseSource(long nowMillis, RequestHeaders request) {
/*
* If this response shouldn't have been stored, it should never be used
* as a response source. This check should be redundant as long as the
* persistence store is well-behaved and the rules are constant.
*/
// If this response shouldn't have been stored, it should never be used
// as a response source. This check should be redundant as long as the
// persistence store is well-behaved and the rules are constant.
if (!isCacheable(request)) {
return ResponseSource.NETWORK;
}
@@ -439,11 +431,9 @@ final class ResponseHeaders {
return true;
}
/*
* The HTTP spec says that if the network's response is older than our
* cached response, we may return the cache's response. Like Chrome (but
* unlike Firefox), this client prefers to return the newer response.
*/
// The HTTP spec says that if the network's response is older than our
// cached response, we may return the cache's response. Like Chrome (but
// unlike Firefox), this client prefers to return the newer response.
if (lastModified != null
&& networkResponse.lastModified != null
&& networkResponse.lastModified.getTime() < lastModified.getTime()) {

View File

@@ -43,19 +43,17 @@ import java.util.concurrent.TimeUnit;
*/
public final class SpdyConnection implements Closeable {
/*
* Internal state of this connection is guarded by 'this'. No blocking
* operations may be performed while holding this lock!
*
* Socket writes are guarded by spdyWriter.
*
* Socket reads are unguarded but are only made by the reader thread.
*
* Certain operations (like SYN_STREAM) need to synchronize on both the
* spdyWriter (to do blocking I/O) and this (to create streams). Such
* operations must synchronize on 'this' last. This ensures that we never
* wait for a blocking operation while holding 'this'.
*/
// Internal state of this connection is guarded by 'this'. No blocking
// operations may be performed while holding this lock!
//
// Socket writes are guarded by spdyWriter.
//
// Socket reads are unguarded but are only made by the reader thread.
//
// Certain operations (like SYN_STREAM) need to synchronize on both the
// spdyWriter (to do blocking I/O) and this (to create streams). Such
// operations must synchronize on 'this' last. This ensures that we never
// wait for a blocking operation while holding 'this'.
static final int FLAG_FIN = 0x1;
static final int FLAG_UNIDIRECTIONAL = 0x2;

View File

@@ -32,10 +32,8 @@ import static java.nio.ByteOrder.BIG_ENDIAN;
/** A logical bidirectional stream. */
public final class SpdyStream {
/*
* Internal state is guarded by this. No long-running or potentially
* blocking operations are performed while the lock is held.
*/
// Internal state is guarded by this. No long-running or potentially
// blocking operations are performed while the lock is held.
private static final int DATA_FRAME_HEADER_LENGTH = 8;
@@ -385,19 +383,17 @@ public final class SpdyStream {
* it is not intended for use by multiple readers.
*/
private final class SpdyDataInputStream extends InputStream {
/*
* Store incoming data bytes in a circular buffer. When the buffer is
* empty, pos == -1. Otherwise pos is the first byte to read and limit
* is the first byte to write.
*
* { - - - X X X X - - - }
* ^ ^
* pos limit
*
* { X X X - - - - X X X }
* ^ ^
* limit pos
*/
// Store incoming data bytes in a circular buffer. When the buffer is
// empty, pos == -1. Otherwise pos is the first byte to read and limit
// is the first byte to write.
//
// { - - - X X X X - - - }
// ^ ^
// pos limit
//
// { X X X - - - - X X X }
// ^ ^
// limit pos
private final byte[] buffer = new byte[Settings.DEFAULT_INITIAL_WINDOW_SIZE];

View File

@@ -480,11 +480,9 @@ public final class HttpResponseCacheTest {
}
@Test public void serverDisconnectsPrematurelyWithNoLengthHeaders() throws IOException {
/*
* Intentionally empty. This case doesn't make sense because there's no
* such thing as a premature disconnect when the disconnect itself
* indicates the end of the data stream.
*/
// Intentionally empty. This case doesn't make sense because there's no
// such thing as a premature disconnect when the disconnect itself
// indicates the end of the data stream.
}
private void testServerPrematureDisconnect(TransferKind transferKind) throws IOException {
@@ -646,10 +644,8 @@ public final class HttpResponseCacheTest {
}
@Test public void maxAgeInThePastWithDateHeaderButNoLastModifiedHeader() throws Exception {
/*
* Chrome interprets max-age relative to the local clock. Both our cache
* and Firefox both use the earlier of the local and server's clock.
*/
// Chrome interprets max-age relative to the local clock. Both our cache
// and Firefox both use the earlier of the local and server's clock.
assertNotCached(new MockResponse().addHeader("Date: " + formatDate(-120, TimeUnit.SECONDS))
.addHeader("Cache-Control: max-age=60"));
}
@@ -719,10 +715,8 @@ public final class HttpResponseCacheTest {
}
private void testRequestMethod(String requestMethod, boolean expectCached) throws Exception {
/*
* 1. seed the cache (potentially)
* 2. expect a cache hit or miss
*/
// 1. seed the cache (potentially)
// 2. expect a cache hit or miss
server.enqueue(new MockResponse().addHeader("Expires: " + formatDate(1, TimeUnit.HOURS))
.addHeader("X-Response-ID: 1"));
server.enqueue(new MockResponse().addHeader("X-Response-ID: 2"));
@@ -756,11 +750,9 @@ public final class HttpResponseCacheTest {
}
private void testMethodInvalidates(String requestMethod) throws Exception {
/*
* 1. seed the cache
* 2. invalidate it
* 3. expect a cache miss
*/
// 1. seed the cache
// 2. invalidate it
// 3. expect a cache miss
server.enqueue(
new MockResponse().setBody("A").addHeader("Expires: " + formatDate(1, TimeUnit.HOURS)));
server.enqueue(new MockResponse().setBody("B"));
@@ -841,10 +833,8 @@ public final class HttpResponseCacheTest {
}
@Test public void partialRangeResponsesDoNotCorruptCache() throws Exception {
/*
* 1. request a range
* 2. request a full document, expecting a cache miss
*/
// 1. request a range
// 2. request a full document, expecting a cache miss
server.enqueue(new MockResponse().setBody("AA")
.setResponseCode(HttpURLConnection.HTTP_PARTIAL)
.addHeader("Expires: " + formatDate(1, TimeUnit.HOURS))

View File

@@ -1701,11 +1701,9 @@ public final class URLConnectionTest {
}
@Test public void readTimeouts() throws IOException {
/*
* This relies on the fact that MockWebServer doesn't close the
* connection after a response has been sent. This causes the client to
* try to read more bytes than are sent, which results in a timeout.
*/
// This relies on the fact that MockWebServer doesn't close the
// connection after a response has been sent. This causes the client to
// try to read more bytes than are sent, which results in a timeout.
MockResponse timeout =
new MockResponse().setBody("ABC").clearHeaders().addHeader("Content-Length: 4");
server.enqueue(timeout);

View File

@@ -120,10 +120,8 @@ public final class MockSpdyServer {
logger.log(Level.WARNING, "MockWebServer connection failed", e);
}
/*
* This gnarly block of code will release all sockets and
* all threads, even if any close fails.
*/
// This gnarly block of code will release all sockets and
// all threads, even if any close fails.
try {
serverSocket.close();
} catch (Throwable e) {