As a best-selling author, I invite you to explore my books on Amazon. Don't forget to follow me on Medium and show your support — it means the world. Thank you!
Network Performance Optimization with JavaScript: A Practical Guide
In modern web development, optimizing network performance is crucial for creating responsive applications. I'll share proven techniques that have significantly improved performance in my projects.
API Request Batching
Instead of making multiple individual API calls, combining them into a single request reduces network overhead. Here's how I implement this:
/**
 * Batches individual API requests into a single POST to `/api/batch`.
 *
 * Requests added via `add()` are queued; a batch is flushed either as soon
 * as the queue reaches `batchSize`, or after `delay` ms of inactivity,
 * whichever comes first. Each caller's promise resolves with its positional
 * result from the batch response.
 */
class RequestBatcher {
  constructor(batchSize = 10, delay = 100) {
    this.queue = [];
    this.batchSize = batchSize;
    this.delay = delay;
    this.timeoutId = null;
  }

  /**
   * Queue a request and get a promise for its individual result.
   * @param {*} request - Payload understood by the batch endpoint.
   * @returns {Promise<*>} Resolves with this request's entry in the batch response.
   */
  add(request) {
    return new Promise((resolve, reject) => {
      this.queue.push({ request, resolve, reject });
      // Flush immediately once a full batch is available; otherwise debounce
      // so small bursts coalesce. (Without the size check, a steady stream of
      // adds keeps resetting the timer and the batch may never fire.)
      if (this.queue.length >= this.batchSize) {
        this.flush();
      } else {
        this.scheduleBatch();
      }
    });
  }

  // Cancel any pending timer and process the queue right away.
  flush() {
    if (this.timeoutId) {
      clearTimeout(this.timeoutId);
      this.timeoutId = null;
    }
    this.processBatch();
  }

  async processBatch() {
    this.timeoutId = null;
    const batch = this.queue.splice(0, this.batchSize);
    if (batch.length === 0) return; // nothing queued (e.g. redundant flush)
    const requests = batch.map(item => item.request);
    try {
      const response = await fetch('/api/batch', {
        method: 'POST',
        body: JSON.stringify({ requests }),
        headers: { 'Content-Type': 'application/json' }
      });
      if (!response.ok) {
        throw new Error(`Batch request failed with status ${response.status}`);
      }
      const results = await response.json();
      // Results are positional: results[i] answers batch[i].
      batch.forEach((item, index) => {
        item.resolve(results[index]);
      });
    } catch (error) {
      // A transport-level failure fails every request in this batch.
      batch.forEach(item => item.reject(error));
    }
    // More items may have queued while the fetch was in flight — keep
    // draining so nothing is stranded (the original processed one batch
    // and then left any remainder in the queue indefinitely).
    if (this.queue.length > 0) this.scheduleBatch();
  }

  scheduleBatch() {
    if (this.timeoutId) clearTimeout(this.timeoutId);
    this.timeoutId = setTimeout(() => this.processBatch(), this.delay);
  }
}
Response Caching
Implementing effective caching strategies reduces server load and improves response times. Here's a Service Worker implementation:
// service-worker.js
// Cache-first strategy: serve from cache when possible, otherwise fetch
// and cache successful same-origin GET responses for next time.
const CACHE_NAME = 'app-cache-v1';
self.addEventListener('fetch', event => {
  // The Cache API only supports GET; cache.put() on other methods throws,
  // so let non-GET requests (POST, PUT, ...) go straight to the network.
  if (event.request.method !== 'GET') return;
  event.respondWith(
    caches.match(event.request).then(response => {
      // Cache hit: return the stored response without touching the network.
      if (response) return response;
      return fetch(event.request).then(response => {
        // Only cache complete, successful, same-origin ('basic') responses;
        // opaque or error responses would poison the cache.
        if (!response || response.status !== 200 || response.type !== 'basic') {
          return response;
        }
        // Clone before caching: a Response body can only be consumed once.
        const responseToCache = response.clone();
        caches.open(CACHE_NAME).then(cache => {
          cache.put(event.request, responseToCache);
        });
        return response;
      });
    })
  );
});
Streaming Data
Processing large datasets becomes more efficient with streaming:
/**
 * Streams newline-delimited JSON (NDJSON) from `url`, handing each parsed
 * line to `processDataChunk` as soon as it arrives instead of buffering
 * the whole payload in memory.
 *
 * @param {string} url - Endpoint returning NDJSON.
 * @throws {Error} If the HTTP response is not OK.
 * @throws {SyntaxError} If a non-blank line is not valid JSON.
 */
async function streamData(url) {
  const response = await fetch(url);
  if (!response.ok) {
    throw new Error(`Request failed with status ${response.status}`);
  }
  const reader = response.body.getReader();
  const decoder = new TextDecoder();
  let buffer = '';
  while (true) {
    const { done, value } = await reader.read();
    if (done) break;
    // stream: true keeps multi-byte characters split across chunks intact.
    buffer += decoder.decode(value, { stream: true });
    const lines = buffer.split('\n');
    // The last element may be a partial line; keep it for the next chunk.
    buffer = lines.pop();
    for (const line of lines) {
      // Skip blank lines (e.g. consecutive newlines) instead of crashing
      // on JSON.parse('').
      if (line.trim()) processDataChunk(JSON.parse(line));
    }
  }
  // Flush the decoder and process a final line that lacks a trailing
  // newline — the original silently dropped this last record.
  buffer += decoder.decode();
  if (buffer.trim()) processDataChunk(JSON.parse(buffer));
}
// Placeholder consumer for streamData: invoked once per parsed NDJSON line.
// Replace with real handling (rendering, aggregation, storage, ...).
function processDataChunk(data) {
// Process individual data chunks
}
Prefetching Resources
I implement intelligent prefetching to load resources before they're needed:
/**
 * Injects `<link rel="prefetch">` hints for elements carrying a
 * `data-prefetch` URL once they scroll into view, so resources are
 * requested shortly before they are needed. Each URL is prefetched
 * at most once.
 */
class ResourcePrefetcher {
  constructor() {
    this.prefetchedUrls = new Set();
    this.observer = new IntersectionObserver(this.handleIntersection.bind(this));
  }

  // Start watching every element that declares a data-prefetch URL.
  observe(elements) {
    for (const el of elements) {
      if (el.dataset.prefetch) this.observer.observe(el);
    }
  }

  // IntersectionObserver callback: prefetch targets as they become visible,
  // then stop watching them.
  handleIntersection(entries) {
    for (const entry of entries) {
      if (!entry.isIntersecting) continue;
      this.prefetchResource(entry.target.dataset.prefetch);
      this.observer.unobserve(entry.target);
    }
  }

  // Append a prefetch hint to <head> unless this URL was already handled.
  prefetchResource(url) {
    if (this.prefetchedUrls.has(url)) return;
    const hint = document.createElement('link');
    hint.rel = 'prefetch';
    hint.href = url;
    document.head.appendChild(hint);
    this.prefetchedUrls.add(url);
  }
}
Connection Pooling
Maintaining a pool of persistent WebSocket connections avoids repeated handshake overhead for applications with frequent client–server communication:
/**
 * Round-robin pool of WebSocket connections to a single URL, spreading
 * outgoing messages across `poolSize` sockets.
 *
 * NOTE(review): sockets that close are never reconnected; consider adding
 * reconnect logic for long-lived sessions.
 */
class WebSocketPool {
  constructor(url, poolSize = 5) {
    this.url = url;
    this.poolSize = poolSize;
    this.connections = [];
    this.currentIndex = 0;
    this.initialize();
  }

  // Open all sockets up front so send() never waits on a handshake.
  initialize() {
    for (let i = 0; i < this.poolSize; i++) {
      this.connections.push(new WebSocket(this.url));
    }
  }

  // Return the next socket in round-robin order.
  getConnection() {
    const connection = this.connections[this.currentIndex];
    this.currentIndex = (this.currentIndex + 1) % this.poolSize;
    return connection;
  }

  /**
   * Send `data` (JSON-serialized) over the next OPEN socket.
   *
   * Tries every socket in the pool rather than silently dropping the
   * message when the round-robin pick happens to be closed or still
   * connecting (the original only checked a single connection).
   *
   * @param {*} data - JSON-serializable payload.
   * @returns {boolean} true if the message was handed to a socket.
   */
  send(data) {
    const payload = JSON.stringify(data);
    for (let attempt = 0; attempt < this.poolSize; attempt++) {
      const connection = this.getConnection();
      if (connection.readyState === WebSocket.OPEN) {
        connection.send(payload);
        return true;
      }
    }
    // No socket was OPEN; the message could not be delivered.
    return false;
  }
}
Request Prioritization
Managing request priorities ensures critical resources load first:
/**
 * Two-level priority queue for fetch requests: every high-priority entry
 * is sent before any low-priority one. Requests are issued one at a time,
 * in arrival order within each priority level.
 */
class RequestQueue {
  constructor() {
    this.highPriority = [];
    this.lowPriority = [];
    this.processing = false;
  }

  /**
   * Enqueue a request and start draining the queues if currently idle.
   * @param {{url: string, options?: object}} request
   * @param {'high'|'low'} [priority='low']
   */
  async add(request, priority = 'low') {
    const target = priority === 'high' ? this.highPriority : this.lowPriority;
    target.push(request);
    if (this.processing) return;
    this.processing = true;
    await this.processQueue();
  }

  // Drain both queues, always preferring high-priority entries.
  async processQueue() {
    for (;;) {
      const next = this.highPriority.shift() || this.lowPriority.shift();
      if (!next) break;
      await this.processRequest(next);
    }
    this.processing = false;
  }

  // Issue a single request; failures are logged, never thrown.
  async processRequest(request) {
    try {
      await fetch(request.url, request.options);
    } catch (error) {
      console.error('Request failed:', error);
    }
  }
}
Compression
Implementing compression reduces data transfer sizes:
/**
 * Gzip-compresses JSON payloads with the Compression Streams API and
 * POSTs them with a matching Content-Encoding header.
 */
class CompressionHandler {
  /**
   * Gzip a JSON-serializable value.
   *
   * A fresh CompressionStream is created on every call: a stream can only
   * be piped through once, so the shared instance the constructor used to
   * hold made every call after the first throw with a locked stream.
   *
   * @param {*} data - Any JSON-serializable value.
   * @returns {Promise<Blob>} The gzip-compressed JSON bytes.
   */
  async compressData(data) {
    const blob = new Blob([JSON.stringify(data)]);
    const compressed = blob.stream().pipeThrough(new CompressionStream('gzip'));
    return new Response(compressed).blob();
  }

  /**
   * Compress `data` and POST it to `url`.
   *
   * NOTE(review): Content-Encoding on *requests* requires explicit server
   * support — many servers do not transparently decode gzip request bodies.
   * Confirm the endpoint handles it.
   *
   * @param {string} url
   * @param {*} data - Any JSON-serializable value.
   * @returns {Promise<Response>}
   */
  async sendCompressedData(url, data) {
    const compressed = await this.compressData(data);
    return fetch(url, {
      method: 'POST',
      body: compressed,
      headers: {
        'Content-Type': 'application/json',
        'Content-Encoding': 'gzip'
      }
    });
  }
}
Monitoring and Analytics
I always implement performance monitoring to measure the impact of these optimizations:
/**
 * Lightweight start/stop timer keyed by label, built on performance.now().
 */
class PerformanceMonitor {
  constructor() {
    // label -> start timestamp (ms); entry is removed when measurement ends.
    this.metrics = {};
  }

  // Record the start time for `label`, overwriting any in-flight measurement.
  startMeasurement(label) {
    this.metrics[label] = performance.now();
  }

  /**
   * Finish the measurement for `label` and log its duration.
   *
   * Uses Object.hasOwn instead of a truthiness test so a legitimate start
   * timestamp of exactly 0 is not mistaken for "never started".
   *
   * @param {string} label
   * @returns {number|undefined} Duration in ms, or undefined when `label`
   *   was never started.
   */
  endMeasurement(label) {
    if (!Object.hasOwn(this.metrics, label)) return undefined;
    const duration = performance.now() - this.metrics[label];
    delete this.metrics[label];
    this.logMetric(label, duration);
    return duration;
  }

  logMetric(label, duration) {
    console.log(`${label}: ${duration.toFixed(2)}ms`);
    // Send to analytics service
  }
}
These techniques form a comprehensive approach to network optimization. When implemented correctly, they can significantly improve application performance and user experience.
Remember to measure performance before and after implementing these optimizations. Different applications may benefit from different combinations of these techniques, so it's essential to test and monitor their impact in your specific context.
The key to successful optimization is continuous monitoring and adjustment. As your application evolves, regularly review and update your optimization strategies to maintain optimal performance.
101 Books
101 Books is an AI-driven publishing company co-founded by author Aarav Joshi. By leveraging advanced AI technology, we keep our publishing costs incredibly low—some books are priced as low as $4—making quality knowledge accessible to everyone.
Check out our book Golang Clean Code available on Amazon.
Stay tuned for updates and exciting news. When shopping for books, search for Aarav Joshi to find more of our titles. Use the provided link to enjoy special discounts!
Our Creations
Be sure to check out our creations:
Investor Central | Investor Central Spanish | Investor Central German | Smart Living | Epochs & Echoes | Puzzling Mysteries | Hindutva | Elite Dev | JS Schools
We are on Medium
Tech Koala Insights | Epochs & Echoes World | Investor Central Medium | Puzzling Mysteries Medium | Science & Epochs Medium | Modern Hindutva
Top comments (0)