Demo
package main

import (
	"context"
	"fmt"
	"log"
	"time"

	"golang.org/x/sync/semaphore"
)
// main demonstrates how to use a semaphore to limit the number of
// goroutines working on parallel tasks.
//
// This use of a semaphore mimics a typical “worker pool” pattern, but without
// the need to explicitly shut down idle workers when the work is done.
func main() {
	ctx := context.TODO()

	var (
		maxWorkers = 2
		sem        = semaphore.NewWeighted(int64(maxWorkers))
		out        = make([]int, 6)
	)

	// Compute the output using up to maxWorkers goroutines at a time.
	for i := range out {
		// When maxWorkers goroutines are in flight, Acquire blocks until one of the
		// workers finishes.
		fmt.Println("wanna acquire", i, time.Now())
		if err := sem.Acquire(ctx, 1); err != nil {
			log.Printf("Failed to acquire semaphore: %v", err)
			break
		}
		fmt.Println("acquired", i, time.Now())

		go func(i int) {
			defer sem.Release(1)
			out[i] = collatzSteps(i + 1)
			fmt.Println("release", i, time.Now())
		}(i)
	}

	// Acquire all of the tokens to wait for any remaining workers to finish.
	//
	// If you are already waiting for the workers by some other means (such as an
	// errgroup.Group; see the sketch after the output below), you can omit this
	// final Acquire call.
	if err := sem.Acquire(ctx, int64(maxWorkers)); err != nil {
		log.Printf("Failed to acquire semaphore: %v", err)
	}

	fmt.Println(out)
}
// collatzSteps computes the number of steps to reach 1 under the Collatz
// conjecture. (See https://en.wikipedia.org/wiki/Collatz_conjecture.)
func collatzSteps(n int) (steps int) {
	if n <= 0 {
		panic("nonpositive input")
	}

	for ; n > 1; steps++ {
		if steps < 0 {
			panic("too many steps")
		}
		if n%2 == 0 {
			n /= 2
			continue
		}

		const maxInt = int(^uint(0) >> 1)
		if n > (maxInt-1)/3 {
			panic("overflow")
		}
		n = 3*n + 1
	}

	// Sleep so that each task takes long enough for the semaphore's
	// blocking behavior to show up in the timestamped output below.
	time.Sleep(2 * time.Second)

	return steps
}
Output
wanna acquire 0 2021-07-29 21:29:55.972384 +0800 +08 m=+0.000192241
acquired 0 2021-07-29 21:29:55.97253 +0800 +08 m=+0.000338301
wanna acquire 1 2021-07-29 21:29:55.972537 +0800 +08 m=+0.000345751
acquired 1 2021-07-29 21:29:55.97254 +0800 +08 m=+0.000348581
wanna acquire 2 2021-07-29 21:29:55.972543 +0800 +08 m=+0.000351301
release 0 2021-07-29 21:29:57.974328 +0800 +08 m=+2.002084988
release 1 2021-07-29 21:29:57.974341 +0800 +08 m=+2.002097828
acquired 2 2021-07-29 21:29:57.974401 +0800 +08 m=+2.002157377
wanna acquire 3 2021-07-29 21:29:57.974408 +0800 +08 m=+2.002164457
acquired 3 2021-07-29 21:29:57.974414 +0800 +08 m=+2.002170807
wanna acquire 4 2021-07-29 21:29:57.974418 +0800 +08 m=+2.002175267
release 3 2021-07-29 21:29:59.974625 +0800 +08 m=+4.002330347
acquired 4 2021-07-29 21:29:59.974662 +0800 +08 m=+4.002367247
wanna acquire 5 2021-07-29 21:29:59.974668 +0800 +08 m=+4.002373527
release 2 2021-07-29 21:29:59.97462 +0800 +08 m=+4.002325277
acquired 5 2021-07-29 21:29:59.974684 +0800 +08 m=+4.002389377
release 4 2021-07-29 21:30:01.977041 +0800 +08 m=+6.004694503
release 5 2021-07-29 21:30:01.977047 +0800 +08 m=+6.004701013
[0 1 7 2 5 8]
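As the comment before the final Acquire notes, that call can be dropped if the workers are awaited by other means. Below is a minimal sketch of the same bounded-concurrency demo built on golang.org/x/sync/errgroup instead, assuming a version of errgroup that provides SetLimit (added to the package after this demo's 2021 output was captured); it reuses collatzSteps from above:

package main

import (
	"fmt"

	"golang.org/x/sync/errgroup"
)

func main() {
	out := make([]int, 6)

	var g errgroup.Group
	g.SetLimit(2) // like maxWorkers: at most 2 goroutines in flight

	for i := range out {
		i := i // capture the loop variable (needed before Go 1.22)
		g.Go(func() error {
			out[i] = collatzSteps(i + 1)
			return nil
		})
	}

	// Wait blocks until every goroutine started with Go has returned,
	// replacing the final Acquire(maxWorkers) of the semaphore version.
	if err := g.Wait(); err != nil {
		fmt.Println("error:", err)
	}
	fmt.Println(out)
}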
Source Code
package semaphore

import (
	"container/list"
	"context"
	"sync"
)

type waiter struct {
	n     int64
	ready chan<- struct{} // Closed when semaphore acquired.
}
// NewWeighted creates a new weighted semaphore with the given
// maximum combined weight for concurrent access.
func NewWeighted(n int64) *Weighted {
	w := &Weighted{size: n}
	return w
}
// Weighted provides a way to bound concurrent access to a resource.
// The callers can request access with a given weight.
type Weighted struct {
	size    int64      // maximum combined weight that may be held at once
	cur     int64      // combined weight currently held
	mu      sync.Mutex // guards cur and waiters
	waiters list.List  // FIFO queue of blocked waiter values
}
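The excerpt below omits Release, the other half of the protocol: it returns n tokens by decrementing cur and then wakes any waiters that now fit. Here is a sketch consistent with the fields above, paraphrased from the upstream package; see golang.org/x/sync/semaphore for the authoritative implementation:

// Release releases the semaphore with a weight of n.
func (s *Weighted) Release(n int64) {
	s.mu.Lock()
	s.cur -= n
	if s.cur < 0 {
		s.mu.Unlock()
		panic("semaphore: released more than held")
	}
	// Wake as many queued waiters as the returned tokens allow.
	s.notifyWaiters()
	s.mu.Unlock()
}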
// Acquire acquires the semaphore with a weight of n, blocking until resources
// are available or ctx is done. On success, returns nil. On failure, returns
// ctx.Err() and leaves the semaphore unchanged.
//
// If ctx is already done, Acquire may still succeed without blocking.
func (s *Weighted) Acquire(ctx context.Context, n int64) error {
	s.mu.Lock()
	if s.size-s.cur >= n && s.waiters.Len() == 0 {
		s.cur += n
		s.mu.Unlock()
		return nil
	}

	if n > s.size {
		// Don't make other Acquire calls block on one that's doomed to fail.
		s.mu.Unlock()
		<-ctx.Done()
		return ctx.Err()
	}

	ready := make(chan struct{})
	w := waiter{n: n, ready: ready}
	elem := s.waiters.PushBack(w)
	s.mu.Unlock()

	select {
	case <-ctx.Done():
		err := ctx.Err()
		s.mu.Lock()
		select {
		case <-ready:
			// Acquired the semaphore after we were canceled. Rather than trying to
			// fix up the queue, just pretend we didn't notice the cancelation.
			err = nil
		default:
			isFront := s.waiters.Front() == elem
			s.waiters.Remove(elem)
			// If we're at the front and there are extra tokens left, notify other waiters.
			if isFront && s.size > s.cur {
				s.notifyWaiters()
			}
		}
		s.mu.Unlock()
		return err

	case <-ready:
		return nil
	}
}
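Because Acquire returns ctx.Err() and leaves the semaphore untouched on failure, a caller can bound how long it waits for a token. A small caller-side sketch, using the same imports as the demo above; acquireWithTimeout is a hypothetical helper, not part of the package:

// acquireWithTimeout is a hypothetical helper: it gives up on acquiring
// a token once d elapses, relying on Acquire returning ctx.Err().
func acquireWithTimeout(sem *semaphore.Weighted, d time.Duration) bool {
	ctx, cancel := context.WithTimeout(context.Background(), d)
	defer cancel()

	if err := sem.Acquire(ctx, 1); err != nil {
		// On failure the semaphore is left unchanged, so there is no
		// token to Release on this path.
		log.Printf("acquire timed out: %v", err)
		return false
	}
	return true // the caller must Release(1) when done
}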
func (s *Weighted) notifyWaiters() {
	for {
		next := s.waiters.Front()
		if next == nil {
			break // No more waiters blocked.
		}

		w := next.Value.(waiter)
		if s.size-s.cur < w.n {
			// Not enough tokens for the next waiter. We could keep going (to try to
			// find a waiter with a smaller request), but under load that could cause
			// starvation for large requests; instead, we leave all remaining waiters
			// blocked.
			//
			// Consider a semaphore used as a read-write lock, with N tokens, N
			// readers, and one writer. Each reader can Acquire(1) to obtain a read
			// lock. The writer can Acquire(N) to obtain a write lock, excluding all
			// of the readers. If we allow the readers to jump ahead in the queue,
			// the writer will starve: there is always one token available for every
			// reader.
			break
		}

		s.cur += w.n
		s.waiters.Remove(next)
		// Closing this channel unblocks the corresponding Acquire call,
		// notifying the waiter that it now holds the semaphore.
		close(w.ready)
	}
}
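The read-write-lock analogy in the comment above maps directly onto the API. A minimal caller-side sketch, with illustrative names and sequential calls for clarity:

package main

import (
	"context"
	"fmt"

	"golang.org/x/sync/semaphore"
)

func main() {
	const n = 4 // total weight: up to n concurrent readers
	rw := semaphore.NewWeighted(n)
	ctx := context.Background()

	// A reader holds one token, so up to n readers can proceed at once.
	read := func(id int) {
		_ = rw.Acquire(ctx, 1)
		defer rw.Release(1)
		fmt.Println("reader", id)
	}

	// The writer takes all n tokens, excluding every reader. Because
	// notifyWaiters stops at the first waiter that does not fit, readers
	// queued behind a waiting writer cannot starve it.
	write := func() {
		_ = rw.Acquire(ctx, n)
		defer rw.Release(n)
		fmt.Println("writer has exclusive access")
	}

	read(1)
	write()
	read(2)
}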