{"payload":{"header_redesign_enabled":false,"results":[{"id":"728909209","archived":false,"color":"#3572A5","followers":37,"has_funding_file":false,"hl_name":"hkproj/pytorch-transformer-distributed","hl_trunc_description":"Distributed training (multi-node) of a Transformer model","language":"Python","mirror":false,"owned_by_organization":false,"public":true,"repo":{"repository":{"id":728909209,"name":"pytorch-transformer-distributed","owner_id":388315,"owner_login":"hkproj","updated_at":"2024-04-10T16:56:21.279Z","has_issues":true}},"sponsorable":false,"topics":["machine-learning","tutorial","deep-learning","pytorch","data-parallelism","model-parallelism","distributed-training","gradient-accumulation","distributed-data-parallel","collective-communication"],"type":"Public","help_wanted_issues_count":0,"good_first_issue_issues_count":0,"starred_by_current_user":false}],"type":"repositories","page":1,"page_count":1,"elapsed_millis":84,"errors":[],"result_count":1,"facets":[],"protected_org_logins":[],"topics":null,"query_id":"","logged_in":false,"sign_up_path":"/signup?source=code_search_results","sign_in_path":"/login?return_to=https%3A%2F%2Fgithub.com%2Fsearch%3Fq%3Drepo%253Ahkproj%252Fpytorch-transformer-distributed%2B%2Blanguage%253APython","metadata":null,"warn_limited_results":false,"csrf_tokens":{"/hkproj/pytorch-transformer-distributed/star":{"post":"pvXOtO4JNhvmv2zvdz4SC_DG5BeNu6FPpmzQ4MLD3mFUVqBFfEHlyCwPus8Kk-bhZIffNOQg0gxvJt7hTpeOBQ"},"/hkproj/pytorch-transformer-distributed/unstar":{"post":"nUF4A_SL8_DBijC93XbzKUKmBDcLoWEdFwCdgSxwrkJ1YzvFFKwTPBYztu36_6bw5darbFxQROf0cFu_V9ffBw"},"/sponsors/batch_deferred_sponsor_buttons":{"post":"jE6QU6t__ezID5fuBwC8B9PQ7b4yDtrQXyd_6YivGc9Y_ydhBL85-XP5Q1Y6h9XeUHiI3Cm-dPIT3o20ggmu7A"}}},"title":"Repository search results"}